diff --git "a/only_python_files.csv" "b/only_python_files.csv" new file mode 100644--- /dev/null +++ "b/only_python_files.csv" @@ -0,0 +1,107442 @@ +content +" import argparse +import sys +import os +import subprocess + +INSTALL = 'install' +LINUXINSTALL = 'linuxinstall' +FE_MIGRATE = 'migrateappfe' +LAUNCH_KAFKA = 'launchkafkaconsumer' +RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal' +BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal' +CONVERT_MODEL = 'convertmodel' +START_MLFLOW = 'mlflow' +COMMON_SERVICE = 'service' +TRAINING = 'training' +TRAINING_AWS = 'trainingonaws' +TRAINING_DISTRIBUTED = 'distributedtraining' +START_APPF = 'appfe' +ONLINE_TRAINING = 'onlinetraining' +TEXT_SUMMARIZATION = 'textsummarization' +GENERATE_MLAC = 'generatemlac' +AWS_TRAINING = 'awstraining' +LLAMA_7B_TUNING = 'llama7btuning' +LLM_PROMPT = 'llmprompt' +LLM_TUNING = 'llmtuning' +LLM_PUBLISH = 'llmpublish' +LLM_BENCHMARKING = 'llmbenchmarking' +TELEMETRY_PUSH = 'pushtelemetry' +def aion_aws_training(confFile): + from hyperscalers.aion_aws_training import awsTraining + status = awsTraining(confFile) + print(status) + +def aion_training(confFile): + from bin.aion_pipeline import aion_train_model + status = aion_train_model(confFile) + print(status) + +def aion_awstraining(config_file): + from hyperscalers import aws_instance + print(config_file) + aws_instance.training(config_file) + +def aion_generatemlac(ConfFile): + from bin.aion_mlac import generate_mlac_code + status = generate_mlac_code(ConfFile) + print(status) + +def aion_textsummarization(confFile): + from bin.aion_text_summarizer import aion_textsummary + status = aion_textsummary(confFile) + +def aion_oltraining(confFile): + from bin.aion_online_pipeline import aion_ot_train_model + status = aion_ot_train_model(confFile) + print(status) + +def do_telemetry_sync(): + from appbe.telemetry import SyncTelemetry + SyncTelemetry() + +def aion_llm_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image): + from llm.llm_inference import LLM_publish + LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image) + +def aion_migratefe(operation): + import os + import sys + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + ""Couldn't import Django. Are you sure it's installed and "" + ""available on your PYTHONPATH environment variable? Did you "" + ""forget to activate a virtual environment?"" + ) from exc + argi=[] + argi.append(os.path.abspath(__file__)) + argi.append(operation) + execute_from_command_line(argi) +def aion_appfe(url,port): + #manage_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'manage.py') + #subprocess.check_call([sys.executable,manage_location, ""runserver"",""%s:%s""%(url,port)]) + import os + import sys + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + ""Couldn't import Django. Are you sure it's installed and "" + ""available on your PYTHONPATH environment variable? 
Did you "" + ""forget to activate a virtual environment?"" + ) from exc + argi=[] + argi.append(os.path.abspath(__file__)) + argi.append('runaion') + argi.append(""%s:%s""%(url,port)) + execute_from_command_line(argi) + +def aion_linux_install(version): + from install import linux_dependencies + linux_dependencies.process(version) + +def aion_install(version): + from install import dependencies + dependencies.process(version) + +def aion_service(ip,port,username,password): + from bin.aion_service import start_server + start_server(ip,port,username,password) + +def aion_distributedLearning(confFile): + from distributed_learning import learning + learning.training(confFile) + +def aion_launchkafkaconsumer(): + from mlops import kafka_consumer + kafka_consumer.launch_kafka_consumer() + +def aion_start_mlflow(): + from appbe.dataPath import DEPLOY_LOCATION + import platform + import shutil + from os.path import expanduser + mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','Scripts','mlflow.exe')) + print(mlflowpath) + home = expanduser(""~"") + if platform.system() == 'Windows': + DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns') + outputStr = subprocess.Popen([sys.executable, mlflowpath,""ui"", ""--backend-store-uri"",""file:///""+DEPLOY_LOCATION]) + else: + DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns') + subprocess.check_call(['mlflow',""ui"",""-h"",""0.0.0.0"",""--backend-store-uri"",""file:///""+DEPLOY_LOCATION]) + +def aion_model_conversion(config_file): + from conversions import model_convertions + model_convertions.convert(config_file) + +def aion_model_buildMLaCContainer(config): + from mlops import build_container + build_container.local_docker_build(config) + +def aion_model_runpipelinelocal(config): + from mlops import local_pipeline + local_pipeline.run_pipeline(config) + +def aion_llm_tuning(config): + from llm.llm_tuning import run + run(config) + +def aion_llm_prompt(cloudconfig,instanceid,prompt): + from llm.aws_instance_api import LLM_predict + LLM_predict(cloudconfig,instanceid,prompt) + +def llm_bench_marking(hypervisor,instanceid,model,usecaseid,eval): + print(eval) + from llm.bench_marking import bench_mark + bench_mark(hypervisor,instanceid,model,usecaseid,eval) + +if __name__ == ""__main__"": + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--configPath', help='Config File Path') + parser.add_argument('-i', '--instanceid', help='instanceid') + parser.add_argument('-hv', '--hypervisor', help='hypervisor') + parser.add_argument('-md', '--model', help='model') + parser.add_argument('-uc', '--usecase', help='usecase') + parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path') + parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING,DISTRIBUTEDTRAINING') + parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method ') + parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method ') + parser.add_argument('-ac', '--appfecommand', help='APP Front End Command ') + parser.add_argument('-un','--username', help=""USERNAME"") + parser.add_argument('-passw','--password', help=""PASSWORD"") + parser.add_argument('-j', '--jsoninput', help='JSON Input') + parser.add_argument('-v', '--version', help='Installer Version') + parser.add_argument('-pf', '--prompt', help='Prompt File') + parser.add_argument('-r', '--region', help='REGION NAME') + parser.add_argument('-im', '--image', help='IMAGE NAME') + 
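+    # A few hypothetical invocations illustrating how the '-m/--module' flag drives the
+    # dispatch below (script name and paths are assumptions, not taken from this file):
+    #   python aion.py -m training -c /path/to/config.json
+    #   python aion.py -m appfe -ip 127.0.0.1 -p 8000
+    #   python aion.py -m llmprompt -cc cloud_config.json -i <instance-id> -pf prompt.txt
+    # Note: the 'mlflow' branch below calls aion_mlflow(), while the function defined above
+    # is aion_start_mlflow(); one of the two names presumably needs to be reconciled.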
parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc') + args = parser.parse_args() + if args.module.lower() == TRAINING: + aion_training(args.configPath) + elif args.module.lower() == TRAINING_AWS: + aion_awstraining(args.configPath) + elif args.module.lower() == TRAINING_DISTRIBUTED: + aion_distributedLearning(args.configPath) + elif args.module.lower() == START_APPF: + aion_appfe(args.ipaddress,args.port) + elif args.module.lower() == ONLINE_TRAINING: + aion_oltraining(args.configPath) + elif args.module.lower() == TEXT_SUMMARIZATION: + aion_textsummarization(args.configPath) + elif args.module.lower() == GENERATE_MLAC: + aion_generatemlac(args.configPath) + elif args.module.lower() == COMMON_SERVICE: + aion_service(args.ipaddress,args.port,args.username,args.password) + elif args.module.lower() == START_MLFLOW: + aion_mlflow() + elif args.module.lower() == CONVERT_MODEL: + aion_model_conversion(args.configPath) + elif args.module.lower() == BUILD_MLAC_CONTAINER: + aion_model_buildMLaCContainer(args.jsoninput) + elif args.module.lower() == RUN_LOCAL_MLAC_PIPELINE: + aion_model_runpipelinelocal(args.jsoninput) + elif args.module.lower() == LAUNCH_KAFKA: + aion_launchkafkaconsumer() + elif args.module.lower() == INSTALL: + aion_install(args.version) + elif args.module.lower() == LINUXINSTALL: + aion_linux_install(args.version) + elif args.module.lower() == FE_MIGRATE: + aion_migratefe('makemigrations') + aion_migratefe('migrate') + elif args.module.lower() == AWS_TRAINING: + aion_aws_training(args.configPath) + elif args.module.lower() == LLAMA_7B_TUNING: + aion_llm_tuning(args.configPath) + elif args.module.lower() == LLM_TUNING: + aion_llm_tuning(args.configPath) + elif args.module.lower() == LLM_PROMPT: + aion_llm_prompt(args.cloudConfigPath,args.instanceid,args.prompt) + elif args.module.lower() == LLM_PUBLISH: + aion_llm_publish(args.cloudConfigPath,args.instanceid,args.hypervisor,args.model,args.usecase,args.region,args.image) + elif args.module.lower() == LLM_BENCHMARKING: + llm_bench_marking(args.hypervisor,args.instanceid,args.model,args.usecase, args.eval) + elif args.module.lower() == TELEMETRY_PUSH: + do_telemetry_sync() import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__)))) +from .bin.aion_pipeline import aion_train_model + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import sys +import json +import datetime, time, timeit +import argparse +import logging +logging.getLogger('tensorflow').disabled = True +import math +import shutil +import re +from datetime import datetime as dt +import warnings +from config_manager.pipeline_config import AionConfigManager +import pandas as pd +import numpy as np +import sklearn +import string +from records import pushrecords +import logging + + +from pathlib import Path +from pytz import timezone +from config_manager.config_gen import code_configure +import joblib +from sklearn.model_selection import train_test_split +from config_manager.check_config import config_validate +from utils.file_ops import save_csv_compressed,save_csv,save_chromadb +LOG_FILE_NAME = 'model_training_logs.log' + +if 'AION' in sys.modules: + try: + from appbe.app_config import DEBUG_ENABLED + except: + DEBUG_ENABLED = False +else: + DEBUG_ENABLED = True + +def getversion(): + configFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config') + version = 'NA' + for file in os.listdir(configFolder): + if file.endswith("".var""): + version = file.rsplit('.', 1) + version = version[0] + break + return version + +AION_VERSION = getversion() +def pushRecordForTraining(): + try: + status,msg = pushrecords.enterRecord(AION_VERSION) + except Exception as e: + print(""Exception"", e) + status = False + msg = str(e) + return status,msg + +def mlflowSetPath(path,experimentname): + import mlflow + url = ""file:"" + str(Path(path).parent.parent) + ""/mlruns"" + mlflow.set_tracking_uri(url) + mlflow.set_experiment(str(experimentname)) + +def set_log_handler( basic, mode='w'): + deploy_loc = Path(basic.get('deployLocation')) + log_file_parent = deploy_loc/basic['modelName']/basic['modelVersion']/'log' + log_file_parent.mkdir(parents=True, exist_ok=True) + log_file = log_file_parent/LOG_FILE_NAME + + filehandler = logging.FileHandler(log_file, mode,'utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('eion') + log.propagate = False + for hdlr in" +"log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + return log + + +class server(): + def __init__(self): + self.response = None + self.features=[] + self.mFeatures=[] + self.emptyFeatures=[] + self.textFeatures=[] + self.vectorizerFeatures=[] + self.wordToNumericFeatures=[] + self.profilerAction = [] + self.targetType = '' + self.matrix1='{' + self.matrix2='{' + self.matrix='{' + self.trainmatrix='{' + self.numericalFeatures=[] + self.nonNumericFeatures=[] + self.similarGroups=[] + self.dfcols=0 + self.dfrows=0 + self.method = 'NA' + self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.modelSelTopFeatures=[] + self.topFeatures=[] + self.allFeatures=[] + def startScriptExecution(self, config_obj, codeConfigure, log): + oldStdout = sys.stdout + model_training_details = '' + model_tried='' + learner_type = '' + topics = {} + pred_filename = '' + numericContinuousFeatures='' + discreteFeatures='' + sessonal_freq = '' + additional_regressors = '' + threshold=-1 + targetColumn = '' + numericalFeatures ='' + nonNumericFeatures='' + categoricalFeatures='' + dataFolderLocation = '' + featureReduction = 'False' + original_data_file = '' + normalizer_pickle_file = '' + pcaModel_pickle_file = '' + bpca_features= [] + apca_features = [] + lag_order = 1 + 
profiled_data_file = '' + trained_data_file = '' + predicted_data_file='' + dictDiffCount={} + cleaning_kwargs = {} + grouperbyjson = '' + rowfilterexpression='' + featureEngineeringSelector = 'false' + conversion_method = '' + params={} + loss_matrix='binary_crossentropy' + optimizer='Nadam' + numericToLabel_json='[]' + preprocessing_pipe='' + firstDocFeature = '' + secondDocFeature = '' + padding_length = 30 + pipe = None + scalertransformationFile=None + column_merge_flag = False + merge_columns = [] + score = 0 + profilerObj = None + imageconfig='' + labelMaps={} + featureDataShape=[] + normFeatures = [] + preprocess_out_columns = [] + preprocess_pipe = None + label_encoder = None + unpreprocessed_columns = [] + import pickle + iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings() + inlierLabels=config_obj.getEionInliers() + scoreParam = config_obj.getScoringCreteria() + noofforecasts = config_obj.getNumberofForecasts() + datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures() + + filter_expression = config_obj.getFilterExpression() + refined_filter_expression = """" + sa_images = [] + model_tried = '' + deploy_config = {} + iterName = iterName.replace("" "", ""_"") + deployFolder = deployLocation + usecaseLocation,deployLocation,dataFolderLocation,imageFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile,reduction_data_file = config_obj.createDeploymentFolders(deployFolder,iterName,iterVersion) + outputLocation=deployLocation + mlflowSetPath(deployLocation,iterName+'_'+iterVersion) + # mlflowSetPath shut down the logger, so set again + set_log_handler( config_obj.basic, mode='a') + xtrain=pd.DataFrame() + xtest=pd.DataFrame() + log.info('Status:-|... AION Training Configuration started') + startTime = timeit.default_timer() + try: + output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}} + problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textsummarizationStatus,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus, objectDetectionStatus,stateTransitionStatus, similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus = config_obj.getModulesDetails() + status, error_id, msg = config_obj.validate_config() + if not status: + if error_id == 'fasttext': + raise ValueError(msg) + VideoProcessing = False + if(problem_type.lower() in ['classification','regression']): + if(targetFeature == ''): + output = {""status"":""FAIL"",""message"":""Target Feature is Must for Classification and Regression Problem Type""} + return output + + + from transformations.dataReader import dataReader + objData = dataReader() + DataIsFolder = False + folderdetails = config_obj.getFolderSettings() + + if os.path.isfile(dataLocation): + log.info('Status:-|... 
AION Loading Data') + dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier) + status,msg = save_csv_compressed(dataFrame,original_data_file) + if not status: + log.info('CSV File Error: '+str(msg)) + elif os.path.isdir(dataLocation): + if problem_type.lower() == 'summarization': + from document_summarizer import summarize + keywords, pretrained_type, embedding_sz = summarize.get_params() + dataFrame = summarize.to_dataframe(dataLocation,keywords, deploy_loc, pretrained_type, embedding_sz) + problem_type = 'classification' + targetFeature = 'label' + scoreParam = 'Accuracy' + elif folderdetails['fileType'].lower() == 'document': + dataFrame, error = objData.documentsTodf(dataLocation, folderdetails['labelDataFile']) + if error: + log.info(error) + elif folderdetails['fileType'].lower() == 'object': + testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati + intermediateLocation = os.path.join(deployLocation,'intermediate') + os.mkdir(intermediateLocation) + AugEnabled,keepAugImages,operations,augConf = config_obj.getEionImageAugmentationConfiguration() + dataFrame, n_class = objData.createTFRecord(dataLocation, intermediateLocation, folderdetails['labelDataFile'], testPercentage,AugEnabled,keepAugImages,operations, ""objectdetection"",augConf) #Unnati + DataIsFolder = True + else: + datafilelocation = os.path.join(dataLocation,folderdetails['labelDataFile']) + dataFrame = objData.csvTodf(datafilelocation,delimiter,textqualifier) + DataIsFolder = True + if textSimilarityStatus or similarityIdentificationStatus or contextualSearchStatus: + similaritydf = dataFrame + + filter = config_obj.getfilter() + if filter != 'NA': + dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame) + + timegrouper = config_obj.gettimegrouper() + grouping = config_obj.getgrouper() + if grouping != 'NA': + dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame) + elif timegrouper != 'NA': + dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame) + if timeseriesStatus or anomalyDetectionStatus: + from utils.validate_inputs import dataGarbageValue + status,msg = dataGarbageValue(dataFrame,datetimeFeature) + if status.lower() == 'error': + raise ValueError(msg) + if not DataIsFolder: + if timeseriesStatus: + if(modelFeatures != 'NA' and datetimeFeature != ''): + if datetimeFeature: + if isinstance(datetimeFeature, list): #to handle if time series having multiple time column + unpreprocessed_columns = unpreprocessed_columns + datetimeFeature + else: + unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',') + if datetimeFeature not in modelFeatures: + modelFeatures = modelFeatures+','+datetimeFeature + + dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature) + + elif survival_analysis_status or anomalyDetectionStatus: + if(modelFeatures != 'NA'): + if datetimeFeature != 'NA' and datetimeFeature != '': + unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',') + if datetimeFeature not in modelFeatures: + modelFeatures = modelFeatures+','+datetimeFeature + dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature) + else: + dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature) + + log.info('\\n-------> First Ten Rows of Input Data: ') + log.info(dataFrame.head(10)) + self.dfrows=dataFrame.shape[0] + self.dfcols=dataFrame.shape[1] + log.info('\\n-------> Rows: '+str(self.dfrows)) + log.info('\\n-------> Columns: 
'+str(self.dfcols)) + topFeatures=[] + + profilerObj = None + normalizer=None + dataLoadTime = timeit.default_timer() - startTime + log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime)) + if timeseriesStatus: + if datetimeFeature != 'NA' and datetimeFeature != '': + preproces_config = config_obj.basic.get('preprocessing',{}).get('timeSeriesForecasting',{}) + if preproces_config: + from transformations.preprocess import timeSeries as ts_preprocess + preprocess_obj = ts_preprocess( preproces_config,datetimeFeature, log) + dataFrame = preprocess_obj.run( dataFrame) + log.info('-------> Input dataFrame(5 Rows) after preprocessing: ') + log.info(dataFrame.head(5)) + deploy_config['preprocess'] = {} + deploy_config['preprocess']['code'] = preprocess_obj.get_code() + if profiler_status: + log.info('\\n================== Data Profiler has started ==================') + log.info('Status:-|... AION feature transformation started') + from transformations.dataProfiler import profiler as dataProfiler + dp_mlstart = time.time() + profilerJson = config_obj.getEionProfilerConfigurarion() + log.info('-------> Input dataFrame(5 Rows): ') + log.info(dataFrame.head(5)) + log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape)) + testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati + if DataIsFolder: + if folderdetails['type'].lower() != 'objectdetection': + profilerObj = dataProfiler(dataFrame) + topFeatures,VideoProcessing,tfrecord_directory = profilerObj.folderPreprocessing(dataLocation,folderdetails,deployLocation) + elif textSimilarityStatus: + firstDocFeature = config_obj.getFirstDocumentFeature() + secondDocFeature = config_obj.getSecondDocumentFeature() + profilerObj = dataProfiler(dataFrame,targetFeature, data_path=dataFolderLocation) + dataFrame,pipe,targetColumn,topFeatures = profilerObj.textSimilarityStartProfiler(firstDocFeature,secondDocFeature) + elif recommenderStatus: + profilerObj = dataProfiler(dataFrame) + dataFrame = profilerObj.recommenderStartProfiler(modelFeatures) + else: + if deeplearner_status or learner_status: + if (problem_type.lower() != 'clustering') and (problem_type.lower() != 'topicmodelling'): + if targetFeature != '': + try: + biasingDetail = config_obj.getDebiasingDetail() + if len(biasingDetail) > 0: + if biasingDetail['FeatureName'] != 'None': + protected_feature = biasingDetail['FeatureName'] + privileged_className = biasingDetail['ClassName'] + target_feature = biasingDetail['TargetFeature'] + algorithm = biasingDetail['Algorithm'] + + from debiasing.DebiasingManager import DebiasingManager + mgrObj = DebiasingManager() + log.info('Status:-|... Debiasing transformation started') + transf_dataFrame = mgrObj.Bias_Mitigate(dataFrame, protected_feature, privileged_className, target_feature, algorithm) + + log.info('Status:-|... 
Debiasing transformation completed') + dataFrame = transf_dataFrame + except Exception as e: + print(e) + pass + # ---------------------------------------------- ---------------------------------------------- + targetData = dataFrame[targetFeature] + featureData = dataFrame[dataFrame.columns.difference([targetFeature])] + testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati + xtrain,ytrain,xtest,ytest = self.split_into_train_test_data(featureData,targetData,testPercentage,log,problem_type.lower()) + xtrain.reset_index(drop=True,inplace=True) + ytrain.reset_index(drop=True,inplace=True) + xtest.reset_index(drop=True,inplace=True) + ytest.reset_index(drop=True,inplace=True) + dataFrame = xtrain + dataFrame[targetFeature] = ytrain + encode_target_problems = ['classification','anomalyDetection', 'timeSeriesAnomalyDetection'] #task 11997 + if problem_type == 'survivalAnalysis' and dataFrame[targetFeature].nunique() > 1: + encode_target_problems.append('survivalAnalysis') + + if timeseriesStatus: #task 12627 calling data profiler without target feature specified separately (i.e) profiling is done for model features along with target features + profilerObj = dataProfiler(dataFrame, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation) + else: + profilerObj = dataProfiler(dataFrame, target=targetFeature, encode_target= problem_type in encode_target_problems, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation) #task 12627 + dataFrame" +", preprocess_pipe, label_encoder = profilerObj.transform() + preprocess_out_columns = dataFrame.columns.tolist() + if not timeseriesStatus: #task 12627 preprocess_out_columns goes as output_columns in target folder script/input_profiler.py, It should contain the target feature also as it is what is used for forecasting + if targetFeature in preprocess_out_columns: + preprocess_out_columns.remove(targetFeature) + for x in unpreprocessed_columns: + preprocess_out_columns.remove(x) + if label_encoder: + joblib.dump(label_encoder, Path(deployLocation)/'model'/'label_encoder.pkl') + labelMaps = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_))) + codeConfigure.update_config('train_features',list(profilerObj.train_features_type.keys())) + codeConfigure.update_config('text_features',profilerObj.text_feature) + self.textFeatures = profilerObj.text_feature + deploy_config['profiler'] = {} + deploy_config['profiler']['input_features'] = list(profilerObj.train_features_type.keys()) + deploy_config['profiler']['output_features'] = preprocess_out_columns + deploy_config['profiler']['input_features_type'] = profilerObj.train_features_type + deploy_config['profiler']['word2num_features'] = profilerObj.wordToNumericFeatures + deploy_config['profiler']['unpreprocessed_columns'] = unpreprocessed_columns + deploy_config['profiler']['force_numeric_conv'] = profilerObj.force_numeric_conv + if self.textFeatures: + deploy_config['profiler']['conversion_method'] = config_obj.get_conversion_method() + if anomalyDetectionStatus and datetimeFeature != 'NA' and datetimeFeature != '': + if unpreprocessed_columns: + dataFrame.set_index( unpreprocessed_columns[0], inplace=True) + log.info('-------> Data Frame Post Data Profiling(5 Rows): ') + log.info(dataFrame.head(5)) + if not xtest.empty: + if targetFeature != '': + non_null_index = ytest.notna() + ytest = ytest[non_null_index] + xtest = xtest[non_null_index] + if profilerObj.force_numeric_conv: + xtest[ 
profilerObj.force_numeric_conv] = xtest[profilerObj.force_numeric_conv].apply(pd.to_numeric,errors='coerce') + xtest.astype(profilerObj.train_features_type) + if unpreprocessed_columns: + xtest_unprocessed = xtest[unpreprocessed_columns] + xtest = preprocess_pipe.transform(xtest) + if not isinstance(xtest, np.ndarray): + xtest = xtest.toarray() + xtest = pd.DataFrame(xtest, columns=preprocess_out_columns) + if unpreprocessed_columns: + xtest[unpreprocessed_columns] = xtest_unprocessed + if survival_analysis_status: + xtest.astype({x:'float' for x in unpreprocessed_columns}) + xtrain.astype({x:'float' for x in unpreprocessed_columns}) + #task 11997 removed setting datetime column as index of dataframe code as it is already done before + if label_encoder: + ytest = label_encoder.transform(ytest) + if preprocess_pipe: + if self.textFeatures: + from text.textProfiler import reset_pretrained_model + reset_pretrained_model(preprocess_pipe) # pickle is not possible for fasttext model ( binary) + joblib.dump(preprocess_pipe, Path(deployLocation)/'model'/'preprocess_pipe.pkl') + + self.features=topFeatures + if targetColumn in topFeatures: + topFeatures.remove(targetColumn) + self.topFeatures=topFeatures + if normalizer != None: + normalizer_file_path = os.path.join(deployLocation,'model','normalizer_pipe.sav') + normalizer_pickle_file = 'normalizer_pipe.sav' + pickle.dump(normalizer, open(normalizer_file_path,'wb')) + log.info('Status:-|... AION feature transformation completed') + dp_mlexecutionTime=time.time() - dp_mlstart + log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime)) + log.info('================== Data Profiling completed ==================\\n') + + else: + datacolumns=list(dataFrame.columns) + if targetFeature in datacolumns: + datacolumns.remove(targetFeature) + if not timeseriesStatus and not anomalyDetectionStatus and not inputDriftStatus and not outputDriftStatus and not imageClassificationStatus and not associationRuleStatus and not objectDetectionStatus and not stateTransitionStatus and not textsummarizationStatus: + self.textFeatures,self.vectorizerFeatures,pipe,column_merge_flag,merge_columns = profilerObj.checkForTextClassification(dataFrame) + self.topFeatures =datacolumns + if(pipe is not None): + preprocessing_pipe = 'pppipe'+iterName+'_'+iterVersion+'.sav' + ppfilename = os.path.join(deployLocation,'model','pppipe'+iterName+'_'+iterVersion+'.sav') + pickle.dump(pipe, open(ppfilename, 'wb')) + status, msg = save_csv_compressed(dataFrame,profiled_data_file) + if not status: + log.info('CSV File Error: ' + str(msg)) + if selector_status: + log.info(""\\n================== Feature Selector has started =================="") + log.info(""Status:-|... 
AION feature engineering started"") + fs_mlstart = time.time() + selectorJson = config_obj.getEionSelectorConfiguration() + if self.textFeatures: + config_obj.updateFeatureSelection(selectorJson, codeConfigure, self.textFeatures) + log.info(""-------> For vectorizer 'feature selection' is disabled and all the features will be used for training"") + from feature_engineering.featureSelector import featureSelector + selectorObj = featureSelector() + dataFrame,targetColumn,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,self.similarGroups,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pcaModel,bpca_features,apca_features,featureEngineeringSelector = selectorObj.startSelector(dataFrame, selectorJson,self.textFeatures,targetFeature,problem_type) + if(str(pcaModel) != 'None'): + featureReduction = 'True' + status, msg = save_csv(dataFrame,reduction_data_file) + if not status: + log.info('CSV File Error: ' + str(msg)) + pcaFileName = os.path.join(deployLocation,'model','pca'+iterName+'_'+iterVersion+'.sav') + pcaModel_pickle_file = 'pca'+iterName+'_'+iterVersion+'.sav' + pickle.dump(pcaModel, open(pcaFileName, 'wb')) + if not xtest.empty: + xtest = pd.DataFrame(pcaModel.transform(xtest),columns= apca_features) + if targetColumn in self.topFeatures: + self.topFeatures.remove(targetColumn) + fs_mlexecutionTime=time.time() - fs_mlstart + log.info('-------> COMPUTING: Total Feature Selection Execution Time '+str(fs_mlexecutionTime)) + log.info('================== Feature Selection completed ==================\\n') + log.info(""Status:-|... AION feature engineering completed"") + + if deeplearner_status or learner_status: + log.info('Status:-|... AION training started') + ldp_mlstart = time.time() + balancingMethod = config_obj.getAIONDataBalancingMethod() + from learner.machinelearning import machinelearning + mlobj = machinelearning() + modelType = problem_type.lower() + targetColumn = targetFeature + if modelType == ""na"": + if self.targetType == 'categorical': + modelType = 'classification' + elif self.targetType == 'continuous': + modelType = 'regression' + else: + modelType='clustering' + datacolumns=list(dataFrame.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + features =datacolumns + featureData = dataFrame[features] + if(modelType == 'clustering') or (modelType == 'topicmodelling'): + xtrain = featureData + ytrain = pd.DataFrame() + xtest = featureData + ytest = pd.DataFrame() + elif (targetColumn!=''): + xtrain = dataFrame[features] + ytrain = dataFrame[targetColumn] + else: + pass + + categoryCountList = [] + if modelType == 'classification': + if(mlobj.checkForClassBalancing(ytrain) >= 1): + xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod) + valueCount=targetData.value_counts() + categoryCountList=valueCount.tolist() + ldp_mlexecutionTime=time.time() - ldp_mlstart + log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime)) + if learner_status: + base_model_score=0 + log.info('\\n================== ML Started ==================') + + log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum())) + mlstart = time.time() + log.info('-------> Target Problem Type:'+ self.targetType) + learner_type = 'ML' + learnerJson = config_obj.getEionLearnerConfiguration() + from learner.machinelearning import machinelearning + mlobj = machinelearning() + anomalyDetectionStatus = False + 
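+                # At this point xtrain/ytrain hold the (optionally class-balanced) training split; for
+                # clustering and topic modelling the full feature frame is reused as both train and test,
+                # and for classification categoryCountList carries the per-class counts used later.
+                # When PCA-based reduction is selected above, the fitted pcaModel is pickled alongside
+                # the other model artefacts and the same transform is applied to xtest so that train and
+                # test stay in the same reduced feature space (apca_features columns).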
anomalyMethod =config_obj.getEionanomalyModels() + if modelType.lower() == ""anomalydetection"" or modelType.lower() == ""timeseriesanomalydetection"": #task 11997 + anomalyDetectionStatus = True + + if anomalyDetectionStatus == True : + datacolumns=list(dataFrame.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + if datetimeFeature in datacolumns: + datacolumns.remove(datetimeFeature) + self.features = datacolumns + from learner.anomalyDetector import anomalyDetector + anomalyDetectorObj=anomalyDetector() + model_type =""anomaly_detection"" + saved_model = model_type+'_'+iterName+'_'+iterVersion+'.sav' + if problem_type.lower() == ""timeseriesanomalydetection"": #task 11997 + anomalyconfig = config_obj.getAIONTSAnomalyDetectionConfiguration() + modelType = ""TimeSeriesAnomalyDetection"" + else: + anomalyconfig = config_obj.getAIONAnomalyDetectionConfiguration() + testPercentage = config_obj.getAIONTestTrainPercentage() + ##Multivariate feature based anomaly detection status from gui (true/false) + mv_featurebased_selection = config_obj.getMVFeaturebasedAD() + mv_featurebased_ad_status=str(mv_featurebased_selection['uniVariate']) + model,estimator,matrix,trainmatrix,score,labelMaps=anomalyDetectorObj.startanomalydetector(dataFrame,targetColumn,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status) #Unnati + score = 'NA' + if(self.matrix != '{'): + self.matrix += ',' + self.matrix += matrix + if(self.trainmatrix != '{'): + self.trainmatrix += ',' + self.trainmatrix += trainmatrix + scoreParam = 'NA' + scoredetails = f'{{""Model"":""{model}"",""Score"":""{score}""}}' + if model_tried != '': + model_tried += ',' + model_tried += scoredetails + model = anomalyMethod + else: + log.info('-------> Target Problem Type:'+ self.targetType) + log.info('-------> Target Model Type:'+ modelType) + if(modelType == 'regression'): + allowedmatrix = ['mse','r2','rmse','mae'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'mse' + + if(modelType == 'classification'): + allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'accuracy' + scoreParam = scoreParam.lower() + codeConfigure.update_config('scoring_criteria',scoreParam) + modelParams,modelList = config_obj.getEionLearnerModelParams(modelType) + status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=mlobj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,'MB',codeConfigure,featureEngineeringSelector,config_obj.getModelEvaluationConfig(),imageFolderLocation) + + #Getting model,data for ensemble calculation + e_model=loaded_model + base_model_score=score + if(self.matrix != '{'): + self.matrix += ',' + if(self.trainmatrix != '{'): + self.trainmatrix += ',' + self.trainmatrix += trainmatrix + self.matrix += matrix + mlexecutionTime=time.time() - mlstart + log.info('-------> Total ML Execution Time '+str(mlexecutionTime)) + log.info('================== ML Completed ==================\\n') + if deeplearner_status: + learner_type 
= 'DL' + log.info('Status:- |... AION DL training started') + from dlearning.deeplearning import deeplearning + dlobj = deeplearning() + from learner.machinelearning import machinelearning + mlobj = machinelearning() + log.info('\\n================== DL Started ==================') + dlstart" +"= time.time() + deeplearnerJson = config_obj.getEionDeepLearnerConfiguration() + targetColumn = targetFeature + method = deeplearnerJson['optimizationMethod'] + optimizationHyperParameter = deeplearn" +"_inv[:, targetColIndx] + predout = predout.reshape(len(pred_1d),1) + #y_future.append(predout) + + col = targetFeature.split("","") + pred = pd.DataFrame(index=range(0,len(predout)),columns=col) + for i in range(0, len(predout)): + pred.iloc[i] = predout[i] + predictions = pred + log.info(""-------> Predictions"") + log.info(predictions) + forecast_output = predictions.to_json(orient='records') + elif (model.lower() == 'mlp' or model.lower() == 'lstm'): + sfeatures.remove(datetimeFeature) + self.features = sfeatures + if len(sfeatures) == 1: + xt = xtrain[self.features].values + else: + xt = xtrain[self.features].values + + with open(scalertransformationFile, 'rb') as f: + loaded_scaler_model = pickle.load(f) + f.close() + xt = xt.astype('float32') + xt = loaded_scaler_model.transform(xt) + pred_data = xt + y_future = [] + for i in range(no_of_prediction): + pdata = pred_data[-lag_order:] + if model.lower() == 'mlp': + pdata = pdata.reshape((1,lag_order)) + else: + pdata = pdata.reshape((1,lag_order, len(sfeatures))) + if (len(sfeatures) > 1): + pred = loaded_model.predict(pdata) + predout = loaded_scaler_model.inverse_transform(pred) + y_future.append(predout) + pred_data=np.append(pred_data,pred,axis=0) + else: + pred = loaded_model.predict(pdata) + predout = loaded_scaler_model.inverse_transform(pred) + y_future.append(predout.flatten()[-1]) + pred_data = np.append(pred_data,pred) + col = targetFeature.split("","") + pred = pd.DataFrame(index=range(0,len(y_future)),columns=col) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + predictions = pred + log.info(""-------> Predictions"") + log.info(predictions) + forecast_output = predictions.to_json(orient='records') + else: + pass + log.info('Status:-|... AION TimeSeries Forecasting completed') #task 11997 + log.info(""------ Forecast Prediction End -------------\\n"") + log.info('================ Time Series Forecasting Completed ================\\n') #task 11997 + if recommenderStatus: + log.info('\\n================ Recommender Started ================ ') + log.info('Status:-|... AION Recommender started') + learner_type = 'RecommenderSystem' + model_type = 'RecommenderSystem' + modelType = model_type + model = model_type + targetColumn='' + datacolumns=list(dataFrame.columns) + self.features=datacolumns + svd_params = config_obj.getEionRecommenderConfiguration() + from recommender.item_rating import recommendersystem + recommendersystemObj = recommendersystem(modelFeatures,svd_params) + + testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati + saved_model,rmatrix,score,trainingperformancematrix,model_tried = recommendersystemObj.recommender_model(dataFrame,outputLocation) + scoreParam = 'NA' #Task 11190 + log.info('Status:-|... AION Recommender completed') + log.info('================ Recommender Completed ================\\n') + + if textsummarizationStatus: + log.info('\\n================ text Summarization Started ================ ') + log.info('Status:-|... 
AION text Summarization started') + modelType = 'textsummarization' + model_type = 'textsummarization' + learner_type = 'Text Summarization' + modelName='TextSummarization' + from sklearn.preprocessing import LabelEncoder + from sklearn.ensemble import RandomForestClassifier + from scipy import spatial + model = model_type + dataLocationTS,deployLocationTS,KeyWordsTS,pathForKeywordFileTS = config_obj.getEionTextSummarizationConfig() + #print(""dataLocationTS"",dataLocationTS) + #print(""deployLocationTS"",deployLocationTS) + #print(""KeyWordsTS"",KeyWordsTS) + #print(""pathForKeywordFileTS"",pathForKeywordFileTS) + #PreTrained Model Download starts------------------------- + from appbe.dataPath import DATA_DIR + preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' + preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' + models = {'glove':{50:'glove.6B.50d.w2vformat.txt'}} + supported_models = [x for y in models.values() for x in y.values()] + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' + Path(modelsPath).mkdir(parents=True, exist_ok=True) + p = Path(modelsPath).glob('**/*') + modelsDownloaded = [x.name for x in p if x.name in supported_models] + selected_model=""glove.6B.50d.w2vformat.txt"" + if selected_model not in modelsDownloaded: + print(""Model not in folder, downloading"") + import urllib.request + location = Path(modelsPath) + local_file_path = location/f""glove.6B.50d.w2vformat.txt"" + urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.50d.w2vformat.txt', local_file_path) + from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + tokenizer = AutoTokenizer.from_pretrained(""sshleifer/distilbart-cnn-12-6"") + model = AutoModelForSeq2SeqLM.from_pretrained(""sshleifer/distilbart-cnn-12-6"") + tokenizer.save_pretrained(preTrainedModellocation) + model.save_pretrained(preTrainedModellocation) + #PreTrained Model Download ends----------------------- + deployLocationData=deployLocation+""\\\\data\\\\"" + modelLocation=Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'/'glove.6B.50d.w2vformat.txt' + KeyWordsTS=KeyWordsTS.replace("","", "" "") + noOfKeyword = len(KeyWordsTS.split()) + keywords = KeyWordsTS.split() + embeddings = {} + word = '' + with open(modelLocation, 'r', encoding=""utf8"") as f: + header = f.readline() + header = header.split(' ') + vocab_size = int(header[0]) + embed_size = int(header[1]) + for i in range(vocab_size): + data = f.readline().strip().split(' ') + word = data[0] + embeddings[word] = [float(x) for x in data[1:]] + readData=pd.read_csv(pathForKeywordFileTS,encoding='utf-8',encoding_errors= 'replace') + for i in range(noOfKeyword): + terms=(sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6] + readData = readData.append({'Keyword': keywords[i]}, ignore_index=True) + for j in range(len(terms)): + readData = readData.append({'Keyword': terms[j]}, ignore_index=True) + deployLocationDataKwDbFile=deployLocationData+""keywordDataBase.csv"" + readData.to_csv(deployLocationDataKwDbFile,encoding='utf-8',index=False) + datalocation_path=dataLocationTS + path=Path(datalocation_path) + fileList=os.listdir(path) + textExtraction = pd.DataFrame() + textExtraction['Sentences']="""" + rowIndex=0 + for i in range(len(fileList)): + fileName=str(datalocation_path)+""\\\\""+str(fileList[i]) + if fileName.endswith("".pdf""): + print(""\\n files "",fileList[i]) + from pypdf 
import PdfReader + reader = PdfReader(fileName) + number_of_pages = len(reader.pages) + text="""" + textOutputForFile="""" + OrgTextOutputForFile="""" + for i in range(number_of_pages) : + page = reader.pages[i] + text1 = page.extract_text() + text=text+text1 + import nltk + tokens = nltk.sent_tokenize(text) + for sentence in tokens: + sentence=sentence.replace(""\\n"", "" "") + if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) : + continue + textExtraction.at[rowIndex,'Sentences']=str(sentence.strip()) + rowIndex=rowIndex+1 + if fileName.endswith("".txt""): + print(""\\n txt files"",fileList[i]) + data=[] + with open(fileName, ""r"",encoding=""utf-8"") as f: + data.append(f.read()) + str1 = """" + for ele in data: + str1 += ele + sentences=str1.split(""."") + count=0 + for sentence in sentences: + count += 1 + textExtraction.at[rowIndex+i,'Sentences']=str(sentence.strip()) + rowIndex=rowIndex+1 + df=textExtraction + #print(""textExtraction"",textExtraction) + deployLocationDataPreProcessData=deployLocationData+""preprocesseddata.csv"" + save_csv_compressed(deployLocationDataPreProcessData, df, encoding='utf-8') + df['Label']=0 + kw=pd.read_csv(deployLocationDataKwDbFile,encoding='utf-8',encoding_errors= 'replace') + Keyword_list = kw['Keyword'].tolist() + for i in df.index: + for x in Keyword_list: + if (str(df[""Sentences""][i])).find(x) != -1: + df['Label'][i]=1 + break + deployLocationDataPostProcessData=deployLocationData+""postprocesseddata.csv"" + #df.to_csv(deployLocationDataPostProcessData,encoding='utf-8') + save_csv_compressed(deployLocationDataPostProcessData, df, encoding='utf-8') + labelledData=df + train_df=labelledData + labelencoder = LabelEncoder() + train_df['Sentences'] = labelencoder.fit_transform(train_df['Sentences']) + X = train_df.drop('Label',axis=1) + y = train_df['Label'] + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + Classifier = RandomForestClassifier(n_estimators = 10, random_state = 42) + modelTs=Classifier.fit(X, y) + import pickle + deployLocationTS=deployLocation+""\\\\model\\\\""+iterName+'_'+iterVersion+'.sav' + deployLocationTS2=deployLocation+""\\\\model\\\\""+""classificationModel.sav"" + pickle.dump(modelTs, open(deployLocationTS, 'wb')) + pickle.dump(modelTs, open(deployLocationTS2, 'wb')) + print(""\\n trainModel Ends"") + + saved_model = 'textsummarization_'+iterName+'_'+iterVersion + log.info('Status:-|... AION text summarization completed') + model = learner_type + log.info('================ text summarization Completed ================\\n') + if survival_analysis_status: + sa_method = config_obj.getEionanomalyModels() + labeldict = {} + log.info('\\n================ SurvivalAnalysis Started ================ ') + log.info('Status:-|... 
AION SurvivalAnalysis started') + + log.info('\\n================ SurvivalAnalysis DataFrame ================ ') + log.info(dataFrame) + from survival import survival_analysis + from learner.machinelearning import machinelearning + sa_obj = survival_analysis.SurvivalAnalysis(dataFrame, preprocess_pipe, sa_method, targetFeature, datetimeFeature, filter_expression, profilerObj.train_features_type) + if sa_obj != None: + predict_json = sa_obj.learn() + + if sa_method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']: + predicted = sa_obj.models[0].predict(dataFrame[datetimeFeature]) + + status, msg = save_csv(predicted,predicted_data_file) + if not status: + log.info('CSV File Error: ' + str(msg)) + self.features = [datetimeFeature] + elif sa_method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']: + predicted = sa_obj.models[0].predict_cumulative_hazard(dataFrame) + datacolumns = list(dataFrame.columns) + targetColumn = targetFeature + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + self.features = datacolumns + score = sa_obj.score + scoreParam = 'Concordance_Index' + + status,msg = save_csv(predicted,predicted_data_file) + if not status: + log.info('CSV File Error: ' + str(msg)) + model = sa_method + + modelType = ""SurvivalAnalysis"" + model_type = ""SurvivalAnalysis"" + modelName = sa_method + i = 1 + for mdl in sa_obj.models: + saved_model = ""%s_%s_%s_%d.sav""%(model_type,sa_method,iterVersion,i) + pickle.dump(mdl, open(os.path.join(deployLocation,'model',saved_model), 'wb')), + i+=1 + p = 1 + for plot in sa_obj.plots: + img_name = ""%s_%d.png""%(sa_method,p) + img_location = os.path.join(imageFolderLocation,img_name" +") + plot.savefig(img_location,bbox_inches='tight') + sa_images.append(img_location) + p+=1 + log.info('Status:-|... 
AION SurvivalAnalysis completed') + log.info('\\n================ SurvivalAnalysis Completed ================ ') + if visualizationstatus: + visualizationJson = config_obj.getEionVisualizationConfiguration() + log.info('\\n================== Visualization Recommendation Started ==================') + + visualizer_mlstart = time.time() + from visualization.visualization import Visualization + visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfrows,self.dfcols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file) + visualizationObj.visualizationrecommandsystem() + visualizer_mlexecutionTime=time.time() - visualizer_mlstart + log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime)) + + log.info('================== Visualization Recommendation Started ==================\\n') + if similarityIdentificationStatus or contextualSearchStatus: + datacolumns=list(dataFrame.columns) + features = modelFeatures.split("","") + if indexFeature != '' and indexFeature != 'NA': + iFeature = indexFeature.split("","") + for ifea in iFeature: + if ifea not in features: + features.append(ifea) + for x in features: + dataFrame[x] = similaritydf[x] + #get vectordb(chromadb) status selected + if similarityIdentificationStatus: + learner_type = 'similarityIdentification' + else: + learner_type = 'contextualSearch' + vecDBCosSearchStatus = config_obj.getVectorDBCosSearchStatus(learner_type) + if vecDBCosSearchStatus: + status, msg = save_chromadb(dataFrame, config_obj, trained_data_file, modelFeatures) + if not status: + log.info('Vector DB File Error: '+str(msg)) + else: + status, msg = save_csv(dataFrame,trained_data_file) + if not status: + log.info('CSV File Error: '+str(msg)) + self.features = datacolumns + + model_type = config_obj.getAlgoName(problem_type) + model = model_type #bug 12833 + model_tried = '{""Model"":""'+model_type+'"",""FeatureEngineering"":""NA"",""Score"":""NA"",""ModelUncertainty"":""NA""}' + modelType = learner_type + saved_model = learner_type + score = 'NA' + if deploy_status: + if str(model) != 'None': + log.info('\\n================== Deployment Started ==================') + log.info('Status:-|... AION Creating Prediction Service Start') + deployer_mlstart = time.time() + + deployJson = config_obj.getEionDeployerConfiguration() + deploy_name = iterName+'_'+iterVersion + from prediction_package.model_deploy import DeploymentManager + if textsummarizationStatus : + deploy = DeploymentManager() + deploy.deployTSum(deployLocation,preTrainedModellocation) + codeConfigure.save_config(deployLocation) + deployer_mlexecutionTime=time.time() - deployer_mlstart + log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) + log.info('Status:-|... 
AION Deployer completed') + log.info('================== Deployment Completed ==================') + else: + deploy = DeploymentManager() + deploy.deploy_model(deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deployLocation,self.features,self.profilerAction,dataLocation,labelMaps,column_merge_flag,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer_pickle_file,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,self.method,deployFolder,iterName,iterVersion,self.wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,noofforecasts,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,deploy_config) + codeConfigure.update_config('deploy_path',os.path.join(deployLocation,'publish')) + codeConfigure.save_config(deployLocation) + deployer_mlexecutionTime=time.time() - deployer_mlstart + log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) + log.info('Status:-|... AION Creating Prediction Service completed') + log.info('================== Deployment Completed ==================') + + if not outputDriftStatus and not inputDriftStatus: + from transformations.dataProfiler import set_features + self.features = set_features(self.features,profilerObj) + self.matrix += '}' + self.trainmatrix += '}' + print(model_tried) + model_tried = eval('['+model_tried+']') + matrix = eval(self.matrix) + trainmatrix = eval(self.trainmatrix) + deployPath = deployLocation.replace(os.sep, '/') + if survival_analysis_status: + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""survivalProbability"":json.loads(predict_json),""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""EvaluatedModels"":model_tried,""imageLocation"":str(sa_images),""LogFile"":logFileName}} + elif not timeseriesStatus: + try: + json.dumps(params) + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""trainmatrix"":trainmatrix,""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""params"":params,""EvaluatedModels"":model_tried,""LogFile"":logFileName}} + except: + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""trainmatrix"":trainmatrix,""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""params"":"""",""EvaluatedModels"":model_tried,""LogFile"":logFileName}} + else: + if config_obj.summarize: + modelType = 'Summarization' + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""EvaluatedModels"":model_tried,'forecasts':json.loads(forecast_output),""LogFile"":logFileName}} 
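+                # Whichever branch was taken above, aion_learner_status reports a payload of the form
+                # {'status': 'SUCCESS', 'data': {...}} where data carries ModelType, deployLocation,
+                # BestModel, BestScore, ScoreType, matrix, featuresused, targetFeature, EvaluatedModels
+                # and LogFile, plus branch-specific extras (trainmatrix/params for ML/DL,
+                # survivalProbability/imageLocation for survival analysis, forecasts for time series).
+                # Note: the text-summarization branch above calls save_csv_compressed(path, df, ...)
+                # whereas other call sites in this file pass the dataframe first; the argument order
+                # there looks inconsistent and is worth verifying against utils.file_ops.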
+ if bool(topics) == True: + output_json['topics'] = topics + with open(outputjsonFile, 'w',encoding='utf-8') as f: + json.dump(output_json, f) + f.close() + output_json = json.dumps(output_json) + + log.info('\\n------------- Summary ------------') + log.info('------->No of rows & columns in data:('+str(self.dfrows)+','+str(self.dfcols)+')') + log.info('------->No of missing Features :'+str(len(self.mFeatures))) + log.info('------->Missing Features:'+str(self.mFeatures)) + log.info('------->Text Features:'+str(self.textFeatures)) + log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures))) + log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures)) + if threshold == -1: + log.info('------->Threshold: NA') + else: + log.info('------->Threshold: '+str(threshold)) + log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps)) + for i in range(0,len(self.similarGroups)): + log.info('------->Similar Groups '+str(i+1)+' '+str(self.similarGroups[i])) + if((learner_type != 'TS') & (learner_type != 'AR')): + log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape)) + log.info('------->Features Used for Modeling:'+str(self.features)) + log.info('------->Target Feature: '+str(targetColumn)) + log.info('------->Best Model Score :'+str(score)) + log.info('------->Best Parameters:'+str(params)) + log.info('------->Type of Model :'+str(modelType)) + log.info('------->Best Model :'+str(model)) + log.info('------------- Summary ------------\\n') + log.info('Status:-|... AION Model Training Successfully Done') + + except Exception as inst: + log.info('server code execution failed !....'+str(inst)) + log.error(inst, exc_info = True) + output_json = {""status"":""FAIL"",""message"":str(inst).strip('""'),""LogFile"":logFileName} + output_json = json.dumps(output_json) + + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + executionTime = timeit.default_timer() - startTime + log.info('\\nTotal execution time(sec) :'+str(executionTime)) + log.info('\\n------------- Output JSON ------------') + log.info('aion_learner_status:'+str(output_json)) + log.info('------------- Output JSON ------------\\n') + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + hdlr.close() + log.removeHandler(hdlr) + return output_json + + def split_into_train_test_data(self,featureData,targetData,testPercentage,log,modelType='classification'): #Unnati + log.info('\\n-------------- Test Train Split ----------------') + if testPercentage == 0 or testPercentage == 100: #Unnati + xtrain=featureData + ytrain=targetData + xtest=pd.DataFrame() + ytest=pd.DataFrame() + else: + testSize= testPercentage/100 #Unnati + if modelType == 'regression': + log.info('-------> Split Type: Random Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42) + else: + try: + log.info('-------> Split Type: Stratify Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,random_state=42) + except Exception as ValueError: + count_unique = targetData.value_counts() + feature_with_single_count = count_unique[ count_unique == 1].index.tolist() + error = f""The least populated class in {feature_with_single_count} has only 1 member, which is too few. 
The minimum number of groups for any class cannot be less than 2"" + raise Exception(error) from ValueError + + except: + log.info('-------> Split Type: Random Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42) + + log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') #Unnati + log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->') + log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->') + log.info('-------------- Test Train Split End ----------------\\n') + return(xtrain,ytrain,xtest,ytest) + +def aion_train_model(arg): + warnings.filterwarnings('ignore') + config_path = Path( arg) + with open( config_path, 'r') as f: + config = json.load( f) + log = set_log_handler(config['basic']) + log.info('************* Version - v'+AION_VERSION+' *************** \\n') + msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone(""Asia/Kolkata"")).strftime('%Y-%m-%d %H:%M:%S' + ' IST') + log.info(msg) + try: + config_validate(arg) + valid, msg = pushRecordForTraining() + if valid: + serverObj = server() + configObj = AionConfigManager() + codeConfigure = code_configure() + codeConfigure.create_config(config) + readConfistatus,msg = configObj.readConfigurationFile(config) + if(readConfistatus == False): + raise ValueError( msg) + output = serverObj.startScriptExecution(configObj, codeConfigure, log) + else: + output = {""status"":""LicenseVerificationFailed"",""message"":str(msg).strip('""')} + output = json.dumps(output) + print( f""\\naion_learner_status:{output}\\n"") + log.info( f""\\naion_learner_status:{output}\\n"") + except Exception as inst: + output = {""status"":""FAIL"",""message"":str(inst).strip('""')} + output = json.dumps(output) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(f""\\naion_learner_status:{output}\\n"") + log.info( f""\\naion_learner_" +"status:{output}\\n"") + return output + +if __name__ == ""__main__"": + aion_train_model( sys.argv[1]) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import platform +import shutil +import subprocess +import sys +import glob +import json + + +def publish(data): + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + model = jsonData['modelName'] + version = jsonData['modelVersion'] + deployFolder = jsonData['deployLocation'] + model = model.replace("" "", ""_"") + deployedPath = os.path.join(deployFolder,model+'_'+version) + deployedPath = os.path.join(deployedPath,'WHEELfile') + whlfilename='na' + if os.path.isdir(deployedPath): + for file in os.listdir(deployedPath): + if file.endswith("".whl""): + whlfilename = os.path.join(deployedPath,file) + if whlfilename != 'na': + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""uninstall"",""-y"",model]) + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""install"", whlfilename]) + + status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder']) + if status == 'Running': + service_stop(json.dumps(jsonData)) + service_start(json.dumps(jsonData)) + + output_json = {'status':""SUCCESS""} + output_json = json.dumps(output_json) + + else: + output_json = {'status':'Error','Msg':'Installation Package not Found'} + output_json = json.dumps(output_json) + return(output_json) +def check_service_running(model,serviceFolder): + model = model.replace("" "", ""_"") + filename = model+'_service.py' + modelservicefile = os.path.join(serviceFolder,filename) + status = 'File Not Exist' + ip = '' + port = '' + pid = '' + if os.path.exists(modelservicefile): + status = 'File Exist' + import psutil + for proc in psutil.process_iter(): + pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections']) + if 'python' in pinfo['name']: + if filename in pinfo['cmdline'][1]: + status = 'Running' + pid = pinfo['pid'] + for x in pinfo['connections']: + ip = x.laddr.ip + port = x.laddr.port + + + return(status,pid,ip,port) +def service_stop(data): + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder']) + if status == 'Running': + import psutil + p = psutil.Process(int(pid)) + p.terminate() + time.sleep(2) + output_json = {'status':'SUCCESS'} + output_json = json.dumps(output_json) + return(output_json) + +def service_start(data): + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + model = jsonData['modelName'] + version = jsonData['modelVersion'] + ip = jsonData['ip'] + port = jsonData['port'] + deployFolder = jsonData['deployLocation'] + serviceFolder = jsonData['serviceFolder'] + model = model.replace("" "", ""_"") + deployLocation = os.path.join(deployFolder,model+'_'+version) + org_service_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'model_service.py')) + filename = model+'_service.py' + modelservicefile = os.path.join(serviceFolder,filename) + status = 'File Not Exist' + if os.path.exists(modelservicefile): + status = 'File Exist' + r = ([line.split() for line in subprocess.check_output(""tasklist"").splitlines()]) + for i in range(len(r)): + if filename in r[i]: + status = 'Running' + if status == 'File Not Exist': + shutil.copy(org_service_file,modelservicefile) + with open(modelservicefile, 'r+') as file: + content = file.read() + 
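+            # Descriptive note: the template copied above (model_service.py) is specialised
+            # for this model by prepending imports. The code below seeks back to offset 0,
+            # writes the model-specific import lines, then re-writes the original template
+            # content after them, so the generated <model>_service.py starts roughly like:
+            #   from <model> import aion_performance
+            #   from <model> import aion_drift
+            #   from <model> import featureslist
+            #   from <model> import aion_prediction
+            #   <original model_service.py content>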
file.seek(0, 0) + line = 'from '+model+' import aion_performance' + file.write(line+""\\n"") + line = 'from '+model+' import aion_drift' + file.write(line+ ""\\n"") + line = 'from '+model+' import featureslist' + file.write(line+ ""\\n"") + line = 'from '+model+' import aion_prediction' + file.write(line+ ""\\n"") + file.write(content) + file.close() + status = 'File Exist' + if status == 'File Exist': + status,pid,ipold,portold = check_service_running(jsonData['modelName'],jsonData['serviceFolder']) + if status != 'Running': + command = ""python ""+modelservicefile+' '+str(port)+' '+str(ip) + os.system('start cmd /c ""'+command+'""') + time.sleep(2) + status = 'Running' + output_json = {'status':'SUCCESS','Msg':status} + output_json = json.dumps(output_json) + return(output_json) +if __name__ == ""__main__"": + aion_publish(sys.argv[1]) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import logging +logging.getLogger('tensorflow').disabled = True +#from autogluon.tabular import TabularDataset, TabularPredictor +#from autogluon.core.utils.utils import setup_outputdir +#from autogluon.core.utils.loaders import load_pkl +#from autogluon.core.utils.savers import save_pkl +import datetime, time, timeit +from datetime import datetime as dt +import os.path +import json +import io +import shutil +import sys + +#from Gluon_MultilabelPredictor import MultilabelPredictor + +class MultilabelPredictor(): + + """""" Tabular Predictor for predicting multiple columns in table. + Creates multiple TabularPredictor objects which you can also use individually. + You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)` + + Parameters + ---------- + labels : List[str] + The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object. + path : str + Path to directory where models and intermediate outputs should be saved. + If unspecified, a time-stamped folder called ""AutogluonModels/ag-[TIMESTAMP]"" will be created in the working directory to store all models. + Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all. + Otherwise files from first `fit()` will be overwritten by second `fit()`. + Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors. + problem_types : List[str] + The ith element is the `problem_type` for the ith TabularPredictor stored in this object. + eval_metrics : List[str] + The ith element is the `eval_metric` for the ith TabularPredictor stored in this object. + consider_labels_correlation : bool + Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others. + If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion). 
+ Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels. + kwargs : + Arguments passed into the initialization of each TabularPredictor. + + """""" + + multi_predictor_file = 'multilabel_predictor.pkl' + + def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs): + if len(labels) < 2: + raise ValueError(""MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column)."") + self.path = setup_outputdir(path, warn_if_exist=False) + self.labels = labels + #print(self.labels) + self.consider_labels_correlation = consider_labels_correlation + self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label + if eval_metrics is None: + self.eval_metrics = {} + else: + self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))} + problem_type = None + eval_metric = None + for i in range(len(labels)): + label = labels[i] + path_i = self.path + ""Predictor_"" + label + if problem_types is not None: + problem_type = problem_types[i] + if eval_metrics is not None: + eval_metric = self.eval_metrics[i] + self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs) + + def fit(self, train_data, tuning_data=None, **kwargs): + """""" Fits a separate TabularPredictor to predict each of the labels. + + Parameters + ---------- + train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame + See documentation for `TabularPredictor.fit()`. + kwargs : + Arguments passed into the `fit()` call for each TabularPredictor. + """""" + if isinstance(train_data, str): + train_data = TabularDataset(train_data) + if tuning_data is not None and isinstance(tuning_data, str): + tuning_data = TabularDataset(tuning_data) + train_data_og = train_data.copy() + if tuning_data is not None: + tuning_data_og = tuning_data.copy() + save_metrics = len(self.eval_metrics) == 0 + for i in range(len(self.labels)): + label = self.labels[i] + predictor = self.get_predictor(label) + if not self.consider_labels_correlation: + labels_to_drop = [l for l in self.labels if l!=label] + else: + labels_to_drop = [self.labels[j] for j in range(i+1,len(self.labels))] + train_data = train_data_og.drop(labels_to_drop, axis=1) + if tuning_data is not None: + tuning_data = tuning_data_og.drop(labels_to_drop, axis=1) + print(f""Fitting TabularPredictor for label: {label} ..."") + predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs) + self.predictors[label] = predictor.path + if save_metrics: + self.eval_metrics[label] = predictor.eval_metric + self.save() + + def eval_metrics(self): + return(self.eval_metrics) + + def predict(self, data, **kwargs): + """""" Returns DataFrame with label columns containing predictions for each label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`. + kwargs : + Arguments passed into the predict() call for each TabularPredictor. 
+ """""" + return self._predict(data, as_proba=False, **kwargs) + + def predict_proba(self, data, **kwargs): + """""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`. + kwargs : + Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call). + """""" + return self._predict(data, as_proba=True, **kwargs) + + def evaluate(self, data, **kwargs): + """""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to evalate predictions of all labels for, must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`. + kwargs : + Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call). + """""" + data = self._get_data(data) + eval_dict = {} + for label in self.labels: + print(f""Evaluating TabularPredictor for label: {label} ..."") + predictor = self.get_predictor(label) + eval_dict[label] = predictor.evaluate(data, **kwargs) + if self.consider_labels_correlation: + data[label] = predictor.predict(data, **kwargs) + return eval_dict + + def save(self): + """""" Save MultilabelPredictor to disk. """""" + for label in self.labels: + if not isinstance(self.predictors[label], str): + self.predictors[label] = self.predictors[label].path + save_pkl.save(path=self.path+self.multi_predictor_file, object=self) + print(f""MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path" +"}')"") + + @classmethod + def load(cls, path): + """""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """""" + path = os.path.expanduser(path) + if path[-1] != os.path.sep: + path = path + os.path.sep + return load_pkl.load(path=path+cls.multi_predictor_file) + + def get_predictor(self, label): + """""" Returns TabularPredictor which is used to predict this label. 
"""""" + predictor = self.predictors[label] + if isinstance(predictor, str): + return TabularPredictor.load(path=predictor) + return predictor + + def _get_data(self, data): + if isinstance(data, str): + return TabularDataset(data) + return data.copy() + + def _predict(self, data, as_proba=False, **kwargs): + data = self._get_data(data) + if as_proba: + predproba_dict = {} + for label in self.labels: + print(f""Predicting with TabularPredictor for label: {label} ..."") + predictor = self.get_predictor(label) + if as_proba: + predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs) + data[label] = predictor.predict(data, **kwargs) + if not as_proba: + return data[self.labels] + else: + return predproba_dict + +def aion_train_gluon(arg): + configFile = arg + with open(configFile, 'rb') as cfile: + data = json.load(cfile) + cfile.close() + rootElement = data['basic'] + modelname = rootElement['modelName'] + version = rootElement['modelVersion'] + dataLocation = rootElement['dataLocation'] + deployFolder = rootElement['deployLocation'] + analysisType = rootElement['analysisType'] + testPercentage = data['advance']['testPercentage'] + deployLocation = os.path.join(deployFolder,modelname+'_'+version) + try: + os.makedirs(deployLocation) + except OSError as e: + shutil.rmtree(deployLocation) + os.makedirs(deployLocation) + logLocation = os.path.join(deployLocation,'log') + try: + os.makedirs(logLocation) + except OSError as e: + pass + etcLocation = os.path.join(deployLocation,'etc') + try: + os.makedirs(etcLocation) + except OSError as e: + pass + logFileName=os.path.join(deployLocation,'log','model_training_logs.log') + filehandler = logging.FileHandler(logFileName, 'w','utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('eion') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + log.info('************* Version - v1.2.0 *************** \\n') + msg = '-------> Execution Start Time: '+ dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + log.info(msg) + dataLabels = rootElement['targetFeature'].split(',') + + + # Create and Write the config file used in Prediction + # ----------------------------------------------------------------------------# + + tdata = TabularDataset(dataLocation) + #train_data = tdata + train_data = tdata.sample(frac = 0.8) + test_data = tdata.drop(train_data.index) + + if rootElement['trainingFeatures'] != '': + trainingFeatures = rootElement['trainingFeatures'].split(',') + else: + trainingFeatures = list(train_data.columns) + features = trainingFeatures + for x in dataLabels: + if x not in features: + features.append(x) + indexFeature = rootElement['indexFeature'] + if indexFeature != '': + indexFeature = indexFeature.split(',') + for x in indexFeature: + if x in features: + features.remove(x) + dateTimeFeature = rootElement['dateTimeFeature'] + if dateTimeFeature != '': + dateTimeFeature = dateTimeFeature.split(',') + for x in dateTimeFeature: + if x in features: + features.remove(x) + train_data = train_data[features] + test_data = test_data[features] + + + configJsonFile = {""targetFeature"":dataLabels,""features"":"","".join([feature for feature in features])} + configJsonFilePath = os.path.join(deployLocation,'etc','predictionConfig.json') + if len(dataLabels) == 1 and 
analysisType['multiLabelPrediction'] == ""False"": + dataLabels = rootElement['targetFeature'] + with io.open(configJsonFilePath, 'w', encoding='utf8') as outfile: + str_ = json.dumps(configJsonFile, ensure_ascii=False) + outfile.write(str_) + # ----------------------------------------------------------------------------# + if analysisType['multiLabelPrediction'] == ""True"": + + # Copy and Write the Predictiion script file into deployment location + # ----------------------------------------------------------------------------# + srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiLabelPrediction.py') + dstFile = os.path.join(deployLocation,'aion_predict.py') + shutil.copy(srcFile,dstFile) + # ----------------------------------------------------------------------------# + + labels = dataLabels # which columns to predict based on the others + #problem_types = dataProblem_types # type of each prediction problem + save_path = os.path.join(deployLocation,'ModelPath') # specifies folder to store trained models + time_limit = 5 # how many seconds to train the TabularPredictor for each label + log.info('Status:-|... AION Gluon Start') + try: + if len(labels) < 2: + log.info('Status:-|... AION Evaluation Error: Target should be multiple column') + # ----------------------------------------------------------------------------# + output = {'status':'FAIL','message':'Number of target variable should be 2 or more than 2'} + else: + multi_predictor = MultilabelPredictor(labels=labels, path=save_path) + multi_predictor.fit(train_data, time_limit=time_limit) + log.info('Status:-|... AION Gluon Stop') + log.info('Status:-|... AION Evaluation Start') + trainevaluations = multi_predictor.evaluate(train_data) + testevaluations = multi_predictor.evaluate(test_data) + + best_model = {} + for label in labels: + predictor_class = multi_predictor.get_predictor(label) + predictor_class.get_model_best() + best_model[label] = predictor_class.get_model_best() + + log.info('Status:-|... AION Evaluation Stop') + # ----------------------------------------------------------------------------# + output = {'status':'SUCCESS','data':{'ModelType':'MultiLabelPrediction','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'ACCURACY','deployLocation':deployLocation,'matrix':trainevaluations,'testmatrix':testevaluations,'BestModel':best_model, 'LogFile':logFileName}} + except Exception as inst: + log.info('Status:-|... 
AION Gluon Error') + output = {""status"":""FAIL"",""message"":str(inst).strip('""')} + if analysisType['multiModalLearning'] == ""True"": + from autogluon.core.utils.utils import get_cpu_count, get_gpu_count + from autogluon.text import TextPredictor + # check the system and then set the equivelent flag + # ----------------------------------------------------------------------------# + os.environ[""AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU""] = ""0"" + if get_gpu_count() == 0: + os.environ[""AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU""] = ""1"" + # ----------------------------------------------------------------------------# + # Copy and Write the Predictiion script file into deployment location + # ----------------------------------------------------------------------------# + srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiModalPrediction.py') + dstFile = os.path.join(deployLocation,'aion_predict.py') + shutil.copy(srcFile,dstFile) + time_limit = None # set to larger value in your applications + save_path = os.path.join(deployLocation,'text_prediction') + predictor = TextPredictor(label=dataLabels, path=save_path) + predictor.fit(train_data, time_limit=time_limit) + log.info('Status:-|... AION Gluon Stop') + log.info('Status:-|... AION Evaluation Start') + trainevaluations = predictor.evaluate(train_data) + log.info('Status:-|... AION Evaluation Stop') + # ----------------------------------------------------------------------------# + output = {'status':'SUCCESS','data':{'ModelType':'MultiModelLearning','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'SCORE','deployLocation':deployLocation,'matrix':trainevaluations,'LogFile':logFileName}} + + output = json.dumps(output) + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + log.info('\\n------------- Output JSON ------------') + log.info('-------> Output :'+str(output)) + log.info('------------- Output JSON ------------\\n') + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + hdlr.close() + log.removeHandler(hdlr) + + return(output) +if __name__ == ""__main__"": + aion_train_gluon(sys.argv[1]) import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__)))) + #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer +from http.server import BaseHTTPRequestHandler,HTTPServer +#from SocketServer import ThreadingMixIn +from socketserver import ThreadingMixIn +from functools import partial +from http.server import SimpleHTTPRequestHandler, test +import base64 +from appbe.dataPath import DEPLOY_LOCATION + +''' +from augustus.core.ModelLoader import ModelLoader +from augustus.strict import modelLoader +''' +import pandas as pd +import os,sys +from os.path import expanduser +import platform +import numpy as np +import configparser +import threading +import subprocess +import argparse +from functools import partial +import re +import cgi +from datetime import datetime +import json +import sys +from datetime import datetime +user_records = {} +class LocalModelData(object): + models = {} + +class HTTPRequestHandler(BaseHTTPRequestHandler): + def __init__(self, *args, **kwargs): + username = kwargs.pop(""username"") + password = kwargs.pop(""password"") + self._auth = base64.b64encode(f""{username}:{password}"".encode()).decode() + super().__init__(*args) + + def do_HEAD(self): + self.send_response(200) + self.send_header(""Content-type"", ""text/html"") + self.end_headers() + + 
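+    # The POST handler below enforces HTTP Basic auth: the Authorization header must equal
+    # 'Basic ' + base64(username:password), i.e. the value cached in self._auth by __init__.
+    # Hedged client-side sketch (requests is assumed to be available; host, port, use case
+    # and feature names are placeholders, not values defined in this file):
+    #   import requests
+    #   r = requests.post('http://127.0.0.1:8091/AION/<usecase>_<version>/predict',
+    #                     json={'feature1': 1.0}, auth=('admin', 'secret'))
+    #   print(r.text)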
def do_AUTHHEAD(self): + self.send_response(401) + self.send_header(""WWW-Authenticate"", 'Basic realm=""Test""') + self.send_header(""Content-type"", ""text/html"") + self.end_headers() + + def do_POST(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + if self.headers.get(""Authorization"") == None: + self.do_AUTHHEAD() + resp = ""Authentication Failed: Auth Header Not Present"" + resp=resp.encode() + self.wfile.write(resp) + elif self.headers.get(""Authorization"") == ""Basic "" + self._auth: + length = int(self.headers.get('content-length')) + #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) + data = self.rfile.read(length) + #print(data) + #keyList = list(data.keys()) + #print(keyList[0]) + model = self.path.split('/')[-2] + operation = self.path.split('/')[-1] + home = expanduser(""~"") + #data = json.loads(data) + dataStr = data + model_path = os.path.join(DEPLOY_LOCATION,model) + isdir = os.path.isdir(model_path) + if isdir: + if operation.lower() == 'predict': + predict_path = os.path.join(model_path,'aion_predict.py') + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resp = outputStr + elif operation.lower() == 'spredict': + try: + predict_path = os.path.join(model_path,'aion_spredict.py') + print(predict_path) + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resp = outputStr + except Exception as e: + print(e) + + elif operation.lower() == 'features': + predict_path = os.path.join(model_path,'featureslist.py') + outputStr = subprocess.check_output([sys" +".executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resp = outputStr + elif operation.lower() == 'explain': + predict_path = os.path.join(model_path,'explainable_ai.py') + outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + elif operation.lower() == 'monitoring': + predict_path = os.path.join(model_path,'aion_ipdrift.py') + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + elif operation.lower() == 'performance': + predict_path = os.path.join(model_path,'aion_opdrift.py') + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + elif operation.lower() == 'pattern_anomaly_predict': + data = json.loads(data) + anomaly = False + remarks = '' + clusterid = -1 + configfilename = os.path.join(model_path,'datadetails.json') + filename = os.path.join(model_path,'clickstream.json') + clusterfilename = 
os.path.join(model_path,'stateClustering.csv') + probfilename = os.path.join(model_path,'stateTransitionProbability.csv') + + dfclus = pd.read_csv(clusterfilename) + dfprod = pd.read_csv(probfilename) + f = open(configfilename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + activity = configSettingsJson['activity'] + sessionid = configSettingsJson['sessionid'] + f = open(filename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + groupswitching = configSettingsJson['groupswitching'] + page_threshold = configSettingsJson['transitionprobability'] + chain_count = configSettingsJson['transitionsequence'] + chain_probability = configSettingsJson['sequencethreshold'] + currentactivity = data[activity] + if bool(user_records): + sessionid = data[sessionid] + + if sessionid != user_records['SessionID']: + user_records['SessionID'] = sessionid + prevactivity = '' + user_records['probarry'] = [] + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + else: + prevactivity = user_records['Activity'] + user_records['Activity'] = currentactivity + pageswitch = True + if prevactivity == currentactivity or prevactivity == '': + probability = 0 + pageswitch = False + remarks = '' + else: + user_records['pageclicks'] += 1 + df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] + if df1.empty: + remarks = 'Anomaly Detected - User in unusual state' + anomaly = True + clusterid = -1 + probability = 0 + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + avg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + else: + probability = df1['Probability'].iloc[0] + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + davg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + remarks = '' + if user_records['prevclusterid'] != -1: + if probability == 0 and user_records['prevclusterid'] != clusterid: + user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 + if user_records['pageclicks'] == 1: + remarks = 'Anomaly Detected - Frequent Cluster Hopping' + anomaly = True + else: + remarks = 'Cluster Hopping Detected' + user_records['pageclicks'] = 0 + if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: + remarks = 'Anomaly Detected - Multiple Cluster Hopping' + anomaly = True + elif probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + if pageswitch == True: + if probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + remarks = '' + if davg < float(chain_probability): + if anomaly == False: + remarks = 'Anomaly Detected - In-frequent Pattern Detected' + anomaly = True + else: + user_records['SessionID'] = data[sessionid] + user_records['Activity'] = 
data[activity] + user_records['probability'] = 0 + user_records['probarry'] = [] + user_records['chainprobability'] = 0 + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + user_records['prevclusterid'] = clusterid + outputStr = '{""status"":""SUCCESS"",""data"":{""Anomaly"":""'+str(anomaly)+'"",""Remarks"":""'+str(remarks)+'""}}' + elif operation.lower() == 'pattern_anomaly_settings': + data = json.loads(data) + groupswitching = data['groupswitching'] + transitionprobability = data['transitionprobability'] + transitionsequence = data['transitionsequence'] + sequencethreshold = data['sequencethreshold'] + filename = os.path.join(model_path,'clickstream.json') + data = {} + data['groupswitching'] = groupswitching + data['transitionprobability'] = transitionprobability + data['transitionsequence'] = transitionsequence + data['sequencethreshold'] = sequencethreshold + updatedConfig = json.dumps(data) + with open(filename, ""w"") as fpWrite: + fpWrite.write(updatedConfig) + fpWrite.close() + outputStr = '{""Status"":""SUCCESS""}' + else: + outputStr = ""{'Status':'Error','Msg':'Operation not supported'}"" + else: + outputStr = ""{'Status':'Error','Msg':'Model Not Present'}"" + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + self.do_AUTHHEAD() + self.wfile.write(self.headers.get(""Authorization"").encode()) + resp = ""Authentication Failed"" + resp=resp.encode() + self.wfile.write(resp) + else: + print(""python ==> else1"") + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print(""PYTHON ######## REQUEST ####### ENDED"") + return + def getModelFeatures(self,modelSignature): + datajson = {'Body':'Gives the list of features'} + home = expanduser(""~"") + if platform.system() == 'Windows': + predict_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'featureslist.py') + else: + predict_path = os.path.join(home,'HCLT','AION','target',modelSignature,'featureslist.py') + if(os.path.isfile(predict_path)): + outputStr = subprocess.check_output([sys.executable,predict_path]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'features:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + displaymsg = outputStr + #displaymsg = json.dumps(displaymsg) + return(True,displaymsg) + else: + displaymsg = ""{'status':'ERROR','msg':'Unable to fetch featuers'}"" + return(False,displaymsg) + + def getFeatures(self,modelSignature): + datajson = {'Body':'Gives the list of features'} + urltext = '/AION/UseCase_Version/features' + if modelSignature != '': + status," +"displaymsg = self.getModelFeatures(modelSignature) + if status: + urltext = '/AION/'+modelSignature+'/features' + else: + displaymsg = json.dumps(datajson) + else: + displaymsg = json.dumps(datajson) + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Output: {displaymsg}. 
+ """""".format(url=urltext,displaymsg=displaymsg) + return(msg) + + def features_help(self,modelSignature): + home = expanduser(""~"") + if platform.system() == 'Windows': + display_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'display.json') + else: + display_path = os.path.join(home,'HCLT','AION','target',modelSignature,'display.json') + #display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json') + datajson = {'Body':'Data Should be in JSON Format'} + if(os.path.isfile(display_path)): + with open(display_path) as file: + config = json.load(file) + file.close() + datajson={} + for feature in config['numericalFeatures']: + if feature != config['targetFeature']: + datajson[feature] = 'Numeric Value' + for feature in config['nonNumericFeatures']: + if feature != config['targetFeature']: + datajson[feature] = 'Category Value' + for feature in config['textFeatures']: + if feature != config['targetFeature']: + datajson[feature] = 'Category Value' + displaymsg = json.dumps(datajson) + return(displaymsg) + def predict_help(self,modelSignature): + if modelSignature != '': + displaymsg = self.features_help(modelSignature) + urltext = '/AION/'+modelSignature+'/predict' + else: + datajson = {'Body':'Data Should be in JSON Format'} + displaymsg = json.dumps(datajson) + urltext = '/AION/UseCase_Version/predict' + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: prediction,probability(if Applicable),remarks corresponding to each row. + """""".format(url=urltext,displaymsg=displaymsg) + return(msg) + def performance_help(self,modelSignature): + if modelSignature != '': + urltext = '/AION/'+modelSignature+'/performance' + else: + urltext = '/AION/UseCase_Version/performance' + datajson = {""trainingDataLocation"":""Reference Data File Path"",""currentDataLocation"":""Latest Data File Path""} + displaymsg = json.dumps(datajson) + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: HTML File Path."""""".format(url=urltext,displaymsg=displaymsg) + return(msg) + def monitoring_help(self,modelSignature): + if modelSignature != '': + urltext = '/AION/'+modelSignature+'/monitoring' + else: + urltext = '/AION/UseCase_Version/monitoring' + datajson = {""trainingDataLocation"":""Reference Data File Path"",""currentDataLocation"":""Latest Data File Path""} + displaymsg = json.dumps(datajson) + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: Affected Columns. 
HTML File Path."""""".format(url=urltext,displaymsg=displaymsg) + return(msg) + def explain_help(self,modelSignature): + if modelSignature != '': + displaymsg = self.features_help(modelSignature) + urltext = '/AION/'+modelSignature+'/explain' + else: + datajson = {'Body':'Data Should be in JSON Format'} + displaymsg = json.dumps(datajson) + urltext = '/AION/UseCase_Version/explain' + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: anchor (Local Explanation),prediction,forceplot,multidecisionplot."""""".format(url=urltext,displaymsg=displaymsg) + return(msg) + def help_text(self,modelSignature): + predict_help = self.predict_help(modelSignature) + explain_help = self.explain_help(modelSignature) + features_help = self.getFeatures(modelSignature) + monitoring_help = self.monitoring_help(modelSignature) + performance_help = self.performance_help(modelSignature) + msg="""""" +Following URL: + +Prediction +{predict_help} + +Local Explaination +{explain_help} + +Features +{features_help} + +Monitoring +{monitoring_help} + +Performance +{performance_help} +"""""".format(predict_help=predict_help,explain_help=explain_help,features_help=features_help,monitoring_help=monitoring_help,performance_help=performance_help) + return msg + + def do_GET(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/', self.path): + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + helplist = self.path.split('/')[-1] + print(helplist) + if helplist.lower() == 'help': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + msg = self.help_text(model) + elif helplist.lower() == 'predict': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + msg = self.predict_help(model) + elif helplist.lower() == 'explain': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + msg = self.explain_help(model) + elif helplist.lower() == 'monitoring': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + msg = self.monitoring_help(model) + elif helplist.lower() == 'performance': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + msg = self.performance_help(model) + elif helplist.lower() == 'features': + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model ='' + status,msg = self.getModelFeatures(model) + else: + model = self.path.split('/')[-2] + if model.lower() == 'aion': + model =helplist + msg = self.help_text(model) + self.wfile.write(msg.encode()) + else: + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + return + +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class SimpleHttpServer(): + def __init__(self, ip, port,username,password): + handler_class = partial(HTTPRequestHandler,username=username,password=password,) + self.server = ThreadedHTTPServer((ip,port), handler_class) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + + def waitForThread(self): + self.server_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +def start_server(ip,port,username,password): + server = SimpleHttpServer(ip,int(port),username,password) + print('HTTP Server 
Running...........') + server.start() + server.waitForThread() + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import os +from pathlib import Path +os.chdir(Path(__file__).parent) +import json +import shutil +from mlac.timeseries import app as ts_app +from mlac.ml import app as ml_app +import traceback + +def create_test_file(config): + code_file = 'aionCode.py' + text = """""" +from pathlib import Path +import subprocess +import sys +import json +import argparse + +def run_pipeline(data_path): + print('Data Location:', data_path) + cwd = Path(__file__).parent + + monitor_file = str(cwd/'ModelMonitoring'/'{code_file}') + + load_file = str(cwd/'DataIngestion'/'{code_file}') + transformer_file = str(cwd/'DataTransformation'/'{code_file}') + selector_file = str(cwd/'FeatureEngineering'/'{code_file}') + train_folder = cwd + register_file = str(cwd/'ModelRegistry'/'{code_file}') + deploy_file = str(cwd/'ModelServing'/'{code_file}') + + print('Running modelMonitoring') + cmd = [sys.executable, monitor_file, '-i', data_path] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + result = json.loads(result[result.find('{search}'):]) + if result['Status'] == 'Failure': + exit() + + print('Running dataIngestion') + cmd = [sys.executable, load_file] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + result = json.loads(result[result.find('{search}'):]) + if result['Status'] == 'Failure': + exit() + + print('Running DataTransformation') + cmd = [sys.executable, transformer_file] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + result = json.loads(result[result.find('{search}'):]) + if result['Status'] == 'Failure': + exit() + + print('Running FeatureEngineering') + cmd = [sys.executable, selector_file] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + result = json.loads(result[result.find('{search}'):]) + if result['Status'] == 'Failure': + exit() + + train_models = [f for f in train_folder.iterdir() if 'ModelTraining' in f.name] + for model in train_models: + print('Running',model.name) + cmd = [sys.executable, str(model/'{code_file}')] + train_result = subprocess.check_output(cmd) + train_result = train_result.decode('utf-8') + print(train_result) + + print('Running ModelRegistry') + cmd = [sys.executable, register_file] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + result = json.loads(result[result.find('{search}'):]) + if result['Status'] == 'Failure': + exit() + + print('Running ModelServing') + cmd = [sys.executable, deploy_file] + result = subprocess.check_output(cmd) + result = result.decode('utf-8') + print(result) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--inputPath', help='path of the input data') + args = parser.parse_args() + if args.inputPath: + filename = args.inputPath + else: + filename = r""{filename}"" + try: + 
print(run_pipeline(filename)) + except Exception as e: + print(e) +"""""".format(filename=config['dataLocation'],search='{""Status"":',code_file=code_file) + deploy_path = Path(config[""deploy_path""])/'MLaC' + deploy_path.mkdir(parents=True, exist_ok=True) + py_file = deploy_path/""run_pipeline.py"" + with open(py_file, ""w"") as f: + f.write(text) + +def is_module_in_req_file(mod, folder): + status = False + if (Path(folder)/'requirements.txt').is_file(): + with open(folder/'requirements.txt', 'r') as f: + status = mod in f.read() + return status + +def copy_local_modules(config): + deploy_path = Path(config[""deploy_path""]) + local_modules_location = config.get(""local_modules_location"", None) + if local_modules_location: + folder_loc = local_modules_location + else: + folder_loc = Path(__file__).parent/'local_modules' + if not folder_loc.exists(): + folder_loc = None + if folder_loc: + file = folder_loc/'config.json' + if file.exists(): + with open(file, 'r') as f: + data = json.load(f) + for key, values in data.items(): + local_module = folder_loc/key + if local_module.exists(): + for folder in values: + target_folder = Path(deploy_path)/'MLaC'/folder + if target_folder.is_dir(): + if is_module_in_req_file(key, target_folder): + shutil.copy(local_module," +"target_folder) + +def validate(config): + error = '' + if 'error' in config.keys(): + error = config['error'] + return error + +def generate_mlac_code(config): + with open(config, 'r') as f: + config = json.load(f) + error = validate(config) + if error: + raise ValueError(error) + if config['problem_type'] in ['classification','regression']: + return generate_mlac_ML_code(config) + elif config['problem_type'].lower() == 'timeseriesforecasting': #task 11997 + return generate_mlac_TS_code(config) + +def generate_mlac_ML_code(config): + try: + ml_app.run_loader(config) + ml_app.run_transformer(config) + ml_app.run_selector(config) + ml_app.run_trainer(config) + ml_app.run_register(config) + ml_app.run_deploy(config) + ml_app.run_drift_analysis(config) + copy_local_modules(config) + create_test_file(config) + status = {'Status':'SUCCESS','MLaC_Location':str(Path(config[""deploy_path""])/'MLaC')} + except Exception as Inst: + status = {'Status':'Failure','msg':str(Inst)} + traceback.print_exc() + status = json.dumps(status) + return(status) + +def generate_mlac_TS_code(config): + try: + ts_app.run_loader(config) + ts_app.run_transformer(config) + ts_app.run_selector(config) + ts_app.run_trainer(config) + ts_app.run_register(config) + ts_app.run_deploy(config) + ts_app.run_drift_analysis(config) + create_test_file(config) + status = {'Status':'SUCCESS','MLaC_Location':str(Path(config[""deploy_path""])/'MLaC')} + except Exception as Inst: + status = {'Status':'Failure','msg':str(Inst)} + traceback.print_exc() + status = json.dumps(status) + return(status) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import joblib +import time +import pandas as pd +import numpy as np +import argparse +import json +import os +import pathlib +from pathlib import Path +from uncertainties.uq_main import aionUQ +import os +from datetime import datetime +from os.path import expanduser +import platform +import logging + +class run_uq: + def __init__(self,modelfeatures,modelFile,csvFile,target): + self.modelfeatures=modelfeatures + self.modelFile=modelFile + self.csvFile=csvFile + self.target=target + ##UQ classification fn + def getUQclassification(self,model,ProblemName,Params): + df = pd.read_csv(self.csvFile) + + # # object_cols = [col for col, col_type in df.dtypes.iteritems() if col_type == 'object'] -- Fix for python 3.8.11 update (in 2.9.0.8) + object_cols = [col for col, col_type in zip(df.columns,df.dtypes) if col_type == 'object'] + + df = df.drop(object_cols, axis=1) + df = df.dropna(axis=1) + df = df.reset_index(drop=True) + modelfeatures = self.modelfeatures + #tar = args.target + # target = df[tar] + y=df[self.target].values + y = y.flatten() + X = df.drop(self.target, axis=1) + try: + uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,self.target) + accuracy,uq_ece,output_jsonobject=uqObj.uqMain_BBMClassification() + except Exception as e: + print(""uq error"",e) + # print(""UQ Classification: \\n"",output_jsonobject) + # print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per) + #print(output_jsonobject) + return accuracy,uq_ece,output_jsonobject + + ##UQ regression fn + def getUQregression(self,model,ProblemName,Params): + + df = pd.read_csv(self.csvFile) + modelfeatures = self.modelfeatures + dfp = df[modelfeatures] + tar = self.target + target = df[tar] + uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression() + return total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject + + def uqMain(self,model): + #print(""inside uq main.\\n"") + reg_status="""" + class_status="""" + algorithm_status="""" + try: + model=model + if Path(self.modelFile).is_file(): + ProblemName = model.__class__.__name__ + if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']: + Problemtype = 'Classification' + elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor']: + Problemtype = 'Regression' + else: + Problemtype = ""None"" + + if Problemtype.lower() == 'classification': + try: + Params = model.get_params() + accuracy,uq_ece,output = self.getUQclassification(model,ProblemName,Params) + class_status=""SUCCESS"" + #print(output) + except Exception as e: + print(e) + class_status=""FAILED"" + output = {'Problem':'None','msg':str(e)} + output = json.dumps(output) + elif Problemtype.lower() == 'regression' : + try: + Params = model.get_params() + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,output = self.getUQregression(model,ProblemName,Params) + #print(uq_jsonobject) + reg_status=""SUCCESS"" + except Exception as e: + output = {'Problem':'None','msg':str(e)} + output = json.dumps(output) + reg_status=""FAILED"" + else: + try: + output={} + output['Problem']=""None"" + output['msg']=""Uncertainty Quantification not supported for this algorithm."" + output = json.dumps(output) + 
algorithm_status=""FAILED"" + except: + algorithm_status=""FAILED"" + except Exception as e: + print(e) + reg_status=""FAILED"" + class_status=""FAILED"" + algorithm_status=""FAILED"" + output = {'Problem':'None','msg':str(e)} + output = json.dumps(output) + return class_status,reg_status,algorithm_status,output + + +def aion_uq(modelFile,dataFile,features,targetfeatures): + try: + from appbe.dataPath import DEPLOY_LOCATION + uqLogLocation = os.path.join(DEPLOY_LOCATION,'logs') + try: + os.makedirs(uqLogLocation) + except OSError as e: + if (os.path.exists(uqLogLocation)): + pass + else: + raise OSError('uqLogLocation error.') + + filename_uq = 'uqlog_'+str(int(time.time())) + filename_uq=filename_uq+'.log' + filepath = os.path.join(uqLogLocation, filename_uq) + print(filepath) + logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') + log = logging.getLogger('aionUQ') + log.setLevel(logging.INFO) + log.info('************* Version - v1.7.0 *************** \\n') + if isinstance(features, list): + modelfeatures = features + else: + if ',' in features: + modelfeatures = [x.strip() for x in features.split(',')] + else: + modelfeatures = features.split(',') + model = joblib.load(modelFile) + + uqobj = run_uq(modelfeatures,modelFile,dataFile,targetfeatures) + class_status,reg_status,algorithm_status,output=uqobj.uqMain(model) + if (class_status.lower() == 'failed'): + log.info('uq classifiction failed./n') + elif (class_status.lower() == 'success'): + log.info('uq classifiction success./n') + else: + log.info('uq classifiction not used../n') + + if (reg_status.lower() == 'failed'): + log.info('uq regression failed./n') + elif (reg_status.lower() == 'success'): + log.info('uq regression success./n') + else: + log.info('uq regression not used./n') + if (algorithm_status.lower() == 'failed'): + log.info('Problem type issue, UQ only support classification and regression. 
May be selected algorithm not supported by Uncertainty Quantification currently./n') + + except Exception as e: + log.info('uq test failed.n'+str(e)) + #print(e) + output = {'Problem':'None','msg':str(e)} + output = json.dumps(output) + + return(output) +#Sagemaker main fn call + +if __name__=='__main__': + + try: + parser = argparse.ArgumentParser() + parser.add_argument('savFile') + parser.add_argument('csvFile') + parser.add_argument('features') + parser.add_argument('target') + args = parser.parse_args() + home = expanduser(""~"") + if platform.system() == 'Windows': + uqLogLocation = os.path.join(home,'AppData','Local','HCLT','AION','uqLogs') + else: + uqLogLocation = os.path.join(home,'HCLT','AION','uqLogs') + + try: + os.makedirs(uqLogLocation) + except OSError as e: + if (os.path.exists(uqLogLocation)): + pass + else: + raise OSError('uqLogLocation error.') + # self.sagemakerLogLocation=str(sagemakerLogLocation) + filename_uq = 'uqlog_'+str(int(time.time())) + filename_uq=filename_uq+'.log' + # filename = 'mlopsLog_'+Time() + filepath = os.path.join(uqLogLocation, filename_uq) + logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') + log = logging.getLogger('aionUQ') + log.setLevel(logging.DEBUG) + + + if ',' in args.features: + args.features = [x.strip() for x in args.features.split(',')] + else: + args.features = args.features.split(',') + modelFile = args.savFile + modelfeatures = args.features + csvFile = args.csvFile + target=args.target + model = joblib.load(args.savFile) + ##Main uq function call + uqobj = run_uq(modelfeatures,modelFile,csvFile,target) + class_status,reg_status,algorithm_status,output=uqobj.uqMain(model) + + if (class_status.lower() == 'failed'): + log.info('uq classifiction failed./n') + elif (class_status.lower() == 'success'): + log.info('uq classifiction success./n') + else: + log.info('uq classifiction not used../n') + + if (reg_status.lower() == 'failed'): + log.info('uq regression failed./n') + elif (reg_status.lower() == 'success'): + log.info('uq regression success./n') + else: + log.info('uq regression not used./n') + if (algorithm_status.lower() == 'failed'): + msg = 'Uncertainty Quantification not supported for this algorithm' + log.info('Algorithm not supported by Uncertainty Quantification./n') + output = {'Problem':'None','msg':str(msg)} + output = json.dumps(output) + except Exception as e: + log.info('uq test failed.n'+str(e)) + output = {'Problem':'None','msg':str(e)} + output = json.dumps(output) + #print(e) + print(output) import json +import logging +import os +import shutil +import time +import sys +from sys import platform +from distutils.util import strtobool +from config_manager.pipeline_config import AionConfigManager +from summarizer import Summarizer +# Base class for EION configuration Manager which read the needed f params from eion.json, initialize the parameterlist, read the respective params, store in variables and return back to caller function or external modules. 
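+# Usage sketch (illustrative only; the input file name is hypothetical). generateSummary()
+# below maps the requested summary size to the extractive summarizer's min_length parameter:
+# large -> 300, medium -> 150, small -> 60. The equivalent direct call would be:
+#   from summarizer import Summarizer
+#   text = open('article.txt', encoding='utf-8').read()
+#   print(''.join(Summarizer()(text, min_length=150)))   # same as stype == 'medium'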
+ +class AionTextManager: + + + def __init__(self): + self.log = logging.getLogger('eion') + self.data = '' + self.problemType = '' + self.basic = [] + self.advance=[] + + def readTextfile(self,dataPath): + #dataPath=self.[baisc][] + file = open(dataPath, ""r"") + data = file.read() + return data + #print(data) + + def generateSummary(self,data,algo,stype): + bert_model = Summarizer() + if stype == ""large"": + bert_summary = ''.join(bert_model(data, min_length=300)) + return(bert_summary) + elif stype == ""medium"": + bert_summary = ''.join(bert_model(data, min_length=150)) + return(bert_summary) + elif stype == ""small"": + bert_summary = ''.join(bert_model(data, min_length=60)) + return(bert_summary) + +def aion_textsummary(arg): + Obj = AionTextManager() + configObj = AionConfigManager() + readConfistatus,msg = configObj.readConfigurationFile(arg) + dataPath = configObj.getTextlocation() + text_data = Obj.readTextfile(data" +"Path) + getAlgo, getMethod = configObj.getTextSummarize() + summarize = Obj.generateSummary(text_data, getAlgo, getMethod) + output = {'status':'Success','summary':summarize} + output_json = json.dumps(output) + return(output_json) +if __name__ == ""__main__"": + aion_textsummary(sys.argv[1]) + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import sys +import json +import datetime, time, timeit +import logging +logging.getLogger('tensorflow').disabled = True +import shutil +import warnings +from config_manager.online_pipeline_config import OTAionConfigManager +from records import pushrecords +import logging +import mlflow +from pathlib import Path +from pytz import timezone + +def pushRecordForOnlineTraining(): + try: + from appbe.pages import getversion + status,msg = pushrecords.enterRecord(AION_VERSION) + except Exception as e: + print(""Exception"", e) + status = False + msg = str(e) + return status,msg + +def mlflowSetPath(path,experimentname): + import mlflow + url = ""file:"" + str(Path(path).parent.parent) + ""/mlruns"" + mlflow.set_tracking_uri(url) + mlflow.set_experiment(str(experimentname)) + +class server(): + + def __init__(self): + self.response = None + self.dfNumCols=0 + self.dfNumRows=0 + self.features=[] + self.mFeatures=[] + self.emptyFeatures=[] + self.vectorizerFeatures=[] + self.wordToNumericFeatures=[] + self.profilerAction = [] + self.targetType = '' + self.matrix1='{' + self.matrix2='{' + self.matrix='{' + self.trainmatrix='{' + self.numericalFeatures=[] + self.nonNumericFeatures=[] + self.similarGroups=[] + self.method = 'NA' + self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.modelSelTopFeatures=[] + self.topFeatures=[] + self.allFeatures=[] + + + def startScriptExecution(self, config_obj): + rowfilterexpression = '' + grouperbyjson = '' + model_tried='' + learner_type = '' + topics = {} + numericContinuousFeatures='' + discreteFeatures='' + threshold=-1 + targetColumn = '' + categoricalFeatures='' + dataFolderLocation = '' + original_data_file = '' + profiled_data_file = '' + trained_data_file = '' + predicted_data_file='' + featureReduction = 'False' + reduction_data_file='' + params={} + score = 0 + labelMaps={} + featureDataShape=[] + self.riverModels = [] + self.riverAlgoNames = ['Online Logistic Regression', 'Online Softmax Regression', 'Online Decision Tree Classifier', 'Online KNN Classifier', 'Online Linear Regression', 'Online Bayesian Linear Regression', 'Online Decision Tree Regressor','Online KNN Regressor'] + + #ConfigSettings + iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings() + scoreParam = config_obj.getScoringCreteria() + datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures() + iterName = iterName.replace("" "", ""_"") + deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile = config_obj.createDeploymentFolders(deployLocation,iterName,iterVersion) + + #Mlflow + mlflowSetPath(deployLocation,iterName+'_'+iterVersion) + + + #Logger + filehandler = logging.FileHandler(logFileName, 'w','utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('eion') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + log.info('************* Version - v2.2.5 *************** \\n') + msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone(""Asia/Kolkata"")).strftime('%Y-%m-%d %H:%M:%S' + ' IST') + log.info(msg) + + + startTime = timeit.default_timer() + try: + + output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}} + + #ConfigSetting + 
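+            # Added note: getModulesDetails() supplies the per-stage toggles that gate the rest of
+            # this method - data profiling, learning, visualization and deployment - together with
+            # the problem type and target feature. For this online (incremental) pipeline the
+            # feature-selector stage is forced off immediately below, and classification/regression
+            # runs fail fast when no target feature has been supplied.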
problemType,targetFeature,profilerStatus,selectorStatus,learnerStatus,visualizationstatus,deployStatus = config_obj.getModulesDetails() + selectorStatus = False + if(problemType.lower() in ['classification','regression']): + if(targetFeature == ''): + output = {""status"":""FAIL"",""message"":""Target Feature is Must for Classification and Regression Problem Type""} + return output + + #DataReading + from transformations.dataReader import dataReader + objData = dataReader() + if os.path.isfile(dataLocation): + dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier) + dataFrame.rename(columns=lambda x:x.strip(), inplace=True) + + + #FilterDataframe + filter = config_obj.getfilter() + if filter != 'NA': + dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame) + + #GroupDataframe + timegrouper = config_obj.gettimegrouper() + grouping = config_obj.getgrouper() + if grouping != 'NA': + dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame) + elif timegrouper != 'NA': + dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame) + + #KeepOnlyModelFtrs + dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature) + + log.info('\\n-------> First Ten Rows of Input Data: ') + log.info(dataFrame.head(10)) + self.dfNumRows=dataFrame.shape[0] + self.dfNumCols=dataFrame.shape[1] + + dataLoadTime = timeit.default_timer() - startTime + log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime)) + + if profilerStatus: + log.info('\\n================== Data Profiler has started ==================') + log.info('Status:-|... AION feature transformation started') + dp_mlstart = time.time() + profilerJson = config_obj.getEionProfilerConfigurarion() + log.info('-------> Input dataFrame(5 Rows): ') + log.info(dataFrame.head(5)) + log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape)) + from incremental.incProfiler import incProfiler + incProfilerObj = incProfiler() + dataFrame,targetColumn,self.mFeatures,self.numericalFeatures,self.nonNumericFeatures,labelMaps,self.configDict,self.textFeatures,self.emptyFeatures,self.wordToNumericFeatures = incProfilerObj.startIncProfiler(dataFrame,profilerJson,targetFeature,deployLocation,problemType) + self.features = self.configDict['allFtrs'] + log.info('-------> Data Frame Post Data Profiling(5 Rows): ') + log.info(dataFrame.head(5)) + log.info('Status:-|... AION feature transformation completed') + dp_mlexecutionTime=time.time() - dp_mlstart + log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime)) + log.info('================== Data Profiling completed ==================\\n') + + dataFrame.to_csv(profiled_data_file,index=False) + selectorStatus = False + if learnerStatus: + log.info('Status:-|... 
AION Learner data preparation started') + ldp_mlstart = time.time() + testPercentage = config_obj.getAIONTestTrainPercentage() + balancingMethod = config_obj.getAIONDataBalancingMethod() + from learner.machinelearning import machinelearning + mlobj = machinelearning() + modelType = problemType.lower() + targetColumn = targetFeature + if modelType == ""na"": + if self.targetType == 'categorical': + modelType = 'classification' + elif self.targetType == 'continuous': + modelType = 'regression' + datacolumns=list(dataFrame.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + features =datacolumns + featureData = dataFrame[features] + if targetColumn != '': + targetData = dataFrame[targetColumn] + xtrain,ytrain,xtest,ytest = mlobj.split_into_train_test_data(featureData,targetData,testPercentage,modelType) + categoryCountList = [] + if modelType == 'classification': + if(mlobj.checkForClassBalancing(ytrain) >= 1): + xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod) + valueCount=targetData.value_counts() + categoryCountList=valueCount.tolist() + ldp_mlexecutionTime=time.time() - ldp_mlstart + log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime)) + log.info('Status:-|... AION Learner data preparation completed') + if learnerStatus: + log.info('\\n================== ML Started ==================') + log.info('Status:-|... AION training started') + log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum())) + mlstart = time.time() + log.info('-------> Target Problem Type:'+ self.targetType) + learner_type = 'ML' + learnerJson = config_obj.getEionLearnerConfiguration() + log.info('-------> Target Model Type:'+ modelType) + modelParams,modelList = config_obj.getEionLearnerModelParams(modelType) + if(modelType == 'regression'): + allowedmatrix = ['mse','r2','rmse','mae'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'mse' + + if(modelType == 'classification'): + allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'accuracy' + scoreParam = scoreParam.lower() + from incremental.incMachineLearning import incMachineLearning + incMlObj = incMachineLearning(mlobj) + self.configDict['riverModel'] = False + status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=incMlObj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,self.features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps) + if model in self.riverAlgoNames: + self.configDict['riverModel'] = True + if(self.matrix != '{'): + self.matrix += ',' + if(self.trainmatrix != '{'): + self.trainmatrix += ',' + self.trainmatrix += trainmatrix + self.matrix += matrix + mlexecutionTime=time.time() - mlstart + log.info('-------> Total ML Execution Time '+str(mlexecutionTime)) + log.info('Status:-|... AION training completed') + log.info('================== ML Completed ==================\\n') + + if visualizationstatus: + visualizationJson = config_obj.getEionVisualizationConfiguration() + log.info('Status:-|... 
AION Visualizer started') + visualizer_mlstart = time.time() + from visualization.visualization import Visualization + visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfNum" +"Rows,self.dfNumCols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file) + visualizationObj.visualizationrecommandsystem() + visualizer_mlexecutionTime=time.time() - visualizer_mlstart + log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime)) + log.info('Status:-|... AION Visualizer completed') + try: + os.remove(os.path.join(deployLocation,'aion_xai.py')) + except: + pass + + + if deployStatus: + if str(model) != 'None': + log.info('\\n================== Deployment Started ==================') + log.info('Status:-|... AION Deployer started') + deployPath = deployLocation + deployer_mlstart = time.time() + src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','useCaseFiles') + shutil.copy2(os.path.join(src,'incBatchLearning.py'),deployPath) + os.rename(os.path.join(deployPath,'incBatchLearning.py'),os.path.join(deployPath,'aion_inclearning.py')) + shutil.copy2(os.path.join(src,'incBatchPrediction.py'),deployPath) + os.rename(os.path.join(deployPath,'incBatchPrediction.py'),os.path.join(deployPath,'aion_predict.py')) + self.configDict['modelName'] = str(model) + self.configDict['modelParams'] = params + self.configDict['problemType'] = problemType.lower() + self.configDict['score'] = score + self.configDict['metricList'] = [] + self.configDict['metricList'].append(score) + self.configDict['trainRowsList'] = [] + self.configDict['trainRowsList'].append(featureDataShape[0]) + self.configDict['scoreParam'] = scoreParam + self.configDict['partialFit'] = 0 + with open(os.path.join(deployLocation,'production', 'Config.json'), 'w', encoding='utf8') as f: + json.dump(self.configDict, f, ensure_ascii=False) + deployer_mlexecutionTime=time.time() - deployer_mlstart + log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) + log.info('Status:-|... 
AION Batch Deployment completed') + log.info('================== Deployment Completed ==================') + + + # self.features = profilerObj.set_features(self.features,self.textFeatures,self.vectorizerFeatures) + self.matrix += '}' + self.trainmatrix += '}' + matrix = eval(self.matrix) + trainmatrix = eval(self.trainmatrix) + model_tried = eval('['+model_tried+']') + try: + json.dumps(params) + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""trainmatrix"":trainmatrix,""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""params"":params,""EvaluatedModels"":model_tried,""LogFile"":logFileName}} + except: + output_json = {""status"":""SUCCESS"",""data"":{""ModelType"":modelType,""deployLocation"":deployPath,""BestModel"":model,""BestScore"":str(score),""ScoreType"":str(scoreParam).upper(),""matrix"":matrix,""trainmatrix"":trainmatrix,""featuresused"":str(self.features),""targetFeature"":str(targetColumn),""params"":"""",""EvaluatedModels"":model_tried,""LogFile"":logFileName}} + + print(output_json) + if bool(topics) == True: + output_json['topics'] = topics + with open(outputjsonFile, 'w') as f: + json.dump(output_json, f) + output_json = json.dumps(output_json) + + log.info('\\n------------- Summary ------------') + log.info('------->No of rows & columns in data:('+str(self.dfNumRows)+','+str(self.dfNumCols)+')') + log.info('------->No of missing Features :'+str(len(self.mFeatures))) + log.info('------->Missing Features:'+str(self.mFeatures)) + log.info('------->Text Features:'+str(self.textFeatures)) + log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures))) + log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures)) + if threshold == -1: + log.info('------->Threshold: NA') + else: + log.info('------->Threshold: '+str(threshold)) + log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps)) + if((learner_type != 'TS') & (learner_type != 'AR')): + log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape)) + log.info('------->Features Used for Modeling:'+str(self.features)) + log.info('------->Target Feature: '+str(targetColumn)) + log.info('------->Best Model Score :'+str(score)) + log.info('------->Best Parameters:'+str(params)) + log.info('------->Type of Model :'+str(modelType)) + log.info('------->Best Model :'+str(model)) + log.info('------------- Summary ------------\\n') + + + except Exception as inst: + log.info('server code execution failed !....'+str(inst)) + output_json = {""status"":""FAIL"",""message"":str(inst).strip('""')} + output_json = json.dumps(output_json) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + executionTime = timeit.default_timer() - startTime + log.info('\\nTotal execution time(sec) :'+str(executionTime)) + log.info('\\n------------- Output JSON ------------') + log.info('-------> Output :'+str(output_json)) + log.info('------------- Output JSON ------------\\n') + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + hdlr.close() + log.removeHandler(hdlr) + return output_json + + + + +def aion_ot_train_model(arg): + warnings.filterwarnings('ignore') + try: + valid, msg = pushRecordForOnlineTraining() + if valid: 
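+            # Added note: training runs only when the licence/telemetry record push succeeds. The
+            # JSON config path in 'arg' is parsed by OTAionConfigManager; a parse failure returns a
+            # FAIL status, otherwise server.startScriptExecution() executes the online training
+            # pipeline. If the record push fails, the else branch below returns a
+            # LicenseVerificationFailed status instead.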
+ serverObj = server() + configObj = OTAionConfigManager() + jsonPath = arg + readConfistatus,msg = configObj.readConfigurationFile(jsonPath) + if(readConfistatus == False): + output = {""status"":""FAIL"",""message"":str(msg).strip('""')} + output = json.dumps(output) + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + return output + output = serverObj.startScriptExecution(configObj) + else: + output = {""status"":""LicenseVerificationFailed"",""message"":str(msg).strip('""')} + output = json.dumps(output) + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + return output + except Exception as inst: + output = {""status"":""FAIL"",""message"":str(inst).strip('""')} + output = json.dumps(output) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + return output + +if __name__ == ""__main__"": + aion_ot_train_model(sys.argv[1]) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import joblib +import time +from pandas import json_normalize +import pandas as pd +import numpy as np +import argparse +import json +import os +import pathlib +from pathlib import Path +from sagemaker.aionMlopsService import aionMlopsService +import logging +import os.path +from os.path import expanduser +import platform,sys +from pathlib import Path +from sklearn.model_selection import train_test_split + +def getAWSConfiguration(mlops_params,log): + awsId=mlops_params['awsSagemaker']['awsID'] + if ((not awsId) or (awsId is None)): + awsId="""" + log.info('awsId error. ') + awsAccesskeyid=mlops_params['awsSagemaker']['accesskeyID'] + if ((not awsAccesskeyid) or (awsAccesskeyid is None)): + awsAccesskeyid="""" + log.info('awsAccesskeyid error. ') + awsSecretaccesskey=mlops_params['awsSagemaker']['secretAccesskey'] + if ((not awsSecretaccesskey) or (awsSecretaccesskey is None)): + awsSecretaccesskey="""" + log.info('awsSecretaccesskey error. ') + awsSessiontoken=mlops_params['awsSagemaker']['sessionToken'] + if ((not awsSessiontoken) or (awsSessiontoken is None)): + awsSessiontoken="""" + log.info('awsSessiontoken error. ') + awsRegion=mlops_params['awsSagemaker']['region'] + if ((not awsRegion) or (awsRegion is None)): + awsRegion="""" + log.info('awsRegion error. ') + IAMSagemakerRoleArn=mlops_params['awsSagemaker']['IAMSagemakerRoleArn'] + if ((not IAMSagemakerRoleArn) or (IAMSagemakerRoleArn is None)): + IAMSagemakerRoleArn="""" + log.info('IAMSagemakerRoleArn error. 
') + return awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn + +def getMlflowParams(mlops_params,log): + modelInput = mlops_params['modelInput'] + data = mlops_params['data'] + + mlflowtosagemakerDeploy=mlops_params['sagemakerDeploy'] + if ((not mlflowtosagemakerDeploy) or (mlflowtosagemakerDeploy is None)): + mlflowtosagemakerDeploy=""True"" + mlflowtosagemakerPushOnly=mlops_params['deployExistingModel']['status'] + if ((not mlflowtosagemakerPushOnly) or (mlflowtosagemakerPushOnly is None)): + mlflowtosagemakerPushOnly=""False"" + mlflowtosagemakerPushImageName=mlops_params['deployExistingModel']['dockerImageName'] + if ((not mlflowtosagemakerPushImageName) or (mlflowtosagemakerPushImageName is None)): + mlflowtosagemakerPushImageName=""mlops_image"" + mlflowtosagemakerdeployModeluri=mlops_params['deployExistingModel']['deployModeluri'] + if ((not mlflowtosagemakerdeployModeluri) or (mlflowtosagemakerdeployModeluri is None)): + mlflowtosagemakerdeployModeluri=""None"" + log.info('mlflowtosagemakerdeployModeluri error. ') + cloudInfrastructure = mlops_params['modelOutput']['cloudInfrastructure'] + if ((not cloudInfrastructure) or (cloudInfrastructure is None)): + cloudInfrastructure=""Sagemaker"" + endpointName=mlops_params['endpointName'] + if ((not endpointName) or (endpointName is None)): + sagemakerAppName=""aion-demo-app"" + log.info('endpointName not given, setting default one. ') + experimentName=str(endpointName) + mlflowContainerName=str(endpointName) + return modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName + +def getPredictionParams(mlops_params,log): + + predictStatus=mlops_params['prediction']['status'] + if ((not predictStatus) or (predictStatus is None)): + predictStatus=""False"" + modelInput = mlops_params['modelInput'] + data = mlops_params['data'] + if (predictStatus == ""True"" or predictStatus.lower()== ""true""): + if ((not modelInput) or (modelInput is None)): + log.info('prediction model input error.Please check given model file or its path for prediction ') + if ((not data) or (data is None)): + log.info('prediction data input error.Please check given data file or its path for prediction ') + targetFeature=mlops_params['prediction']['target'] + return predictStatus,targetFeature + +def sagemakerPrediction(mlopsobj,data,log): + df = json_normalize(data) + model=None + predictionStatus=False + try: + endpointPrediction=mlopsobj.predict_sm_app_endpoint(df) + + if (endpointPrediction is None): + log.info('Sagemaker endpoint application prediction Issue.') + outputjson = {""status"":""Error"",""msg"":""Sagemaker endpoint application prediction Issue""} + outputjson = json.dumps(outputjson) + #print(""predictions: ""+str(outputjson)) + predictionStatus=False + else: + log.info(""sagemaker end point Prediction: \\n""+str(endpointPrediction)) + df['prediction'] = endpointPred" +"iction + outputjson = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + outputjson = json.dumps(outputjson) + #print(""predictions: ""+str(outputjson)) + predictionStatus=True + except Exception as e: + #log.info(""sagemaker end point Prediction error: \\n"") + outputjson = {""status"":""Error"",""msg"":str(e)} + outputjson=None + predictionStatus=False + return outputjson,predictionStatus +## Main aion sagemaker fn call +def sagemaker_exec(mlops_params,log): + 
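+    # Added overview: this driver extracts the MLflow/SageMaker settings and AWS credentials from
+    # mlops_params, then either (a) forwards 'data' to an already deployed SageMaker endpoint when
+    # prediction.status is 'True', or (b) wraps the model file via aionMlopsService and deploys or
+    # pushes it, returning a JSON status string in both cases.
+    # Illustrative call (the config file name below is hypothetical):
+    #
+    #   with open('mlops_config.json') as f:
+    #       params = json.load(f)
+    #   print(sagemaker_exec(params, logging.getLogger('aionMLOps')))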
#mlops_params = json.loads(config) + mlops_params=mlops_params + modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName = getMlflowParams(mlops_params,log) + mlflowModelname=None + awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn = getAWSConfiguration(mlops_params,log) + predictStatus,targetFeature = getPredictionParams(mlops_params,log) + + sagemakerDeployOption='create' + deleteAwsecrRepository='False' + sagemakerAppName=str(endpointName) + ecrRepositoryName='aion-ecr-repo' + #aws ecr model app_name should contain only [[a-zA-Z0-9-]], again rechecking here. + import re + if sagemakerAppName: + pattern = re.compile(""[A-Za-z0-9-]+"") + # if found match (entire string matches pattern) + if pattern.fullmatch(sagemakerAppName) is not None: + #print(""Found match: "") + pass + else: + log.info('wrong sagemaker Application Name, Nmae should contains only [A-Za-z0-9-] .') + app_name = 'aion-demo-app' + else: + app_name = 'aion-demo-app' + #Following 3 aws parameter values are now hard coded , because currently we are not using. If aion using the options, please make sure to get the values from GUI . + sagemakerDeployOption=""create"" + deleteAwsecrRepository=""False"" + ecrRepositoryName=""aion_test_repo"" + log.info('mlops parameter check done.') + # predictionStatus=False + deploystatus = 'SUCCESS' + try: + log.info('cloudInfrastructure: '+str(cloudInfrastructure)) + if(cloudInfrastructure.lower() == ""sagemaker""): + ## sagemaker app prediction call + if (predictStatus.lower() == ""true""): + + # df = json_normalize(data) + model=None + mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName) + outputjson,predictionStatus = sagemakerPrediction(mlopsobj,data,log) + print(""predictions: ""+str(outputjson)) + predictionStatus=predictionStatus + return(outputjson) + else: + if Path(modelInput).is_file(): + + msg = '' + model = joblib.load(modelInput) + ProblemName = model.__class__.__name__ + mlflowModelname=str(ProblemName) + log.info('aion mlops Model name: '+str(mlflowModelname)) + df=None + mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName) + mlflow2sm_status,localhost_container_status=mlopsobj.mlflow2sagemaker_deploy() + log.info('mlflow2sm_status: '+str(mlflow2sm_status)) + log.info('localhost_container_status: '+str(localhost_container_status)) + # Checking deploy status + if (mlflowtosagemakerPushOnly.lower() == ""true"" ): + if (mlflow2sm_status.lower() == ""success""): + deploystatus = 'SUCCESS' + msg = 'Endpoint succesfully deployed in sagemaker' + log.info('Endpoint succesfully deployed in sagemaker (Push eisting model container).\\n ') + elif(mlflow2sm_status.lower() == ""failed""): + deploystatus = 'ERROR' + msg = 'Endpoint failed to deploy in sagemaker' + log.info('Endpoint failed to deploy in 
sagemaker. (Push eisting model container).\\n ') + else: + pass + elif(mlflowtosagemakerDeploy.lower() == ""true""): + if (mlflow2sm_status.lower() == ""success""): + deploystatus='SUCCESS' + msg = 'Endpoint succesfully deployed in sagemaker' + log.info('Endpoint succesfully deployed in sagemaker') + elif(mlflow2sm_status.lower() == ""failed""): + deploystatus = 'ERROR' + msg = 'Endpoint failed to deploy in sagemaker' + log.info('Endpoint failed to deploy in sagemaker.\\n ') + elif (mlflow2sm_status.lower() == ""Notdeployed""): + deploystatus= 'ERROR' + msg = 'Sagemaker compatible container created' + log.info('sagemaker endpoint not deployed, check aws connection and credentials. \\n') + + elif (mlflowtosagemakerDeploy.lower() == ""false""): + if(localhost_container_status.lower() == ""success""): + deploystatus = 'SUCCESS' + msg = 'Localhost mlops docker created successfully' + log.info('Localhost mlops docker created successfully. \\n') + elif(localhost_container_status.lower() == ""failed""): + deploystatus = 'ERROR' + msg = 'Localhost mlops docker created failed' + log.info('Localhost mlops docker creation failed. \\n') + elif (localhost_container_status.lower() == ""Notdeployed""): + deploystatus= 'ERROR' + log.info('Localhost mlops docker not deployed, check local docker status. \\n') + else: + pass + else: + pass + else: + deploystatus = 'ERROR' + msg = 'Model Path not Found' + print('Error: Model Path not Found') + outputjson = {""status"":str(deploystatus),""data"":str(msg)} + outputjson = json.dumps(outputjson) + print(""predictions: ""+str(outputjson)) + return(outputjson) + except Exception as inst: + outputjson = {""status"":str(deploystatus),""data"":str(msg)} + outputjson = json.dumps(outputjson) + print(""predictions: ""+str(outputjson)) + return(outputjson) + +def aion_sagemaker(config): + try: + mlops_params = config + print(mlops_params) + from appbe.dataPath import LOG_LOCATION + sagemakerLogLocation = LOG_LOCATION + try: + os.makedirs(sagemakerLogLocation) + except OSError as e: + if (os.path.exists(sagemakerLogLocation)): + pass + else: + raise OSError('sagemakerLogLocation error.') + + filename_mlops = 'mlopslog_'+str(int(time.time())) + filename_mlops=filename_mlops+'.log' + filepath = os.path.join(sagemakerLogLocation, filename_mlops) + logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') + log = logging.getLogger('aionMLOps') + log.setLevel(logging.DEBUG) + + output = sagemaker_exec(mlops_params,log) + return output + + except Exception as inst: + print(inst) + deploystatus = 'ERROR' + output = {""status"":str(deploystatus),""data"":str(inst)} + output = json.dumps(output) + print(""predictions: ""+str(output)) + return(output) + +#Sagemaker main fn call +if __name__=='__main__': + json_config = str(sys.argv[1]) + output = aion_sagemaker(json.loads(json_config)) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import requests +import json +import os +from datetime import datetime +import socket +import getmac +def telemetry_data(operation,Usecase,data): + now = datetime.now() + ID = datetime.timestamp(now) + record_date = now.strftime(""%y-%m-%d %H:%M:%S"") + try: + user = os.getlogin() + except: + user = 'NA' + computername = socket.getfqdn() + macaddress = getmac.get_mac_address() + item = {} + item['ID'] = str(int(ID)) + item['record_date'] = record_date + item['UseCase'] = Usecase + item['user'] = str(user) + item['operation'] = operation + item['remarks'] = data + item['hostname'] = computername + item['macaddress'] = macaddress + url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' + record = {} + record['TableName'] = 'AION_OPERATION' + record['Item'] = item + record = json.dumps(record) + try: + response = requests.post(url, data=record,headers={""x-api-key"":""Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK"",""Content-Type"":""application/json"",}) + check_telemetry_file() + except Exception as inst: + filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') + f=open(filename, ""a+"") + f.write(record+'\\n') + f.close() + +def check_telemetry_file(): + file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') + if(os.path.isfile(file_path)): + f = open(file_path, 'r') + file_content = f.read() + f.close() + matched_lines = file_content.split('\\n') + write_lines = [] + url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' + for record in matched_lines: + try: + response = requests.post(url, data=record,headers={""x-api-key"":""Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK"",""Content-Type"":""application/json"",}) + except: + write_lines.append(record) + f = open(file_path, ""a"") + f.seek(0) + f.truncate() + for record in write_lines: + f.write(record+'\\n') + f.close() + return True + else: + return True ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import sys +import json +import datetime,time,timeit +import itertools + +#Sci-Tools imports + +import numpy as np +import pandas as pd +import math +from statsmodels.tsa.stattools import adfuller +from scipy.stats.stats import pearsonr +from numpy import cumsum, log, polyfit, sqrt, std, subtract +from numpy.random import randn + +from sklearn.metrics import normalized_mutual_info_score +from sklearn.feature_selection import mutual_info_regression +import logging + +#SDP1 class import +from feature_engineering.featureImportance import featureImp +from feature_engineering.featureReducer import featureReducer +from sklearn.linear_model import Lasso, LogisticRegression +from sklearn.feature_selection import SelectFromModel +from sklearn.ensemble import ExtraTreesClassifier +from sklearn.decomposition import PCA +from sklearn.decomposition import TruncatedSVD +from sklearn.decomposition import FactorAnalysis +from sklearn.decomposition import FastICA +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.preprocessing import MinMaxScaler +from sklearn.feature_selection import RFE + +def ranking(ranks, names, order=1): + minmax = MinMaxScaler() + ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0] + ranks = map(lambda x: round(x,2), ranks) + return dict(zip(names, ranks)) + +# noinspection PyPep8Naming +class featureSelector(): + def __init__(self): + self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.log = logging.getLogger('eion') + + def startSelector(self,df,conf_json,textFeatures,targetFeature,problem_type): + try: + categoricalMaxLabel = int(conf_json['categoryMaxLabel']) + pca='None' + pcaReducerStatus = conf_json['featureEngineering']['PCA'] + svdReducerStatus = conf_json['featureEngineering']['SVD'] + factorReducerStatus = conf_json['featureEngineering']['FactorAnalysis'] + icaReducerStatus = conf_json['featureEngineering']['ICA'] + nfeatures=float(conf_json['featureEngineering']['numberofComponents']) + statisticalConfig = conf_json['statisticalConfig'] + " +"corrThresholdInput = float(statisticalConfig.get('correlationThresholdFeatures',0.50)) + corrThresholdTarget = float(statisticalConfig.get('correlationThresholdTarget',0.85)) + pValThresholdInput = float(statisticalConfig.get('pValueThresholdFeatures',0.05)) + pValThresholdTarget = float(statisticalConfig.get('pValueThresholdTarget',0.04)) + varThreshold = float(statisticalConfig.get('varianceThreshold',0.01)) + allFeaturesSelector = conf_json['featureSelection']['allFeatures'] + correlationSelector = conf_json['featureSelection']['statisticalBased'] + modelSelector = conf_json['featureSelection']['modelBased'] + featureSelectionMethod = conf_json['selectionMethod']['featureSelection'] + featureEngineeringSelector = conf_json['selectionMethod']['featureEngineering'] + if featureSelectionMethod == 'True': + featureEngineeringSelector = 'False' + + # if feature engineering is true then we check weather PCA is true or svd is true. 
By default we will run PCA + if featureEngineeringSelector == 'True': + if pcaReducerStatus == 'True': + svdReducerStatus = 'False' + factorReducerStatus=='False' + icaReducerStatus == 'False' + elif svdReducerStatus == 'True': + pcaReducerStatus = 'False' + factorReducerStatus=='False' + icaReducerStatus == 'False' + elif factorReducerStatus=='True': + pcaReducerStatus=='False' + svdReducerStatus=='False' + icaReducerStatus=='False' + elif icaReducerStatus=='True': + pcaReducerStatus==""False"" + svdReducerStatus==""False"" + factorReducerStatus==""False"" + else: + pcaReducerStatus = 'True' + if featureSelectionMethod == 'False' and featureEngineeringSelector == 'False': + featureSelectionMethod = 'True' + if featureSelectionMethod == 'True': + if modelSelector == 'False' and correlationSelector == 'False' and allFeaturesSelector == 'False': + modelSelector = 'True' + reductionMethod = 'na' + bpca_features = [] + #nfeatures = 0 + + if 'maxClasses' in conf_json: + maxclasses = int(conf_json['maxClasses']) + else: + maxClasses = 20 + target = targetFeature + self.log.info('-------> Feature: '+str(target)) + dataFrame = df + pThresholdInput=pValThresholdInput + pThresholdTarget=pValThresholdTarget + cThresholdInput=corrThresholdInput + cThresholdTarget=corrThresholdTarget + numericDiscreteFeatures=[] + similarGruops=[] + numericContinuousFeatures=[] + categoricalFeatures=[] + nonNumericFeatures=[] + apca_features = [] + dTypesDic={} + dataColumns = list(dataFrame.columns) + features_list = list(dataFrame.columns) + modelselectedFeatures=[] + topFeatures=[] + allFeatures=[] + targetType="""" + # just to make sure feature engineering is false + #print(svdReducerStatus) + if featureEngineeringSelector.lower() == 'false' and correlationSelector.lower() == ""true"" and len(textFeatures) <= 0: + reducerObj=featureReducer() + self.log.info(featureReducer.__doc__) + self.log.info('Status:- |... Feature reduction started') + updatedNumericFeatures,updatedFeatures,similarGruops=reducerObj.startReducer(dataFrame,dataColumns,target,varThreshold) + if len(updatedFeatures) <= 1: + self.log.info('=======================================================') + self.log.info('Most of the features are of low variance. Use Model based feature engineering for better result') + self.log.info('=======================================================') + raise Exception('Most of the features are of low variance. Use Model based feature engineering for better result') + dataFrame=dataFrame[updatedFeatures] + dataColumns=list(dataFrame.columns) + self.log.info('Status:- |... 
Feature reduction completed') + elif (pcaReducerStatus.lower() == ""true"" or svdReducerStatus.lower() == 'true' or factorReducerStatus.lower() == 'true' or icaReducerStatus.lower()=='true') and featureEngineeringSelector.lower() == 'true': + # check is PCA or SVD is true + pcaColumns=[] + #print(svdReducerStatus.lower()) + if target != """": + dataColumns.remove(target) + targetArray=df[target].values + targetArray.shape = (len(targetArray), 1) + if pcaReducerStatus.lower() == ""true"": + if nfeatures == 0: + pca = PCA(n_components='mle',svd_solver = 'full') + elif nfeatures < 1: + pca = PCA(n_components=nfeatures,svd_solver = 'full') + else: + pca = PCA(n_components=int(nfeatures)) + pca.fit(df[dataColumns]) + bpca_features = dataColumns.copy() + pcaArray=pca.transform(df[dataColumns]) + method = 'PCA' + elif svdReducerStatus.lower() == 'true': + if nfeatures < 2: + nfeatures = 2 + pca = TruncatedSVD(n_components=int(nfeatures), n_iter=7, random_state=42) + pca.fit(df[dataColumns]) + bpca_features = dataColumns.copy() + pcaArray=pca.transform(df[dataColumns]) + method = 'SVD' + elif factorReducerStatus.lower()=='true': + if int(nfeatures) == 0: + pca=FactorAnalysis() + else: + pca=FactorAnalysis(n_components=int(nfeatures)) + pca.fit(df[dataColumns]) + bpca_features = dataColumns.copy() + pcaArray=pca.transform(df[dataColumns]) + method = 'FactorAnalysis' + elif icaReducerStatus.lower()=='true': + if int(nfeatures) == 0: + pca=FastICA() + else: + pca=FastICA(n_components=int(nfeatures)) + pca.fit(df[dataColumns]) + bpca_features = dataColumns.copy() + pcaArray=pca.transform(df[dataColumns]) + method = 'IndependentComponentAnalysis' + pcaDF=pd.DataFrame(pcaArray) + #print(pcaDF) + for i in range(len(pcaDF.columns)): + pcaColumns.append(method+str(i)) + topFeatures=pcaColumns + apca_features= pcaColumns.copy() + if target != '': + pcaColumns.append(target) + scaledDf = pd.DataFrame(np.hstack((pcaArray, targetArray)),columns=pcaColumns) + else: + scaledDf = pd.DataFrame(pcaArray,columns=pcaColumns) + self.log.info(""<--- dataframe after dimensionality reduction using ""+method) + self.log.info(scaledDf.head()) + dataFrame=scaledDf + dataColumns=list(dataFrame.columns) + self.log.info('Status:- |... Feature reduction started') + self.log.info('Status:- |... '+method+' done') + self.log.info('Status:- |... 
Feature reduction completed') + + self.numofCols = dataFrame.shape[1] + self.numOfRows = dataFrame.shape[0] + + dataFDtypes=[] + for i in dataColumns: + dataType=dataFrame[i].dtypes + dataFDtypes.append(tuple([i,str(dataType)])) + #Categoring datatypes + for item in dataFDtypes: + dTypesDic[item[0]] = item[1] + if item[0] != target: + if item[1] in ['int16', 'int32', 'int64'] : + numericDiscreteFeatures.append(item[0]) + elif item[1] in ['float16', 'float32', 'float64']: + numericContinuousFeatures.append(item[0]) + else: + nonNumericFeatures.append(item[0]) + self.numOfRows = dataFrame.shape[0] + ''' + cFRatio = 0.01 + if(self.numOfRows < 1000): + cFRatio = 0.2 + elif(self.numOfRows < 10000): + cFRatio = 0.1 + elif(self.numOfRows < 100000): + cFRatio = 0.01 + ''' + for i in numericDiscreteFeatures: + nUnique=len(dataFrame[i].unique().tolist()) + nRows=self.numOfRows + if nUnique <= categoricalMaxLabel: + categoricalFeatures.append(i) + + for i in numericContinuousFeatures: + nUnique=len(dataFrame[i].unique().tolist()) + nRows=self.numOfRows + if nUnique <= categoricalMaxLabel: + categoricalFeatures.append(i) + + discreteFeatures=list(set(numericDiscreteFeatures)-set(categoricalFeatures)) + numericContinuousFeatures=list(set(numericContinuousFeatures)-set(categoricalFeatures)) + self.log.info('-------> Numerical continuous features :'+(str(numericContinuousFeatures))[:500]) + self.log.info('-------> Numerical discrete features :'+(str(discreteFeatures))[:500]) + self.log.info('-------> Non numerical features :'+(str(nonNumericFeatures))[:500]) + self.log.info('-------> Categorical Features :'+(str(categoricalFeatures))[:500]) + + if target !="""" and featureEngineeringSelector.lower() == ""false"" and correlationSelector.lower() == ""true"": + self.log.info('\\n------- Feature Based Correlation Analysis Start ------') + start = time.time() + featureImpObj = featureImp() + topFeatures,targetType= featureImpObj.FFImpNew(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThresholdInput,pThresholdTarget,cThresholdInput,cThresholdTarget,categoricalMaxLabel,problem_type,maxClasses) + #topFeatures,targetType= featureImpObj.FFImp(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThreshold,cThreshold,categoricalMaxLabel,problem_type,maxClasses) + self.log.info('-------> Highly Correlated Features Using Correlation Techniques'+(str(topFeatures))[:500]) + executionTime=time.time() - start + self.log.info('-------> Time Taken: '+str(executionTime)) + self.log.info('Status:- |... 
Correlation based feature selection done: '+str(len(topFeatures))+' out of '+str(len(dataColumns))+' selected') + self.log.info('------- Feature Based Correlation Analysis End ------>\\n') + if targetType == '': + if problem_type.lower() == 'classification': + targetType = 'categorical' + + if problem_type.lower() == 'regression': + targetType = 'continuous' + + + if target !="""" and featureEngineeringSelector.lower() == ""false"" and modelSelector.lower() == ""true"": + self.log.info('\\n------- Model Based Correlation Analysis Start -------') + start = time.time() + updatedFeatures = dataColumns + updatedFeatures.remove(target) + #targetType = problem_type.lower() + modelselectedFeatures=[] + if targetType == 'categorical': + try: + xtrain=dataFrame[updatedFeatures] + ytrain=dataFrame[target] + + etc = ExtraTreesClassifier(n_estimators=100) + + etc.fit(xtrain, ytrain) + rfe = RFE(etc, n_features_to_select=1, verbose =0 ) + rfe.fit(xtrain, ytrain) + # total list of features + ranks = {} + ranks[""RFE_LR""] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1) + + for item in ranks[""RFE_LR""]: + if ranks[""RFE_LR""][item]>0.30: #threshold as 30% + modelselectedFeatures.append(item) + modelselectedFeatures = list(modelselectedFe" +"atures) + self.log.info('-------> Highly Correlated Features Using Treeclassifier + RFE: '+(str(modelselectedFeatures))[:500]) + except Exception as e: + self.log.info('---------------->'+str(e)) + selector = SelectFromModel(ExtraTreesClassifier()) + xtrain=dataFrame[updatedFeatures] + ytrain=dataFrame[target] + selector.fit(xtrain,ytrain) + modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist() + self.log.info('-------> Highly Correlated Features Using Treeclassifier: '+(str(modelselectedFeatures))[:500]) + else: + try: + xtrain=dataFrame[updatedFeatures] + ytrain=dataFrame[target] + ls = Lasso() + ls.fit(xtrain, ytrain) + rfe = RFE(ls, n_features_to_select=1, verbose = 0 ) + rfe.fit(xtrain, ytrain) + # total list of features + ranks = {} + ranks[""RFE_LR""] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1) + + for item in ranks[""RFE_LR""]: + if ranks[""RFE_LR""][item]>0.30: #threshold as 30% + modelselectedFeatures.append(item) + modelselectedFeatures = list(modelselectedFeatures) + self.log.info('-------> Highly Correlated Features Using LASSO + RFE: '+(str(modelselectedFeatures))[:500]) + except Exception as e: + self.log.info('---------------->'+str(e)) + selector = SelectFromModel(Lasso()) + xtrain=dataFrame[updatedFeatures] + ytrain=dataFrame[target] + selector.fit(xtrain,ytrain) + modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist() + self.log.info('-------> Highly Correlated Features Using LASSO: '+(str(modelselectedFeatures))[:500]) + executionTime=time.time() - start + self.log.info('-------> Time Taken: '+str(executionTime)) + self.log.info('Status:- |... 
Model based feature selection done: '+str(len(modelselectedFeatures))+' out of '+str(len(dataColumns))+' selected') + self.log.info('--------- Model Based Correlation Analysis End -----\\n') + + if target !="""" and featureEngineeringSelector.lower() == ""false"" and allFeaturesSelector.lower() == ""true"": + allFeatures = features_list + if target != '': + allFeatures.remove(target) + #print(allFeatures) + if len(topFeatures) == 0 and len(modelselectedFeatures) == 0 and len(allFeatures) == 0: + allFeatures = features_list + + return dataFrame,target,topFeatures,modelselectedFeatures,allFeatures,targetType,similarGruops,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pca,bpca_features,apca_features,featureEngineeringSelector + except Exception as inst: + self.log.info('Feature selector failed: '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import sys +import json +import datetime,time,timeit +import itertools + +#Sci-Tools imports + +import numpy as np +import pandas as pd +import math +from statsmodels.tsa.stattools import adfuller +from scipy.stats.stats import pearsonr +from numpy import cumsum, log, polyfit, sqrt, std, subtract +from numpy.random import randn +#SDP1 class import +from feature_engineering.featureImportance import featureImp +from sklearn.feature_selection import VarianceThreshold +import logging + +class featureReducer(): + def __init__(self): + self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.log = logging.getLogger('eion') + def startReducer(self,df,data_columns,target,var_threshold): + self.log.info('\\n---------- Feature Reducer Start ----------') + dataframe = df + columns=data_columns + target = target + corrThreshold=1.0 + categoricalFeatures=[] + nonNumericFeatures=[] + constFeatures=[] + qconstantColumns=[] + DtypesDic={} + numericFeatures=[] + nonNumericalFeatures=[] + similarFeatureGroups=[] + try: + dataFDtypes=self.dataFramecolType(dataframe) + for item in dataFDtypes: + DtypesDic[item[0]] = item[1] + if item[1] in self.pandasNumericDtypes: + numericFeatures.append(item[0]) + else: + nonNumericFeatures.append(item[0]) + #Checking for constant data features + for col in columns: + try: + distCount = len(dataframe[col].unique()) + if(distCount == 1): + constFeatures.append(col) + except Exception as inst: + self.log.info('Unique Testing Fail for Col '+str(col)) + + numericalDataCols,nonNumericalDataCols = [],[] + #Removing constant data features + if(len(constFeatures) != 0): + self.log.info( '-------> Constant Features: '+str(constFeatures)) + numericalDataCols = list(set(numericFeatures) - set(constFeatures)) + nonNumericalDataCols = list(set(nonNumericFeatures) - set(constFeatures)) + else: + numericalDataCols = list(set(numericFeatures)) + nonNumericalDataCols = 
list(set(nonNumericFeatures)) + if(len(numericalDataCols) > 1): + if var_threshold !=0: + qconstantFilter = VarianceThreshold(threshold=var_threshold) + tempDf=df[numericalDataCols] + qconstantFilter.fit(tempDf) + qconstantColumns = [column for column in numericalDataCols if column not in tempDf.columns[qconstantFilter.get_support()]] + + if(len(qconstantColumns) != 0): + if target != '' and target in qconstantColumns: + qconstantColumns.remove(target) + self.log.info( '-------> Low Variant Features: '+str(qconstantColumns)) + self.log.info('Status:- |... Low variance feature treatment done: '+str(len(qconstantColumns))+' low variance features found') + numericalDataCols = list(set(numericalDataCols) - set(qconstantColumns)) + else: + self.log.info('Status:- |... Low variance feature treatment done: Found zero or 1 numeric feature') + #Minimum of two columns required for data integration + if(len(numericalDataCols) > 1): + numColPairs = list(itertools.product(numericalDataCols, numericalDataCols)) + noDupList = [] + for item in numColPairs: + if(item[0] != item[1]): + noDupList.append(item) + numColPairs = noDupList + tempArray = [] + for item in numColPairs: + tempCorr = np.abs(dataframe[item[0]].corr(dataframe[item[1]])) + if(tempCorr > corrThreshold): + tempArray.append(item[0]) + tempArray = np.unique(tempArray) + nonsimilarNumericalCols = list(set(numericalDataCols) - set(tempArray)) + ''' + Notes: + tempArray: List of all similar/equal data features + nonsimilarNumericalCols: List of all non-correlatable data features + ''' + #Grouping similar/equal features + groupedFeatures = [] + if(len(numericalDataCols) != len(nonsimilarNumericalCols)): + #self.log.info( '-------> Similar/Equal Features: Not Any') + #Correlation dictionary + corrDic = {} + for feature in tempArray: + temp = [] + for col in tempArray: + tempCorr = np.abs(dataframe[feature].corr(dataframe[col])) + temp.append(tempCorr) + corrDic[feature] = temp + #Similar correlation dataframe + corrDF = pd.DataFrame(corrDic,index = tempArray) + corrDF.loc[:,:] = np.tril(corrDF, k=-1) + alreadyIn = set() + similarFeatures = [] + for col in corrDF: + perfectCorr = corrDF[col][corrDF[col] > corrThreshold].index.tolist() + if perfectCorr and col not in alreadyIn: + alreadyIn.update(set(perfectCorr)) + perfectCorr.append(col) + similarFeatures.append(perfectCorr) + self.log.info( '-------> No Similar/Equal Features: '+str(len(similarFeatures))) + for i in range(0,len(similarFeatures)): + similarFeatureGroups.append(similarFeatures[i]) + #self.log.info((str(i+1)+' '+str(similarFeatures[i]))) + self.log.info('-------> Similar/Equal Features: '+str(similarFeatureGroups)) + self.log.info('-------> Non Similar Features :'+str(nonsimilarNumericalCols)) + updatedSimFeatures = [] + for items in similarFeatures: + if(target != '' and target in items): + for p in items: + updatedSimFeatures.append(p) + else: + updatedSimFeatures.append(items[0]) + newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols)) + updatedNumFeatures = newTempFeatures + #self.log.info( '\\n <--- Merged similar/equal features into one ---> ') + updatedFeatures = list(set(newTempFeatures + nonNumericalDataCols)) + self.log.info('Status:- |... Similar feature treatment done: '+str(len(similarFeatures))+' similar features found') + else: + updatedNumFeatures = numericalDataCols + updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns)) + self.log.info( '-------> Similar/Equal Features: Not Any') + self.log.info('Status:- |... 
Similar feature treatment done: No similar features found') + else: + updatedNumFeatures = numericalDataCols + updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns)) + self.log.info( '\\n-----> Need minimum of two numerical features for data integration.') + self.log.info('Status:- |... Similar feature treatment done: Found zero or 1 numeric feature') + self.log.info('---------- Feature Reducer End ----------\\n') + + return updatedNumFeatures,updatedFeatures,similarFeatureGroups + except Exception as inst: + self.log.info(""feature Reducer failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return [],[] + + def dataFramecolType(self,dataFrame): + dataFDtypes=[] + try: + dataColumns=list(dataFrame.columns) + for i in dataColumns: + dataType=dataFrame[i].dtypes + dataFDtypes.append(tuple([i,str(dataType)])) + return dataFDtypes + except: + self.log.info(""error in dataFramecolyType"") + return dataFDtypes + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +'''" +"''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +#System imports +import os +import sys +import json +import datetime,time,timeit +import itertools + +#Sci-Tools imports + +import numpy as np +import pandas as pd +import math + + +from sklearn.metrics import normalized_mutual_info_score +from sklearn.feature_selection import f_regression,mutual_info_regression +from sklearn.feature_selection import chi2,f_classif,mutual_info_classif + +import scipy.stats +from scipy.stats import pearsonr, spearmanr, pointbiserialr, f_oneway, kendalltau, chi2_contingency +import statsmodels.api as sm +import statsmodels.formula.api as smf +import logging + +def getHigherSignificanceColName(featureDict, colname1, colname2): + if featureDict[colname1] Change Target Type to Categorial as user defined') + + if problem_type.lower() == 'regression' and targetType == 'categorical': + targetType = 'continuous' + self.log.info( '-------> Change Target Type to Continuous as user defined') + self.log.info( '-------> Target Type: '+str(targetType)) + + impFeatures=[] + + + catFeature = [] + numFeature = [] + + catFeatureXYcat = [] + numFeatureXYcat = [] + catFeatureXYnum= [] + numFeatureXYnum = [] + + dropFeatureCat= [] + dropFeatureNum = [] + + featureDict = {} + + + + if targetType ==""categorical"": + + if len(categoricalFeatures) !=0: + # input vs target + # chi-square + for col in categoricalFeatures: + contingency = pd.crosstab(dataframe[col], targetData) + stat, p, dof, expected = chi2_contingency(contingency) + if p <= pValThTarget: + catFeatureXYcat.append(col) # categorical feature xy when target is cat + featureDict[col] = p + + #input vs input + # chi_square + if len(catFeatureXYcat) != 0: + length = len(catFeatureXYcat) + for i in range(length): + for j in range(i+1, length): + contingency = pd.crosstab(dataframe[catFeatureXYcat[i]], dataframe[catFeatureXYcat[j]]) + stat, p, dof, expected = chi2_contingency(contingency) + if p > pValThInput: + highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYcat[i], catFeatureXYcat[j]) + dropFeatureCat.append(highSignificanceColName) + break + + + catFeature = list(set(catFeatureXYcat) - set(dropFeatureCat)) + featureDict.clear() + dropFeatureCat.clear() + if len(quantFeatures) !=0: + # input vs target + # one way anova + for col in quantFeatures: + CategoryGroupLists = dataframe.groupby(target)[col].apply(list) + AnovaResults = f_oneway(*CategoryGroupLists) + if AnovaResults[1] <= pValThTarget: + numFeatureXYcat.append(col) #numeric feature xy when target is cat + featureDict[col] = AnovaResults[1] + + + #input vs input + # preason/spearman/ols # numeric feature xx when target is cat + if len(numFeatureXYcat) != 0: + df_xx = dataframe[numFeatureXYcat] + rows, cols = df_xx.shape + flds = list(df_xx.columns) + + + corr_pearson = df_xx.corr(method='pearson').values + corr_spearman = df_xx.corr(method='spearman').values + + + for i in range(cols): + for j in range(i+1, cols): + if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput: + if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput: + #f = ""'""+flds[i]+""'""+' ~ '+""'""+flds[j]+""'"" + #reg = smf.ols(formula=f, data=dataframe).fit() + tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]}) + reg = smf.ols('y~x', data=tmpdf).fit() + if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput: + highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + + else: + highSignificanceColName = 
getHigherSignificanceColName(featureDict, flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + else: + highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + + + + numFeature = list(set(numFeatureXYcat) - set(dropFeatureNum)) + dropFeatureNum.clear() + featureDict.clear() + + impFeatures = numFeature+catFeature + hCorrFeatures=list(set((impFeatures))) + + + else: # targetType ==""continuous"": + if len(categoricalFeatures) !=0: + # input vs target + # Anova + for col in categoricalFeatures: + #f = target+' ~ C('+col+')' + #model = smf.ols(f, data=dataframe).fit() + #table = sm.stats.anova_lm(model, typ=2) + tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]}) + model = smf.ols('y~x', data=tmpdf).fit() + table = sm.stats.anova_lm(model, typ=2) + if table['PR(>F)'][0] <= pValThTarget: + catFeatureXYnum.append(col) #categorical feature xy when target is numeric + featureDict[col]=table['PR(>F)'][0] + #input vs input + # chi_square + if len(catFeatureXYnum) != 0: + length = len(catFeatureXYnum) + for i in range(length): + for j in range(i+1, length): + contingency = pd.crosstab(dataframe[catFeatureXYnum[i]], dataframe[catFeatureXYnum[j]]) + stat, p, dof, expected = chi2_contingency(contingency) + if p > pValThInput: + highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYnum[i], catFeatureXYnum[j]) + dropFeatureCat.append(highSignificanceColName) + break + catFeature = list(set(catFeatureXYnum) - set(dropFeatureCat)) + dropFeatureCat.clear() + featureDict.clear() + if len(quantFeatures) !=0: + # input vs target + # preason/spearman/ols + for col in quantFeatures: + pearson_corr = pearsonr(dataframe[col], targetData) + coef = round(pearson_corr[0],5) + p_value = round(pearson_corr[1],5) + if coef > -corrThTarget and coef < corrThTarget: + spearman_corr = spearmanr(dataframe[col], targetData) + coef = round(spearman_corr[0],5) + p_value = round(spearman_corr[1],5) + if coef > -corrThTarget and coef < corrThTarget: + #f = target+' ~ '+col + #reg = smf.ols(formula=f, data=dataframe).fit() + tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]}) + reg = smf.ols('y~x', data=tmpdf).fit() + if len(reg.pvalues) > 1 and reg.pvalues[1] <= pValThTarget: + numFeatureXYnum.append(col) # numeric feature xx when target is numeric + featureDict[col]=reg.pvalues[1] + else: + numFeatureXYnum.append(col) + featureDict[col]=p_value + else: + numFeatureXYnum.append(col) + featureDict[col]=p_value + #input vs input + # preason/spearman/ols + if len(numFeatureXYnum) != 0: + df_xx = dataframe[numFeatureXYnum] + rows, cols = df_xx.shape + flds = list(df_xx.columns) + corr_pearson = df_xx.corr(method='pearson').values + corr_spearman = df_xx.corr(method='spearman').values + + + for i in range(cols): + for j in range(i+1, cols): + if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput: + if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput: + #f = flds[i]+' ~ '+flds[j] + #reg = smf.ols(formula=f, data=dataframe).fit() + tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]}) + reg = smf.ols('y~x', data=tmpdf).fit() + + if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput: + highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + " +" + else: + highSignificanceColName = getHigherSignificanceColName(featureDict, 
flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + else: + highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) + dropFeatureNum.append(highSignificanceColName) + break + + + numFeature = list(set(numFeatureXYnum) - set(dropFeatureNum)) + featureDict.clear() + dropFeatureNum.clear() + + impFeatures = numFeature+catFeature + hCorrFeatures=list(set(impFeatures)) + + + + + return hCorrFeatures,targetType + except Exception as inst: + self.log.info( '\\n--> Failed calculating feature importance '+str(inst)) + hCorrFeatures=[] + targetType='' + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + self.log.info('\\n--> Taking all the features as highest correlation features') + hCorrFeatures = list(dataframe.columns) + return hCorrFeatures,targetType + + + def FFImp(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValTh,corrTh,categoricalMaxLabel,problem_type,maxClasses): + ''' + Input: dataframe, numeric continuous features, numeric discrete features + Output: feature importance dictionary + ''' + try: + dataframe =df + contiFeatures= contFeatures + discreteFeatures = discreteFeatures + nonNumeric = nonNumericFeatures + categoricalFeatures=categoricalFeatures + self.dTypesDic = dTypesDic + numericFeatures = contiFeatures + discreteFeatures+categoricalFeatures + quantFeatures=discreteFeatures+contiFeatures + scorrDict={} + fScoreDict={} + pcorrDict={} + miDict={} + targetData=dataframe[target] + data=dataframe[numericFeatures] + nUnique=len(targetData.unique().tolist()) + nRows=targetData.shape[0] + ''' + print(""\\n ===> nUnique :"") + print(nUnique) + print(""\\n ===> nRows :"") + print(nRows) + print(""\\n ===> cFRatio :"") + print(cFRatio) + print(""\\n ===> nUnique/nRows :"") + ''' + #calratio = nUnique + + self.log.info( '-------> Target Column Unique Stats: '+str(nUnique)+' nRows: '+str(nRows)+' Unique:'+str(nUnique)) + #sys.exit() + if nUnique <= categoricalMaxLabel: + targetType=""categorical"" + else: + targetType=""continuous"" + + if problem_type.lower() == 'classification' and targetType == 'continuous': + targetType = 'categorical' + self.log.info( '-------> Change Target Type to Categorial as user defined') + + if problem_type.lower() == 'regression' and targetType == 'categorical': + targetType = 'continuous' + self.log.info( '-------> Change Target Type to Continuous as user defined') + self.log.info( '-------> Target Type: '+str(targetType)) + impFeatures=[] + featureImpDict={} + if targetType ==""categorical"": + try: + if len(categoricalFeatures) !=0: + categoricalData=dataframe[categoricalFeatures] + chiSqCategorical=chi2(categoricalData,targetData)[1] + corrSeries=pd.Series(chiSqCategorical, index=categoricalFeatures) + impFeatures.append(corrSeries[corrSeriescorrTh].index.tolist()) + featureImpDict['anovaPValue']=fClassSeries.to_dict() + featureImpDict['MIScore']=miClassSeries.to_dict() + except MemoryError as inst: + self.log.info( '-------> MemoryError in feature selection. '+str(inst)) + + pearsonScore=dataframe.corr() + + targetPScore=abs(pearsonScore[target]) + impFeatures.append(targetPScore[targetPScorecorrTh].index.tolist()) + featureImpDict['anovaPValue']=fregSeries.to_dict() + featureImpDict['MIScore']=miregSeries.to_dict() + except MemoryError as inst: + self.log.info( '-------> MemoryError in feature selection. 
'+str(inst)) + + pearsonScore=dataframe.corr() + targetPScore=abs(pearsonScore[target]) + impFeatures.append(targetPScore[targetPScore Failed calculating feature importance '+str(inst)) + hCorrFeatures=[] + targetType='' + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + return hCorrFeatures,targetType + + ''' + Importance degree + Computes set of relational parameters + pearson correlation, mutual information + ''' + def importanceDegree(self,dataframe,feature1,feature2): + try: + tempList = [] + #Parameter 1: pearson correlation + pcorr = self.pearsonCoff(dataframe,feature1,feature2) + tempList.append(pcorr) + #Parameter 2: mutual information + #Testing + mi = self.mutualInfo(dataframe,feature1,feature2,self.dTypesDic) + tempList.append(mi) + #return the highest parameter + return np.max(tempList) + except: + return 0.0 + + ''' + Compute pearson correlation + ''' + def pearsonCoff(self,dataframe,feature1,feature2): + try: + value=dataframe[feature1].corr(dataframe[feature2]) + return np.abs(value) + except: + return 0.0 + + ''' + Compute mutual information + ''' + def mutualInfo(self,dataframe,feature1,feature2,typeDic): + try: + numType = {'int64': 'discrete','int32' : 'discrete','int16' : 'discrete','float16' : 'continuous','float32' : 'continuous','float64' : 'continuous'} + featureType1 = numType[typeDic[feature1]] + featureType2 = numType[typeDic[feature2]] + bufferList1=dataframe[feature1].values.tolist() + bufferList2=dataframe[feature2].values.tolist() + #Case 1: Only if both are discrete + if(featureType1 == 'discrete' and featureType2 == 'discrete'): + tempResult = discreteMI(bufferList1,bufferList2) + return np.mean(tempResult) + #Case 2: If one of the features is continuous + elif(featureType1 == 'continuous' and featureType2 == 'discrete'): + tempResult = self.categoricalMI(bufferList1,bufferList2) + return np.mean(tempResult) + else: + tempResult = self.continuousMI(bufferList1,bufferList2) + return np.mean(tempResult) + except: + return 0.0 + + + def continuousMI(self,bufferList1,bufferList2): + mi = 0.0 + #Using mutual info regression from feature selection + mi = mutual_info_regression(self.vec(bufferList1),bufferList2) + return mi + + def categoricalMI(self,bufferList1,bufferList2): + mi = 0.0 + #Using mutual info classification from feature selection + mi = mutual_info_classif(self.vec(bufferList1),bufferList2) + return mi + + def discreteMI(self,bufferList1,bufferList2): + mi = 0.0 + #Using scikit normalized mutual information function + mi = normalized_mutual_info_score(bufferList1,bufferList2) + return mi + + def vec(self,x): + return [[i] for i in x] + + + + + +import pandas as pd +import numpy as np +from appbe.eda import ux_eda +from sklearn.preprocessing import LabelEncoder +import json +import matplotlib.pyplot as plt +import os +import mpld3 +import subprocess +import os +import sys +import re +import json +import pandas as pd + +from appbe.eda import ux_eda +from aif360.datasets import StandardDataset +from aif360.metrics import ClassificationMetric +from aif360.datasets import BinaryLabelDataset + +def get_metrics(request): + dataFile = os.path.join(request.session['deploypath'], ""data"", ""preprocesseddata.csv.gz"") + predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py') + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""display.json"") + f = open(displaypath, ""r"") + 
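# Editor's hedged sketch of the prediction-capture pattern used a few lines
# below: run a scoring script in a child process, capture its stdout, pull out
# the line that starts with "predictions:", and parse the JSON payload. The
# inline child script is a stand-in for aion_predict.py.
import json
import re
import subprocess
import sys

child = 'import json; print("predictions:" + json.dumps({"data": [{"prediction": 1}]}))'
out = subprocess.check_output([sys.executable, '-c', child]).decode('utf-8')
payload = re.search(r'predictions:(.*)', out, re.IGNORECASE).group(1).strip()
predict_dict = json.loads(payload)
print(predict_dict['data'][0]['prediction'])   # -> 1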
configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + Target_feature = configSettings['targetFeature'] + + outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + + df = pd.read_csv(dataFile) + df_p = pd.DataFrame.from_dict(predict_dict['data']) + + d3_url = request.GET.get('d3_url') + mpld3_url = request.GET.get('mpld3_url') + df_temp = request.GET.get('feature') + global metricvalue + metricvalue = request.GET.get('metricvalue') + + Protected_feature = df_temp + + df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability']) + df_p.rename(columns={'prediction': Target_feature}, inplace=True) + + + eda_obj = ux_eda(dataFile, optimize=1) + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + + features_to_Encode = features + + categorical_names = {} + encoders = {} + + for feature in features_to_Encode: + le = LabelEncoder() + le.fit(df[feature]) + df[feature] = le.transform(df[feature]) + le.fit(df_p[feature]) + df_p[feature] = le.transform(df_p[feature]) + categorical_names[feature] = le.classes_ + encoders[feature]" +"= le + + new_list = [item for item in categorical_names[Protected_feature] if not(pd.isnull(item)) == True] + claas_size = len(new_list) + + if claas_size > 10: + return 'HeavyFeature' + + metrics = fair_metrics(categorical_names" +"satype.lower() == 'first': + S = Si['S1'] + else: + S = Si['ST'] + return S + except Exception as e: + print('Error in calculating Si for Regression: ', str(e)) + raise ValueError(str(e)) + + + def plotSi(self, S, saType): + try: + import matplotlib.pyplot as plt + if saType.lower() == 'first': + title, label = 'Sensitivity Analysis', 'First order' + else: + title, label = 'Sensitivity Analysis', 'Total order' + x = np.arange(len(self.problem['names'])) + width = 0.35 + fig, ax = plt.subplots() + ax.bar(x - width / 2, S, width, label=label) + ax.set_xticks(x) + ax.set_xlabel('Features') + ax.set_ylabel('Sensitivity Indices') + ax.set_title(title) + ax.set_xticklabels(self.problem['names'], rotation=45, ha=""right"") + ax.legend() + + plt.tight_layout() + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + SAimage = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as e: + print(e) + SAimage = '' + return SAimage + +def checkModelType(modelName): + isML= False + isDL = False + + if modelName in [""Neural Network"", ""Convolutional Neural Network (1D)"", ""Recurrent Neural Network"",""Recurrent Neural Network (GRU)"", + ""Recurrent Neural Network (LSTM)"", ""Neural Architecture Search"", ""Deep Q Network"", ""Dueling Deep Q Network""]: + isDL = True + elif modelName in [""Linear Regression"",""Lasso"",""Ridge"",""Logistic Regression"", ""Naive Bayes"", ""Decision Tree"", ""Random Forest"", ""Support Vector Machine"", ""K Nearest Neighbors"", ""Gradient Boosting"", + ""Extreme Gradient Boosting (XGBoost)"", ""Light Gradient Boosting (LightGBM)"", ""Categorical Boosting (CatBoost)"",""Bagging (Ensemble)""]: + isML = True + + return isML,isDL + + + +def startSA(request): + + try: + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""display.json"") + if not os.path.exists(displaypath): + raise 
Exception('Config file not found.') + with open(displaypath) as file: + config = json.load(file) + + probelmType = config['problemType'] + + if probelmType.lower() not in ['classification','regression']: + raise Exception(f""Probolem Type: {probelmType} not supported"") + + isML,isDL = checkModelType(config['modelname']) + sample_size = 1024 + if isML: + model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model'])) + sample_size = 2048 + if isDL: + from tensorflow.keras.models import load_model + model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model'])) + sample_size = 512 + + target = config['targetFeature'] + featureName = config['modelFeatures'] + dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz') + if not os.path.exists(dataPath): + raise Exception('Data file not found.') + + from utils.file_ops import read_df_compressed + read_status,dataFrame = read_df_compressed(dataPath) + + obj = sensitivityAnalysis(model, probelmType, dataFrame, target, featureName) + obj.preprocess() + obj.generate_samples(sample_size) + submitType = str(request.GET.get('satype')) + saType = 'first' if submitType == 'first' else 'total' + if probelmType.lower() == 'classification': + SA_values = obj.calSiClass(saType,isML,isDL) + else: + SA_values = obj.calSiReg(saType,isML,isDL) + if SA_values.size and saType: + + graph = obj.plotSi(SA_values, saType) + if graph: + outputJson = {'Status': ""Success"", ""graph"": graph} + else: + outputJson = {'Status': ""Error"", ""graph"": '','reason':'Error in Plotting Graph'} + else: + outputJson = {'Status': ""Error"", ""graph"": '','reason':'Error in calculating Si values'} + output_json = json.dumps(outputJson) + return output_json + except Exception as e: + print(str(e)) + raise ValueError(str(e)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
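# Editor's hedged sketch of the Sobol sensitivity-analysis flow that
# plotSi()/startSA() above rely on (Si['S1'] first-order vs Si['ST'] total-order
# indices). It assumes the SALib package (saltelli sampler, sobol analyzer);
# the toy linear model and problem spec are illustrative, not the class's own
# generate_samples()/calSi implementation.
import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

problem = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[0.0, 1.0]] * 3,
}

X = saltelli.sample(problem, 512)             # sample_size plays the same role as above
Y = X[:, 0] + 2.0 * X[:, 1] + 0.1 * X[:, 2]   # stand-in for model.predict(X)

Si = sobol.analyze(problem, Y)
S_first = Si['S1']    # used when satype == 'first'
S_total = Si['ST']    # used otherwise
print(dict(zip(problem['names'], np.round(S_first, 3))))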
+* +''' import joblib +import pandas as pd +import sys +import math +import time +import pandas as pd +import numpy as np +from sklearn.metrics import confusion_matrix +from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score +from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import classification_report, confusion_matrix +from sklearn.svm import SVC +from sklearn.linear_model import LinearRegression +import argparse +import json + +def mltesting(modelfile,datafile,features,target): + + + model = joblib.load(modelfile) + + ProblemName = model.__class__.__name__ + + + if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']: + Problemtype = 'Classification' + elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']: + Problemtype = 'Regression' + else: + Problemtype = 'Unknown' + if Problemtype == 'Classification': + Params = model.get_params() + try: + df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) + if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC': + features = model.feature_names_in_ + elif ProblemName == 'XGBClassifier': + features = model.get_booster().feature_names + elif ProblemName == 'LGBMClassifier': + features = model.feature_name_ + elif ProblemName == 'CatBoostClassifier': + features = model.feature_names_ + + modelfeatures = features + dfp = df[modelfeatures] + tar = target + target = df[tar] + predic = model.predict(dfp) + output = {} + matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) + matrixconfusion = matrixconfusion.to_json(orient='index') + classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() + classificationreport = round(classificationreport,2) + classificationreport = classificationreport.to_json(orient='index') + output[""Precision""] = ""%.2f"" % precision_score(target, predic,average='weighted') + output[""Recall""] = ""%.2f"" % recall_score(target, predic,average='weighted') + output[""Accuracy""] = ""%.2f"" % accuracy_score(target, predic) + output[""ProblemName""] = ProblemName + output[""Status""] = ""Success"" + output[""Params""] = Params + output[""Problemtype""] = Problemtype + output[""Confusionmatrix""] = matrixconfusion + output[""classificationreport""] = classificationreport + + # import statistics + # timearray = [] + # for i in range(0,5): + # start = time.time() + # predic1 = model.predict(dfp.head(1)) + # end = time.time() + # timetaken = (round((end - start) * 1000,2),'Seconds') + # timearray.append(timetaken) + # print(timearray) + + + start = time.time() + for i in range(0,5): + predic1 = model.predict(dfp.head(1)) + end = time.time() + timetaken = (round((end - start) * 1000,2),'Seconds') + + # print(timetaken) + + start1 = time.time() + for i in range(0,5): + predic2 = model.predict(dfp.head(10)) + end1 = time.time() + timetaken1 = (round((end1 
- start1) * 1000,2) ,'Seconds') + # print(timetaken1) + + + start2 = time.time() + for i in range(0,5): + predic3 = model.predict(dfp.head(100)) + end2 = time.time() + timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') + # print(timetaken2) + + output[""onerecord""] = timetaken + output[""tenrecords""] = timetaken1 + output[""hundrecords""] = timetaken2 + print(json.dumps(output)) + except Exception as e: + output = {} + output['Problemtype']='Classification' + output['Status']= ""Fail"" + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Problem Type : Classification \\\\n Error : {}'.format(ProblemName, str(e).replace('""','//""').replace('\\n', '\\\\n')) + print(output[""Msg""]) + print(json.dumps(output)) + + elif Problemtype == 'Regression': + Params = model.get_params() + try: + df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) + if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor': + features = model.feature_names_in_ + elif ProblemName == 'XGBRegressor': + features = model.get_booster().feature_names + elif ProblemName == 'LGBMRegressor': + features = model.feature_name_ + elif ProblemName == 'CatBoostRegressor': + features = model.feature_names_ + modelfeatures = features + dfp = df[modelfeatures] + tar = target + target = df[tar] + predict = model.predict(dfp) + mse = mean_squared_error(target, predict) + mae = mean_absolute_error(target, predict) + rmse = math.sqrt(mse) + r2 = r2_score(target,predict,multioutput='variance_weighted') + output = {} + output[""MSE""] = ""%.2f"" % mean_squared_error(target, predict) + output[""MAE""] = ""%.2f"" % mean_absolute_error(target, predict) + output[""RMSE""] = ""%.2f"" % math.sqrt(mse) + output[""R2""] = ""%.2f"" %r2_score(target,predict,multioutput='variance_weighted') + output[""ProblemName""] = ProblemName + output[""Problemtype""] = Problemtype + output[""Params""] = Params + output['Status']='Success' + start = time.time() + predic1 = model.predict(dfp.head(1)) + end = time.time() + timetaken = (round((end - start) * 1000,2) ,'Seconds') + + # print(timetaken) + + start1 = time.time() + predic2 = model.predict(dfp.head(10)) + end1 = time.time() + timetaken1 = (round((end1 - start1) * 1000,2),'Seconds') + # print(timetaken1) + + + start2 = time.time() + predic3 = model.predict(dfp.head(100)) + end2 = time.time() + timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') + # print(timetaken2) + + output[""onerecord""] = timetaken + output[""tenrecords""] = timetaken1 + output[""hundrecords""] = timetaken2 + print(json.dumps(output)) + except Exception as e: + output = {} + output['Problemtype']='Regression' + output['Status']='Fail' + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Problem Type : Regression \\\\n Error : {}'.format(ProblemName, str(e).replace('""','//""').replace('\\n', '\\\\n')) + print(json.dumps(output)) + + else: + output = {} + output['Problemtype']='Unknown' + output['Status']='Fail' + output['Params'] = '' + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Error : {}'.format(ProblemName, 'Model not supported') + print(json.dumps(output)) + return(json.dumps(output)) + +def baseline_testing(modelFile,csvFile,features,target): + features = [x.strip() 
for x in features.split(',')] + return mltesting(modelFile,csvFile,features,target) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Techn" +"ologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +import numpy as np +import pandas as pd +import sklearn.metrics as metrics +from collections import defaultdict +from sklearn.metrics import confusion_matrix +import re +import shutil +import scipy.stats as st +import json +import os,sys +import glob +import logging +from utils.file_ops import read_df_compressed +class Visualization(): + def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file): + self.dataframe = dataframe + self.displayjson = {} + self.visualizationJson = visualizationJson + self.dateTimeColumn = dateTimeColumn + self.deployPath = deployPath + #shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath) + if learner_type == 'ML' and modelname != 'Neural Architecture Search': + if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))): + os.remove(os.path.join(self.deployPath,'explainable_ai.py')) + shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath) + # os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + try: + os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + except FileExistsError: + os.remove(os.path.join(self.deployPath,'aion_xai.py')) + os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + elif learner_type == 'DL' or modelname == 'Neural Architecture Search': + if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))): + os.remove(os.path.join(self.deployPath,'explainable_ai.py')) + shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath) + # os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + try: + 
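# Editor's hedged aside on the copy-then-rename pattern used in this
# try/except block: os.rename() raises FileExistsError on Windows when the
# target already exists, which is why the original removes aion_xai.py first.
# os.replace() overwrites on all platforms. Paths and names below are
# illustrative only, not part of the deployment layout.
import os
import shutil
import tempfile

def copy_as(src_path: str, dst_path: str) -> None:
    """Copy src_path to dst_path, overwriting dst_path if it exists."""
    tmp_copy = dst_path + '.tmp'
    shutil.copy2(src_path, tmp_copy)   # preserves metadata, like copy2 above
    os.replace(tmp_copy, dst_path)     # overwrite-safe rename

if __name__ == '__main__':
    with tempfile.TemporaryDirectory() as d:
        src = os.path.join(d, 'explainable_ai.py')
        dst = os.path.join(d, 'aion_xai.py')
        open(src, 'w').write('# xai stub\n')
        open(dst, 'w').write('# old file\n')
        copy_as(src, dst)
        print(open(dst).read())        # prints '# xai stub'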
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + except FileExistsError: + os.remove(os.path.join(self.deployPath,'aion_xai.py')) + os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + self.jsondeployPath = deployPath + #self.deployPath = self.deployPath+'visualization/' + self.dataFolderLocation = dataFolderLocation + self.vectorizerFeatures = vectorizerFeatures + self.textFeatures = textFeatures + self.emptyFeatures = emptyFeatures + ''' + try: + os.makedirs(self.deployPath) + except OSError as e: + print(""\\nFolder Already Exists"") + ''' + self.numericContinuousFeatures = numericContinuousFeatures + self.discreteFeatures = discreteFeatures + self.categoricalFeatures = categoricalFeatures + self.modelFeatures = modelFeatures + self.modeltype = modeltype + self.targetFeature = targetFeature + self.displayjson['usecasename'] = str(usecasename) + self.displayjson['version'] = str(version) + self.displayjson['problemType'] = str(self.modeltype) + self.displayjson['targetFeature'] = self.targetFeature + self.displayjson['numericalFeatures'] = numericalFeatures + self.displayjson['nonNumericFeatures'] = nonNumericFeatures + self.displayjson['modelFeatures'] = self.modelFeatures + self.displayjson['textFeatures'] = self.textFeatures + self.displayjson['emptyFeatures'] = self.emptyFeatures + self.displayjson['modelname']= str(modelname) + self.displayjson['preprocessedData'] = str(original_data_file) + self.displayjson['nrows'] = str(nrows) + self.displayjson['ncols'] = str(ncols) + self.displayjson['saved_model'] = str(saved_model) + self.displayjson['scoreParam'] = str(scoreParam) + self.displayjson['labelMaps'] = eval(str(labelMaps)) + self.original_data_file = original_data_file + self.displayjson['featureReduction'] = featureReduction + if featureReduction == 'True': + self.displayjson['reduction_data_file'] = reduction_data_file + else: + self.displayjson['reduction_data_file'] = '' + self.pred_filename = predicted_data_file + self.profiled_data_file = profiled_data_file + self.displayjson['predictedData'] = predicted_data_file + self.displayjson['postprocessedData'] = profiled_data_file + #self.trained_data_file = trained_data_file + #self.displayjson['trainingData'] = trained_data_file + #self.displayjson['categorialFeatures']=categoricalFeatures + #self.displayjson['discreteFeatures']=discreteFeatures + #self.displayjson['continuousFeatures']=numericContinuousFeatures + #y = json.dumps(self.displayjson) + #print(y) + self.labelMaps = labelMaps + self.log = logging.getLogger('eion') + + def visualizationrecommandsystem(self): + try: + import tensorflow.keras.utils as kutils + datasetid = self.visualizationJson['datasetid'] + self.log.info('\\n================== Data Profiling Details==================') + datacolumns=list(self.dataframe.columns) + self.log.info('================== Data Profiling Details End ==================\\n') + self.log.info('================== Features Correlation Details ==================\\n') + self.log.info('\\n================== Model Performance Analysis ==================') + if os.path.exists(self.pred_filename): + try: + status,df=read_df_compressed(self.pred_filename) + if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection': + y_actual = df['actual'].values + y_predict = df['predict'].values + y_actual = kutils.to_categorical(y_actual) + y_predict = 
kutils.to_categorical(y_predict) + classes = df.actual.unique() + n_classes = y_actual.shape[1] + self.log.info('-------> ROC AUC CURVE') + roc_curve_dict = [] + for i in classes: + try: + classname = i + if str(self.labelMaps) != '{}': + inv_map = {v: k for k, v in self.labelMaps.items()} + classname = inv_map[i] + fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i]) + roc_auc = metrics.auc(fpr, tpr) + class_roc_auc_curve = {} + class_roc_auc_curve['class'] = str(classname) + fprstring = ','.join(str(v) for v in fpr) + tprstring = ','.join(str(v) for v in tpr) + class_roc_auc_curve['FP'] = str(fprstring) + class_roc_auc_curve['TP'] = str(tprstring) + roc_curve_dict.append(class_roc_auc_curve) + self.log.info('----------> Class: '+str(classname)) + self.log.info('------------> ROC_AUC: '+str(roc_auc)) + self.log.info('------------> False Positive Rate (x Points): '+str(fpr)) + self.log.info('------------> True Positive Rate (y Points): '+str(tpr)) + except: + pass + self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict + self.log.info('-------> Precision Recall CURVE') + precision_recall_curve_dict = [] + for i in range(n_classes): + try: + lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i]) + classname = i + if str(self.labelMaps) != '{}': + inv_map = {v: k for k, v in self.labelMaps.items()} + classname = inv_map[i] + roc_auc = metrics.auc(lr_recall,lr_precision) + class_precision_recall_curve = {} + class_precision_recall_curve['class'] = str(classname) + Precisionstring = ','.join(str(round(v,2)) for v in lr_precision) + Recallstring = ','.join(str(round(v,2)) for v in lr_recall) + class_precision_recall_curve['Precision'] = str(Precisionstring) + class_precision_recall_curve['Recall'] = str(Recallstring) + precision_recall_curve_dict.append(class_precision_recall_curve) + except: + pass + + self.log.info('----------> Class: '+str(classname)) + self.log.info('------------> ROC_AUC: '+str(roc_auc)) + self.log.info('------------> Recall (x Points): '+str(lr_precision)) + self.log.info('------------> Precision (y Points): '+str(lr_recall)) + self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict + status,predictdataFrame=read_df_compressed(self.displayjson['predictedData']) + except Exception as e: + self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e)) + self.log.info('================== Model Performance Analysis End ==================\\n') + self.log.info('\\n================== For Descriptive Analysis of Model Features ==================') + + + outputfile = os.path.join(self.jsondeployPath,'etc','display.json') + with open(outputfile, 'w') as fp: + json.dump(self.displayjson, fp) + self.log.info('================== For Descriptive Analysis of Model Features End ==================\\n') + except Exception as inst: + self.log.info('Visualization Failed !....'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid): + title = 'aion_visualization_'+xcolumn+""_""+ycolumn+""_linechart"" + yaxisname = 'Average '+ycolumn + datasetindex = datasetid + visulizationjson = '[{""_id"": ""543234"",""_type"": ""visualization"",""_source"": {""title"": ""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": ""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = 
visulizationjson+'\\\\""type\\\\"":\\\\""line\\\\"",\\\\""params\\\\"":{\\\\""type\\\\"":\\\\""line\\\\"",\\\\""grid\\\\"":{\\\\""categoryLines\\\\"":false,\\\\""style\\\\"":{\\\\""color\\\\"":\\\\""#eee\\\\""}},\\\\""categoryAxes\\\\"":[{\\\\""id\\\\"":\\\\""CategoryAxis-1\\\\"",\\\\""type\\\\"":\\\\""category\\\\"",\\\\""position\\\\"":\\\\""bottom\\\\"",\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""title\\\\"":{}}],\\\\""valueAxes\\\\"":[{\\\\""id\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""name\\\\"":\\\\""LeftAxis-1\\\\"",\\\\""type\\\\"":\\\\""value\\\\"",\\\\""position\\\\"":\\\\""left\\\\"",\\\\""" +"show\\\\"":true,\\\\""style\\\\"":{},\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\"",\\\\""mode\\\\"":\\\\""normal\\\\""},\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""rotate\\\\"":0,\\\\""filter\\\\"":false,\\\\""truncate\\\\"":100},\\\\""title\\\\"":' + visulizationjson = visulizationjson+'{\\\\""text\\\\"":\\\\""'+yaxisname+'\\\\""}}],\\\\""seriesParams\\\\"":[{\\\\""show\\\\"":\\\\""true\\\\"",\\\\""type\\\\"":\\\\""line\\\\"",\\\\""mode\\\\"":\\\\""normal\\\\"",\\\\""data\\\\"":' + visulizationjson = visulizationjson+'{\\\\""label\\\\"":\\\\""'+yaxisname+'\\\\"",\\\\""id\\\\"":\\\\""1\\\\""},\\\\""valueAxis\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""drawLinesBetweenPoints\\\\"":true,\\\\""showCircles\\\\"":true}],\\\\""addTooltip\\\\"":true,\\\\""addLegend\\\\"":true,\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""times\\\\"":[],\\\\""addTimeMarker\\\\"":false},\\\\""aggs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""avg\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+str(ycolumn)+'\\\\""}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+xcolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""desc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",""uiStateJSON"": ""{}"", ""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {""searchSourceJSON"": ""{\\\\""index\\\\"":\\\\""'+datasetindex+'\\\\"",\\\\""query\\\\"":{\\\\""query\\\\"":\\\\""\\\\"",\\\\""language\\\\"":\\\\""lucene\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"": {""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid): + title = 'aion_visualization_'+xcolumn+""_""+ycolumn+""_barchart"" + yaxisname = 'Average '+ycolumn + datasetindex = datasetid + visulizationjson = '[{""_id"": ""123456"",""_type"": ""visualization"",""_source"": {""title"":""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": ""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = 
visulizationjson+'\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""params\\\\"":{\\\\""addLegend\\\\"":true,\\\\""addTimeMarker\\\\"":false,\\\\""addTooltip\\\\"":true,\\\\""categoryAxes\\\\"":[{\\\\""id\\\\"":\\\\""CategoryAxis-1\\\\"",\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""position\\\\"":\\\\""bottom\\\\"",\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""title\\\\"":{},\\\\""type\\\\"":\\\\""category\\\\""}],\\\\""grid\\\\"":{\\\\""categoryLines\\\\"":false,\\\\""style\\\\"":{\\\\""color\\\\"":\\\\""#eee\\\\""}},\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""seriesParams\\\\"":[{\\\\""data\\\\"":{\\\\""id\\\\"":\\\\""1\\\\"",' + visulizationjson = visulizationjson+'\\\\""label\\\\"":\\\\""'+yaxisname+'\\\\""},' + visulizationjson = visulizationjson+'\\\\""drawLinesBetweenPoints\\\\"":true,\\\\""mode\\\\"":\\\\""stacked\\\\"",\\\\""show\\\\"":\\\\""true\\\\"",\\\\""showCircles\\\\"":true,\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""valueAxis\\\\"":\\\\""ValueAxis-1\\\\""}],\\\\""times\\\\"":[],\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""valueAxes\\\\"":[{\\\\""id\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""labels\\\\"":{\\\\""filter\\\\"":false,\\\\""rotate\\\\"":0,\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""name\\\\"":\\\\""LeftAxis-1\\\\"",\\\\""position\\\\"":\\\\""left\\\\"",\\\\""scale\\\\"":{\\\\""mode\\\\"":\\\\""normal\\\\"",\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""title\\\\"":' + visulizationjson = visulizationjson+'{\\\\""text\\\\"":\\\\""'+yaxisname+'\\\\""},' + visulizationjson = visulizationjson+'\\\\""type\\\\"":\\\\""value\\\\""}]},\\\\""aggs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""avg\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+str(xcolumn)+'\\\\""}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+ycolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""asc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",""uiStateJSON"":""{}"",""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {' + visulizationjson = visulizationjson+'""searchSourceJSON"": ""{\\\\""index\\\\"":\\\\""'+datasetindex+'\\\\"",\\\\""query\\\\"":{\\\\""language\\\\"":\\\\""lucene\\\\"",\\\\""query\\\\"":\\\\""\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"":{""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def drawpiechart(self,xcolumn,deploy_path,datasetid): + title = 'aion_visualization_'+xcolumn+""_piechart"" + datasetindex = datasetid + visulizationjson = '[{""_id"": ""123456"",""_type"": ""visualization"",""_source"": {""title"":""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": ""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = 
visulizationjson+'\\\\""type\\\\"":\\\\""pie\\\\"",\\\\""params\\\\"":{\\\\""type\\\\"":\\\\""pie\\\\"",\\\\""addTooltip\\\\"":true,\\\\""addLegend\\\\"":true,\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""isDonut\\\\"":true,\\\\""labels\\\\"":{\\\\""show\\\\"":false,\\\\""values\\\\"":true,\\\\""last_level\\\\"":true,\\\\""truncate\\\\"":100}},\\\\""aggs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""count\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+xcolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""asc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",' + visulizationjson = visulizationjson+'""uiStateJSON"": ""{}"",""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {""searchSourceJSON"":""{\\\\""index\\\\"":\\\\""'+datasetid+'\\\\"",\\\\""query\\\\"":{\\\\""query\\\\"":\\\\""\\\\"",\\\\""language\\\\"":\\\\""lucene\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"": {""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def get_confusion_matrix(self,df): + setOfyTrue = set(df['actual']) + unqClassLst = list(setOfyTrue) + if(str(self.labelMaps) != '{}'): + inv_mapping_dict = {v: k for k, v in self.labelMaps.items()} + unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) + unqClassLst2 = list(unqClassLst2) + else: + unqClassLst2 = unqClassLst + indexName = [] + columnName = [] + for item in unqClassLst2: + indexName.append(""act:""+str(item)) + columnName.append(""pre:""+str(item)) + result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName) + resultjson = result.to_json(orient='index') + return(resultjson) + + def DistributionFinder(self,data): + try: + + distributionName ="""" + sse =0.0 + KStestStatic=0.0 + dataType="""" + if(data.dtype == ""float64""): + dataType =""Continuous"" + elif(data.dtype ==""int"" or data.dtype ==""int64""): + dataType=""Discrete"" + + if(dataType == ""Discrete""): + distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson] + index, counts = np.unique(abs(data.astype(int)),return_counts=True) + + if(len(index)>=2): + best_sse = np.inf + y1=[] + total=sum(counts) + mean=float(sum(index*counts))/total + variance=float((sum(index**2*counts) -total*mean**2))/(total-1) + dispersion=mean/float(variance) + theta=1/float(dispersion) + r=mean*(float(theta)/1-theta) + + for j in counts: + y1.append(float(j)/total) + + pmf1=st.bernoulli.pmf(index,mean) + pmf2=st.binom.pmf(index,len(index),p=mean/len(index)) + pmf3=st.geom.pmf(index,1/float(1+mean)) + pmf4=st.nbinom.pmf(index,mean,r) + pmf5=st.poisson.pmf(index,mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1- pmf5, 2.0)) + + sselist=[sse1,sse2,sse3,sse4,sse5] + for i in range(0,len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No 
Distribution"" + best_sse = 0.0 + + distributionName =best_distribution + sse=best_sse + + elif(dataType == ""Continuous""): + + distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] + + best_distribution = st.norm.name + best_sse = np.inf + datamin=data.min() + datamax=data.max() + nrange=datamax-datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') + + params = distribution.fit(data.astype(float)) + # Separate parts of parameters + arg = params[:-2] + loc = params[-2] + scale = params[-1] + + # Calculate fitted PDF and error with fit in distribution + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if(best_sse >sse > 0): + best_distribution = distribution.name + best_sse = sse + + distributionName =best_distribution + sse=best_sse + except: + response = str(sys.exc_info()[0]) + message='Job has Failed'+response + print(message) + return distributionName,sse + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited" +"unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import matplotlib.pyplot as plt +from lifelines import KaplanMeierFitter, CoxPHFitter +from lifelines.utils import datetimes_to_durations +import logging +import numpy as np +import re +import sys +import os + + +class SurvivalAnalysis(object): + + def __init__(self, df, pipe, method, event_column, duration_column, filterExpression, train_features_type,start=None, end=None): + pd.options.display.width = 30 + self.df = df + self.pipe = pipe + self.train_features_type = train_features_type + self.filterExpression = filterExpression + self.covariateExpression = filterExpression + self.method = method + self.event_column = event_column + if start is not None and end is not None: + self.df['duration'], _ = datetimes_to_durations(start, end) + self.duration_column = 'duration' + else: + self.duration_column = duration_column + self.models = [] + self.score = 0 + self.log = logging.getLogger('eion') + self.plots = [] + + def transform_filter_expression(self, covariate, covariate_input): + ''' + Filter expression given by user will be encoded if it is categorical and if it is a numerical feature that + is normalised in data profiler, in filter expression feature also it will be converted to normalised value + ''' + cols = list(self.df.columns) + if self.duration_column in cols: + cols.remove(self.duration_column) + if self.event_column in cols: + cols.remove(self.event_column) + df_filter = pd.DataFrame([{covariate:covariate_input}], columns=cols) + df_filter[covariate] = df_filter[covariate].astype(self.train_features_type[covariate]) + df_transform_array = self.pipe.transform(df_filter) + df_transform = pd.DataFrame(df_transform_array, columns=cols) + return df_transform[covariate].iloc[0] + + def learn(self): + self.log.info('\\n---------- SurvivalAnalysis learner has started ----------') + self.log.info('\\n---------- SurvivalAnalysis learner method is ""%s"" ----------' % self.method) + + if self.method.lower() in ['kaplanmeierfitter', 'kaplanmeier', 'kaplan-meier', 'kaplan meier', 'kaplan', 'km', + 'kmf']: + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has started ----------' % self.method) + kmf = KaplanMeierFitter() + T = self.df[self.duration_column] + E = self.df[self.event_column] + self.log.info('\\n T : \\n%s' % str(T)) + self.log.info('\\n E : \\n%s' % str(E)) + K = kmf.fit(T, E) + kmf_sf = K.survival_function_ + kmf_sf_json = self.survival_probability_to_json(kmf_sf) + self.models.append(K) + if isinstance(self.filterExpression, str): + df_f, df_n, refined_filter_expression = self.parse_filterExpression() + kmf1 = KaplanMeierFitter() + kmf2 = KaplanMeierFitter() + self.log.info( + '\\n---------- SurvivalAnalysis learner ""%s"" fitting for filter expression has started----------' % self.method) + T1 = df_f[self.duration_column] + E1 = df_f[self.event_column] + T2 = df_n[self.duration_column] + E2 = df_n[self.event_column] + kmf1.fit(T1, E1) + fig, ax = plt.subplots(1, 1) + ax = kmf1.plot_survival_function(ax=ax, label='%s' % refined_filter_expression) + self.log.info( + '\\n---------- SurvivalAnalysis learner ""%s"" fitting for filter expression has ended----------' % self.method) + plt.title(""KM Survival Functions - Filter vs Negation"") + self.log.info( + '\\n---------- SurvivalAnalysis learner ""%s"" fitting for negation has started----------' % self.method) + kmf2.fit(T2, E2) + ax = kmf2.plot_survival_function(ax=ax, label='~%s' % refined_filter_expression) + self.log.info( + '\\n---------- SurvivalAnalysis learner ""%s"" 
fitting for negation has ended----------' % self.method) + self.models.extend([kmf1, kmf2]) + + kmf1_sf = kmf1.survival_function_ + kmf2_sf = kmf2.survival_function_ + kmf1_sf_json = self.survival_probability_to_json(kmf1_sf) + self.plots.append(fig) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------' % self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter') + return kmf1_sf_json + else: + fig, ax = plt.subplots(1, 1) + ax = kmf_sf.plot(ax=ax) + plt.title(""KM Survival Functions"") + self.plots.append(fig) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------' % self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter') + return kmf_sf_json + + + elif self.method.lower() in ['coxphfitter', 'coxregression', 'cox-regression', 'cox regression', + 'coxproportionalhazard', 'coxph', 'cox', 'cph']: + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has started ----------' % self.method) + + cph = CoxPHFitter(penalizer=0.1) + self.df = self.drop_constant_features(self.df) + C = cph.fit(self.df, self.duration_column, self.event_column) + self.models.append(C) + cph_sf = C.baseline_survival_ + self.score = C.score(self.df, scoring_method=""concordance_index"") + self.log.info( + '\\n---------- SurvivalAnalysis learner ""%s"" score is ""%s""----------' % (self.method, str(self.score))) + cph_sf_json = self.survival_probability_to_json(cph_sf) + if isinstance(self.covariateExpression, str): + covariate, covariate_inputs, covariate_values = self.parse_covariateExpression() + fig, (ax1, ax2) = plt.subplots(1, 2) + fig.tight_layout() + ax1 = C.plot(ax=ax1, hazard_ratios=True) + self.log.info('\\n Summary : \\n%s' % str(C.summary)) + ax1.set_title(""COX hazard ratio"") + ax2 = C.plot_partial_effects_on_outcome(covariate, covariate_values, ax=ax2) + mylabels = [covariate + '=' + str(x) for x in covariate_inputs] + mylabels.append('baseline') + ax2.legend(labels=mylabels) + ax2.set_title(""Covariate Plot"") + self.plots.append(fig) + else: + fig = plt.figure() + ax1 = C.plot(hazard_ratios=True) + self.log.info('\\n Summary : \\n%s' % str(C.summary)) + plt.title(""COX hazard ratio"") + self.plots.append(fig) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------' % self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + self.log.info('Status:- |... Algorithm applied: CoxPHFitter') + return cph_sf_json + + def parse_filterExpression(self): + import operator + self.log.info('\\n---------- Filter Expression parsing has started ----------') + self.log.info('Filter Expression provided : %s' % self.filterExpression) + self.log.info('Shape before filter : %s' % str(self.df.shape)) + f = self.filterExpression.split('&') + f = list(filter(None, f)) + if len(f) == 1: + p = '[<>=!]=?' 
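# Editor's hedged sketch of the operator-parsing step used just below: the
# regex '[<>=!]=?' pulls the comparison operator out of a filter expression
# such as "age>=40", and the operator module maps it to a callable for row
# filtering. The example DataFrame and expression are illustrative only.
import operator
import re
import pandas as pd

OPS = {'==': operator.eq, '>': operator.gt, '<': operator.lt,
       '>=': operator.ge, '<=': operator.le, '!=': operator.ne}

def split_filter(expression: str):
    op_token = re.findall(r'[<>=!]=?', expression)[0]
    column, value = [part.strip().strip('\'"') for part in expression.split(op_token)]
    return column, op_token, value

if __name__ == '__main__':
    df = pd.DataFrame({'age': [25, 40, 61], 'event': [0, 1, 1]})
    col, op_token, value = split_filter('age>=40')
    matched = df[OPS[op_token](df[col], float(value))]
    print(col, op_token, value)   # age >= 40
    print(matched.shape)          # (2, 2)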
+ op = re.findall(p, self.filterExpression)[0] + covariate, covariate_input = [x.strip().strip('\\'').strip('\\""') for x in self.filterExpression.split(op)] + refined_filter_expression = covariate + op + covariate_input + self.log.info('Final refined filter : %s' % refined_filter_expression) + ops = {""=="": operator.eq, "">"": operator.gt, ""<"": operator.lt, "">="": operator.ge, ""<="": operator.le, + ""!="": operator.ne} + try: + fv = self.transform_filter_expression(covariate, covariate_input) + df_f = self.df[ops[op](self.df[covariate], fv)] + self.log.info('Shape after filter : %s' % str(df_f.shape)) + df_n = self.df[~self.df[covariate].isin(df_f[covariate])] + self.log.info('Shape of negation : %s' % str(df_n.shape)) + self.log.info('---------- Filter Expression has ended ----------') + return df_f, df_n, refined_filter_expression + except Exception: + self.log.info('\\n-----> Filter Expression parsing encountered error!!!') + exc_type, exc_obj, exc_tb = sys.exc_info() + if exc_type == IndexError or ValueError or KeyError: + self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid') + self.log.info('Valid examples are ""A>100"", ""B==category1"", ""C>=10 && C<=20"" etc..') + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno)) + raise Exception(str(exc_type)+str(exc_obj)) + else: + full_f = [] + try: + for filterExpression in f: + p = '[<>=!]=?' + op = re.findall(p, filterExpression)[0] + covariate, covariate_input = [x.strip().strip('\\'').strip('\\""') for x in filterExpression.split(op)] + full_f.append(covariate + op + covariate_input) + ops = {""=="": operator.eq, "">"": operator.gt, ""<"": operator.lt, "">="": operator.ge, ""<="": operator.le, + ""!="": operator.ne} + + fv = self.transform_filter_expression(covariate, covariate_input) + df_f = self.df[ops[op](self.df[covariate], fv)] + df_n = self.df[~self.df[covariate].isin(df_f[covariate])] + + refined_filter_expression = "" & "".join(full_f) + self.log.info('Final refined filter : %s' % refined_filter_expression) + self.log.info('Shape after filter : %s' % str(df_f.shape)) + self.log.info('Shape of negation : %s' % str(df_n.shape)) + self.log.info('---------- Filter Expression has ended ----------') + return df_f, df_n, refined_filter_expression + # except (IndexError, ValueError, KeyError): + except Exception: + self.log.info('\\n-----> Filter Expression parsing encountered error!!!') + exc_type, exc_obj, exc_tb = sys.exc_info() + if exc_type == IndexError or ValueError or KeyError: + self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid') + self.log.info('Valid examples are ""A>100"", ""B==category1"", ""C>=10 && C<=20"" etc..') + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno)) + raise Exception(str(exc_type)+str(exc_obj)) + + def parse_covariateExpression(self): + self.log.info('\\n---------- Covariate Expression parsing has started ----------') + self.log.info('\\n Covariate Expression provided : %s' % self.covariateExpression) + import ast + p = '[=:]' + try: + op = re.findall(p, self.covariateExpression)[0] + covariate, covariate_inputs = [x.strip().strip('\\'').strip('\\""') for x in + self.covariateExpression.split(op)] + covariate_inputs = ast.literal_eval(covariate_inputs) + covariate_values = [self.transform_filter_expression(covariate, x) for x in covariate_inputs] + 
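# Editor's hedged, self-contained sketch of the lifelines usage that learn()
# above builds on: a Kaplan-Meier fit plus the survival-probability JSON dump
# performed by survival_probability_to_json(). Durations and event flags below
# are synthetic.
import pandas as pd
from lifelines import KaplanMeierFitter

durations = [5, 6, 6, 2, 4, 4, 7, 8, 3, 9]   # e.g. months under observation
events    = [1, 0, 1, 1, 1, 0, 1, 0, 1, 1]   # 1 = event occurred, 0 = censored

kmf = KaplanMeierFitter()
kmf.fit(durations, event_observed=events)

sf = kmf.survival_function_                  # DataFrame indexed by the timeline
sf = sf[sf.columns[0]].apply(lambda p: '%4.2f' % (p * 100)).reset_index()
print(sf.to_json(orient='records'))          # same orient used above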
self.log.info('\\n---------- Covariate Expression parsing has ended ----------') + return covariate, covariate_inputs, covariate_values + except Exception: + self.log.info('\\n-----> Covariate Expression parsing encountered error!!!') + exc_type, exc_obj, exc_tb = sys.exc_info() + if exc_type == IndexError or ValueError or KeyError: + self.log.info('----->Given covariate expression '+ self.filterExpression +' is invalid') + self.log.info(""\\n Valid examples are A=['Yes','No'] or B=[100,500,1000]"") + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno)) + raise Exception(str(exc_type)+str(exc_obj)) + + def" +"survival_probability_to_json(self, sf): + ''' + sf = Survival function i.e. KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_ + returns json of survival probabilities + ''' + sf = sf[sf.columns[0]].apply(lambda x: ""%4.2f"" % (x * 100)) + self.log.info('\\n Survival probabilities : \\n%s' % str(sf)) + sf = sf.reset_index() + sf = sf.sort_values(sf.columns[0]) + sf_json = sf.to_json(orient='records') + self.log.info('\\n Survival probability json : \\n%s' % str(sf_json)) + + return sf_json + + def drop_constant_features(self, df): + dropped = [] + for col in df.columns: + if (len(df[col].unique()) == 1) and (col not in [self.duration_column, self.event_column]): + df.drop(col, inplace=True, axis=1) + dropped.append(col) + if len(dropped) != 0: + self.log.info('\\n Dropping constant features %s' % str(col)) + self.log.info('\\n After dropping constant features : \\n%s' % str(df)) + return df + + def predict(self): + if self.method == 'KaplanMeierFitter': + return self.model.predict(self.test[self.duration_column]) + + + elif self.method == 'CoxPHFitter': + res = [] + for idx, row in self.test.iterrows(): + res.append( + self.model.predict_survival_function(self.test, times=row[self.model.duration_col])[idx].values[0]) + return pd.DataFrame(res) + import os +import traceback +import sys +print(""before function process"") +def process(version): + print(""inside fun process"") + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + print(currentDirectory) + try: + from os.path import expanduser + import platform + import subprocess + import sys + import demoji + try: + print('Downloading NLTK additional packages...') + import nltk + nltk.download('punkt') + nltk.download('wordnet') + nltk.download('stopwords') + nltk.download('averaged_perceptron_tagger') + except Exception as e: + print('NLTK Error: '+str(e)) + pass + from appbe.dataPath import DATA_DIR + + import shutil + import importlib + + license_path = DATA_DIR + if os.path.isdir(license_path) == False: + os.makedirs(license_path) + + import warnings + warnings.filterwarnings(""ignore"") + + LicenseFolder = os.path.join(license_path,'License') + if os.path.isdir(LicenseFolder) == False: + os.makedirs(LicenseFolder) + + + sqlite_path = os.path.join(license_path,'sqlite') + if os.path.isdir(sqlite_path) == False: + os.makedirs(sqlite_path) + pretrainedModel_path = os.path.join(license_path,'PreTrainedModels') + if os.path.isdir(pretrainedModel_path) == False: + os.makedirs(pretrainedModel_path) + config_path = os.path.join(license_path,'config') + if os.path.isdir(config_path) == False: + os.makedirs(config_path) + target_path = os.path.join(license_path,'target') + if os.path.isdir(target_path) == False: + os.makedirs(target_path) + data_path = os.path.join(license_path,'storage') + if os.path.isdir(data_path) == False: + 
os.makedirs(data_path) + log_path = os.path.join(license_path,'logs') + if os.path.isdir(log_path) == False: + os.makedirs(log_path) + + + configFolder = os.path.join(currentDirectory,'..','config') + for file in os.listdir(configFolder): + if file.endswith("".var""): + os.remove(os.path.join(configFolder,file)) + versionfile = os.path.join(configFolder,str(version)+'.var') + with open(versionfile, 'w') as fp: + pass + + manage_path = os.path.join(currentDirectory,'..','aion.py') + print('Setting up Django Environment for AION User Interface') + proc = subprocess.Popen([sys.executable, manage_path, ""-m"",""migrateappfe""],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout, stderr) = proc.communicate() + if proc.returncode != 0: + err_string = stderr.decode('utf8') + import re + result = re.search(""No module named '(.*)'"", err_string) + if 'ModuleNotFoundError' in err_string: + print('\\n""{}"" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1))) + else: + print('\\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION') + raise Exception(err_string) + else: + print('AION User Interface successfully set') + print('--------------AION Installed Successfully--------------') + except Exception as e: + print(e) + f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), ""w"") + f.write(str(traceback.format_exc())) + f.close() + pass + +if __name__ == ""__main__"": + process(sys.argv[1]) import os +import traceback + +def process(version): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + try: + import win32com.client + from os.path import expanduser + import platform + import subprocess + import sys + import demoji + try: + print('Downloading NLTK additional packages...') + import nltk + nltk.download('punkt') + nltk.download('wordnet') + nltk.download('stopwords') + nltk.download('averaged_perceptron_tagger') + except Exception as e: + print('NLTK Error: '+str(e)) + pass + from appbe.dataPath import DATA_DIR + from win32com.shell import shell, shellcon + import shutil + import importlib + + license_path = DATA_DIR + if os.path.isdir(license_path) == False: + os.makedirs(license_path) + + import warnings + warnings.filterwarnings(""ignore"") + + LicenseFolder = os.path.join(license_path,'License') + if os.path.isdir(LicenseFolder) == False: + os.makedirs(LicenseFolder) + + + sqlite_path = os.path.join(license_path,'sqlite') + if os.path.isdir(sqlite_path) == False: + os.makedirs(sqlite_path) + pretrainedModel_path = os.path.join(license_path,'PreTrainedModels') + if os.path.isdir(pretrainedModel_path) == False: + os.makedirs(pretrainedModel_path) + config_path = os.path.join(license_path,'config') + if os.path.isdir(config_path) == False: + os.makedirs(config_path) + target_path = os.path.join(license_path,'target') + if os.path.isdir(target_path) == False: + os.makedirs(target_path) + data_path = os.path.join(license_path,'storage') + if os.path.isdir(data_path) == False: + os.makedirs(data_path) + log_path = os.path.join(license_path,'logs') + if os.path.isdir(log_path) == False: + os.makedirs(log_path) + + + configFolder = os.path.join(currentDirectory,'..','config') + for file in os.listdir(configFolder): + if file.endswith("".var""): + os.remove(os.path.join(configFolder,file)) + versionfile = os.path.join(configFolder,str(version)+'.var') + with open(versionfile, 'w') as fp: + pass + + manage_path = os.path.join(currentDirectory,'..','aion.py') + 
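# Apply the Django migrations for the AION UI by running aion.py with 'migrateappfe' in a subprocess; a missing module is reported to the user as an incomplete installation. +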
print('Setting up Django Environment for AION User Interface') + proc = subprocess.Popen([sys.executable, manage_path, ""-m"",""migrateappfe""],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout, stderr) = proc.communicate() + if proc.returncode != 0: + err_string = stderr.decode('utf8') + import re + result = re.search(""No module named '(.*)'"", err_string) + if 'ModuleNotFoundError' in err_string: + print('\\n""{}"" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1))) + else: + print('\\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION') + raise Exception(err_string) + else: + print('AION User Interface successfully set') + desktop = shell.SHGetFolderPath (0, shellcon.CSIDL_DESKTOP, 0, 0) + #desktop = os.path.expanduser('~/Desktop') + path = os.path.join(desktop, 'Explorer {0}.lnk'.format(version)) + target = os.path.normpath(os.path.join(currentDirectory,'..', 'sbin', 'AION_Explorer.bat')) + icon = os.path.join(currentDirectory,'icons','aion.ico') + shell = win32com.client.Dispatch(""WScript.Shell"") + shortcut = shell.CreateShortCut(path) + shortcut.Targetpath = '""'+target+'""' + shortcut.WorkingDirectory = currentDirectory + #shortcut.WorkingDirectory = os.path.dirname(__file__) + shortcut.IconLocation = icon + shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal + shortcut.save() + path = os.path.join(desktop, 'Shell {0}.lnk'.format(version)) + target = os.path.normpath(os.path.join(currentDirectory,'..','sbin', 'AION_Shell.bat')) + icon = os.path.join(currentDirectory,'icons','aion_shell.ico') + shell = win32com.client.Dispatch(""WScript.Shell"") + shortcut = shell.CreateShortCut(path) + shortcut.Targetpath = '""'+target+'""' + shortcut.WorkingDirectory = currentDirectory + #shortcut.WorkingDirectory = os.path.dirname(__file__) + shortcut.IconLocation = icon + shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal + shortcut.save() + print('--------------AION Installed Successfully--------------') + except Exception as e: + print(e) + f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), ""w"") + f.write(str(traceback.format_exc())) + f.close() + pass + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' import warnings +import sys +warnings.simplefilter(action='ignore', category=FutureWarning) +import xgboost as xgb +import dask.array as da +import shutil +import dask.distributed +import dask.dataframe as dd +import dask_ml +import logging +from sklearn.metrics import accuracy_score, recall_score, \\ + roc_auc_score, precision_score, f1_score, \\ + mean_squared_error, mean_absolute_error, \\ + r2_score, classification_report, confusion_matrix, \\ + mean_absolute_percentage_error + +import lightgbm as lgb +import re +from sklearn.pipeline import Pipeline +from sklearn.base import BaseEstimator, TransformerMixin +from dask_ml.impute import SimpleImputer +from dask_ml.compose import ColumnTransformer +from dask_ml.decomposition import TruncatedSVD, PCA +from dask_ml.preprocessing import StandardScaler, \\ + MinMaxScaler, \\ + OneHotEncoder, LabelEncoder +from dask_ml.wrappers import ParallelPostFit +import numpy as np +import json +import time +from sklearn.ensemble import IsolationForest +import joblib +import pickle as pkl +import os +predict_config={} + +dask.config.set({""distributed.workers.memory.terminate"": 0.99}) +dask.config.set({""array.chunk-size"": ""128 MiB""}) +dask.config.set({""distributed.admin.tick.limit"": ""3h""}) + +# dask.config.set({""distributed.workers.memory.pause"": 0.9}) + +class MinImputer(BaseEstimator, TransformerMixin): + def fit(self, X, y=None): + return self + def transform(self, X, y=None): + # to_fillna = ['public_meeting', 'scheme_management', 'permit'] + # X[to_fillna] = X[to_fillna].fillna(value='NaN') + # X[to_fillna] = X[to_fillna].astype(str) + X = X.fillna(value=X.min()) + # X = X.astype(str) + return X + +class MaxImputer(BaseEstimator, TransformerMixin): + def fit(self, X, y=None): + return self + def transform(self, X, y=None): + X = X.fillna(value=X.max()) + return X + +class DropImputer(BaseEstimator, TransformerMixin): + def fit(self, X, y=None): + return self + def transform(self, X, y=None): + X = X.dropna() + return X + +class ModeCategoricalImputer(BaseEstimator, TransformerMixin): + def fit(self, X, y=None): + return self + def transform(self, X, y=None): + X = X.fillna(value=X.mode()) + return X + +class IsoForestOutlierExtractor(TransformerMixin): + def fit(self, X, y=None): + return self + def transform(self, X, y): + lcf = IsolationForest() + with joblib.parallel_backend('dask'): + lcf.fit(X) + y_pred_train = lcf.predict(X) + y_pred_train = y_pred_train == 1 + return X + +def load_config_json(json_file): + with open(json_file" +", 'r') as j: + contents = json.loads(j.read()) + return contents + +def load_data_dask(data_file, npartitions=500): + big_df = dd.read_csv(data_file, # sep=r'\\s*,\\s*', + assume_missing=True, + parse_dates=True, infer_datetime_format=True, + sample=1000000, + # dtype={'caliper': 'object', + # 'timestamp': 'object'}, + # dtype='object', + na_values=['-','?'] + ) + big_df = big_df.repartition(npartitions) + return big_df + +def get_dask_eda(df_dask): + descr = df_dask.describe().compute() + corr = df_dask.corr().compute() + return descr, corr + +def normalization(config): + scaler = config[""advance""] \\ + [""profiler""][""normalization""] + scaler_method = None + if scaler[""minMax""] == ""True"": + scaler_method = MinMaxScaler() + if scaler[""standardScaler""] == ""True"": + scaler_method = StandardScaler() + return scaler_method + +def categorical_encoding(config): + encoder = config[""advance""][""profiler""] \\ + [""categoryEncoding""] + encoder_method = None + if encoder[""OneHotEncoding""] == 
""True"": + encoder_method = OneHotEncoder() + # OneHotEncoder(handle_unknown='ignore', sparse=False) + if encoder[""LabelEncoding""] == ""True"": + encoder_method = LabelEncoder() + return encoder_method + +def numeric_feature_imputing(config): + imputer_numeric_method = None + imputer_numeric = config[""advance""] \\ + [""profiler""][""numericalFillMethod""] + if imputer_numeric[""Median""] == ""True"": + print(""Median Simple Imputer"") + imputer_numeric_method = SimpleImputer(strategy='median') + if imputer_numeric[""Mean""] == ""True"": + print(""Mean Simple Imputer"") + imputer_numeric_method = SimpleImputer(strategy='mean') + if imputer_numeric[""Min""] == ""True"": + print(""Min Simple Imputer"") + imputer_numeric_method = MinImputer() + if imputer_numeric[""Max""] == ""True"": + print(""Max Simple Imputer"") + imputer_numeric_method = MaxImputer() + if imputer_numeric[""Zero""] == ""True"": + print(""Zero Simple Imputer"") + imputer_numeric_method = SimpleImputer(strategy='constant', + fill_value=0) + # if imputer_numeric[""Drop""] == ""True"": + # print(""Median Simple Imputer"") + # imputer_numeric_method = DropImputer() + return imputer_numeric_method + +def categorical_feature_imputing(config): + imputer_categorical_method = None + imputer_categorical = config[""advance""] \\ + [""profiler""][""categoricalFillMethod""] + if imputer_categorical[""MostFrequent""] == ""True"": + imputer_categorical_method = SimpleImputer(strategy='most_frequent') + if imputer_categorical[""Mode""] == ""True"": + imputer_categorical_method = ModeCategoricalImputer() + if imputer_categorical[""Zero""] == ""True"": + imputer_categorical_method = SimpleImputer(strategy='constant', + fill_value=0) + return imputer_categorical_method + +def preprocessing_pipeline(config, X_train): + print(""Start preprocessing"") + scaler_method = normalization(config) + encoding_method = categorical_encoding(config) + imputer_numeric_method = numeric_feature_imputing(config) + imputer_categorical_method = categorical_feature_imputing(config) + + numeric_pipeline = Pipeline(steps=[ + ('impute', imputer_numeric_method), + ('scale', scaler_method) + ]) + + categorical_pipeline = Pipeline(steps=[ + ('impute', imputer_categorical_method), + ('encoding', encoding_method) + ]) + + numerical_features = X_train._get_numeric_data().columns.values.tolist() + categorical_features = list(set(X_train.columns) - set(X_train._get_numeric_data().columns)) + print(""numerical_features: "", numerical_features) + print(""categorical_features: "", categorical_features) + full_processor = ColumnTransformer(transformers=[ + ('number', numeric_pipeline, numerical_features), + # ('category', categorical_pipeline, categorical_features) + ]) + return full_processor + +def full_pipeline(X_train, X_test, config): + full_processor = preprocessing_pipeline(config, X_train) + reduce_dim = config[""advance""] \\ + [""selector""][""featureEngineering""] + feature_reduce = None + if reduce_dim[""SVD""] == ""True"": + feature_reduce = TruncatedSVD(n_components=3) + if reduce_dim[""PCA""] == ""True"": + feature_reduce = PCA(n_components=3) + X_train = full_processor.fit_transform(X_train) + # joblib.dump(full_processor, 'full_processor_pipeline.pkl') + deploy_location = config[""basic""][""modelLocation""] + profiler_file = os.path.join(deploy_location,'model','profiler.pkl') + selector_file = os.path.join(deploy_location,'model','selector.pkl') + save_pkl(full_processor, profiler_file) + X_test = full_processor.transform(X_test) + 
predict_config['profilerLocation'] = 'profiler.pkl' + if feature_reduce != None: + X_train = feature_reduce.fit_transform(X_train.to_dask_array(lengths=True)) + save_pkl(feature_reduce, selector_file) + predict_config['selectorLocation'] = 'selector.pkl' + # joblib.dump(feature_reduce, 'feature_reduce_pipeline.pkl') + X_test = feature_reduce.transform(X_test.to_dask_array(lengths=True)) + X_train = dd.from_dask_array(X_train) + X_test = dd.from_dask_array(X_test) + else: + predict_config['selectorLocation'] = '' + return X_train, X_test + +def train_xgb_classification(client, X_train, y_train, X_test, config): + print(""Training XGBoost classification"") + model_hyperparams = config[""advance""] \\ + [""distributedlearner_config""] \\ + [""modelParams""] \\ + [""classifierModelParams""] \\ + [""Distributed Extreme Gradient Boosting (XGBoost)""] + dask_model = xgb.dask.DaskXGBClassifier( + tree_method=model_hyperparams[""tree_method""], + n_estimators=int(model_hyperparams[""n_estimators""]), + max_depth=int(model_hyperparams[""max_depth""]), + gamma=float(model_hyperparams[""gamma""]), + min_child_weight=float(model_hyperparams[""min_child_weight""]), + subsample=float(model_hyperparams[""subsample""]), + colsample_bytree=float(model_hyperparams[""colsample_bytree""]), + learning_rate=float(model_hyperparams[""learning_rate""]), + reg_alpha=float(model_hyperparams[""reg_alpha""]), + reg_lambda=float(model_hyperparams[""reg_lambda""]), + random_state=int(model_hyperparams[""random_state""]), + verbosity=3) + dask_model.client = client + X_train, X_test = full_pipeline(X_train, X_test, config) + dask_model.fit(X_train, y_train) + save_model(config, dask_model) + save_config(config) + return dask_model, X_train, X_test + +def train_xgb_regression(client, X_train, y_train, X_test, config): + model_hyperparams = config[""advance""] \\ + [""distributedlearner_config""] \\ + [""modelParams""] \\ + [""regressorModelParams""] \\ + [""Distributed Extreme Gradient Boosting (XGBoost)""] + print(""Training XGBoost regression"") + dask_model = xgb.dask.DaskXGBRegressor( + tree_method=model_hyperparams[""tree_method""], + n_estimators=int(model_hyperparams[""n_estimators""]), + max_depth=int(model_hyperparams[""max_depth""]), + gamma=float(model_hyperparams[""gamma""]), + min_child_weight=float(model_hyperparams[""min_child_weight""]), + subsample=float(model_hyperparams[""subsample""]), + colsample_bytree=float(model_hyperparams[""colsample_bytree""]), + learning_rate=float(model_hyperparams[""learning_rate""]), + reg_alpha=float(model_hyperparams[""reg_alpha""]), + reg_lambda=float(model_hyperparams[""reg_lambda""]), + random_state=int(model_hyperparams[""random_state""]), + verbosity=3) + dask_model.client = client + X_train, X_test = full_pipeline(X_train, X_test, config) + dask_model.fit(X_train, y_train) + # dask_model.fit(X_train, y_train, eval_set=[(X_test, y_test)]) + save_model(config, dask_model) + save_config(config) + return dask_model, X_train, X_test + +def train_lgbm_regression(client, X_train, y_train, X_test, config): + print(""Training lightGBM regression"") + model_hyperparams = config[""advance""] \\ + [""distributedlearner_config""] \\ + [""modelParams""] \\ + [""regressorModelParams""] \\ + [""Distributed Light Gradient Boosting (LightGBM)""] + + dask_model = lgb.DaskLGBMRegressor( + client=client, + n_estimators=int(model_hyperparams[""n_estimators""]), + num_leaves=int(model_hyperparams[""num_leaves""]), + max_depth =int(model_hyperparams[""max_depth""]), + 
learning_rate=float(model_hyperparams[""learning_rate""]), + min_child_samples=int(model_hyperparams[""min_child_samples""]), + reg_alpha=int(model_hyperparams[""reg_alpha""]), + subsample=float(model_hyperparams[""subsample""]), + reg_lambda=int(model_hyperparams[""reg_lambda""]), + colsample_bytree=float(model_hyperparams[""colsample_bytree""]), + n_jobs=4, + verbosity=3) + + X_train, X_test = full_pipeline(X_train, X_test, config) + + # print(""before X_train.shape, y_train.shape"", + # X_train.shape, + # y_train.shape) + # indices = dask_findiforestOutlier(X_train) + # print(""X_train type: "", type(X_train)) + # print(""y_train type: "", type(y_train)) + # X_train, y_train = X_train.iloc[indices, :], \\ + # y_train.iloc[indices] + # print(""after X_train.shape, y_train.shape"", + # X_train.shape, + # y_train.shape) + + dask_model.fit(X_train, y_train) + # dask_model.fit(X_train, y_train, + # # eval_set=[(X_test,y_test), + # # (X_train,y_train)], + # verbose=20,eval_metric='l2') + save_model(config, dask_model) + save_config(config) + return dask_model, X_train, X_test + +def train_lgbm_classification(client, X_train, y_train, X_test, config): + print(""Training lightGBM classification"") + model_hyperparams = config[""advance""] \\ + [""distributedlearner_config""] \\ + [""modelParams""] \\ + [""classifierModelParams""] \\ + [""Distributed Light Gradient Boosting (LightGBM)""] + dask_model = lgb.DaskLGBMClassifier( + client=client, + num_leaves=int(model_hyperparams[""num_leaves""]), + learning_rate=float(model_hyperparams[""learning_rate""]), + feature_fraction=float(model_hyperparams[""feature_fraction""]), + bagging_fraction=float(model_hyperparams[""bagging_fraction""]), + bagging_freq=int(model_hyperparams[""bagging_freq""]), + max_depth=int(model_hyperparams[""max_depth""]), + min_data_in_leaf=int(model_hyperparams[""min_data_in_leaf""]), + n_estimators=int(model_hyperparams[""n_estimators""]), + verbosity=3) + X_train, X_test = full_pipeline(X_train, X_test, config) + dask_model.fit(X_train, y_train) + # dask_model.fit(X_train, y_train, + # eval_set=[(X_test,y_test), + # (X_train,y_train)], + # verbose=20,eval_metric='logloss') + save_model(config, dask_model) + save_config(config) + return dask_model, X_train, X_test + +def evaluate_model_classification(model, config, X_test, y_test, class_names): + metrics = config[""basic""][""scoringCriteria""][""classification""] + y_test = y_test.to_dask_array().compute() + log = logging.getLogger('eion') + X_test = X_test.to_dask_array(lengths=True) + y_pred = model.predict(X_test) + if metrics[""Accuracy""] == ""True"": + # ParallelPostFit(estimator=model, scoring='accuracy') + # score = model.score(X_test, y_test) * 100.0 + score = accuracy_score(y_test, y_pred) * 100.0 + type = 'Accuracy' + log.info('Status:-|... Accuracy Score '+str(score)) + + if metrics[""Recall""] == ""True"": + score = recall_score(y_test, y_pred) + type = 'Recall' + log.info('Status:-|... Recall Score '+str(score)) + + if metrics[""Precision""] == ""True"": + score = precision_score(y_test, y_pred) + type = 'Precision' + log.info('Status:-|... Precision Score '+str(score)) + + if metrics[""F1_Score""] == ""True"": + score = f1_score(y_test, y_pred) +" +" type = 'F1' + log.info('Status:-|... 
F1 Score '+str(score)) + + y_pred_prob = model.predict_proba(X_test) + if len(class_names) == 2: + roc_auc = roc_auc_score(y_test, y_pred) + else: + roc_auc = roc_auc_score(y_test, y_pred_prob, multi_class='ovr') + if metrics[""ROC_AUC""] == ""True"": + score = roc_auc + type = 'ROC_AUC' + log.info('Status:-|... ROC AUC Score '+str(score)) + + class_report = classification_report(y_test, y_pred, output_dict=True, target_names=class_names) + conf_matrix = confusion_matrix(y_test, y_pred) + return type, score, class_report, conf_matrix, roc_auc + +def evaluate_model_regression(model, config, X_test, y_test): + metrics = config[""basic""][""scoringCriteria""][""regression""] + y_pred = model.predict(X_test).compute() + y_test = y_test.to_dask_array().compute() + X_test = X_test.to_dask_array(lengths=True) + log = logging.getLogger('eion') + + mse = mean_squared_error(y_test, y_pred) + rmse = mean_squared_error(y_test, y_pred, squared=False) + norm_rmse = rmse * 100 / (y_test.max() - y_test.min()) + mape = mean_absolute_percentage_error(y_test, y_pred) + r2 = r2_score(y_test, y_pred) + mae = mean_absolute_error(y_test, y_pred) + + if metrics[""Mean Squared Error""] == ""True"": + type = 'Mean Squared Error' + score = mse + log.info('Status:-|... Mean Squared Error '+str(score)) + + if metrics[""Root Mean Squared Error""] == ""True"": + type = 'Root Mean Squared Error' + score = rmse + log.info('Status:-|... Root Mean Square Error '+str(score)) + + if metrics[""R-Squared""] == ""True"": + type = 'R-Squared' + score = r2 + log.info('Status:-|... R Squared Error '+str(score)) + + if metrics[""Mean Absolute Error""] == ""True"": + type = 'Mean Absolute Error' + score = mae + log.info('Status:-|... Mean Absolute Error '+str(score)) + + return type, score, mse, rmse, norm_rmse, r2, mae, mape + +def save_config(config): + deploy_location = config[""basic""][""modelLocation""] + saved_model_file = os.path.join(deploy_location,'etc','config.json') + print(predict_config) + with open (saved_model_file,'w') as f: + json.dump(predict_config, f) + f.close() +def save_model(config, model): + model_name = config[""basic""][""modelName""] + model_version = config[""basic""][""modelVersion""] + analysis_type = config[""basic""][""analysisType""] + deploy_location = config[""basic""][""modelLocation""] + if analysis_type[""classification""] == ""True"": + problem_type = ""classification"" + if analysis_type[""regression""] == ""True"": + problem_type = ""regression"" + print(""model_name"", model_name) + print(""model_version"", model_version) + print(""problem_type"", problem_type) + print(""deploy_location"", deploy_location) + file_name = problem_type + '_' + model_version + "".sav"" + saved_model = os.path.join(deploy_location,'model',file_name) + print(""Save trained model to directory: "", save_model) + with open (saved_model,'wb') as f: + pkl.dump(model,f) + f.close() + predict_config['modelLocation'] = file_name +def save_pkl(model, filename): + with open(filename, 'wb') as f: + pkl.dump(model, f, + protocol=pkl.HIGHEST_PROTOCOL) + + +def dask_findiforestOutlier(X): + print(""Outlier removal with Isolation Forest..."") + isolation_forest = IsolationForest(n_estimators=100) + with joblib.parallel_backend('dask'): + isolation_forest.fit(X) + y_pred_train = isolation_forest.fit_predict(X) + mask_isoForest = y_pred_train != -1 + return mask_isoForest + +def training(configFile): + start_time = time.time() + config = load_config_json(configFile) + + data_dir = config[""basic""][""dataLocation""] + + 
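# Read the Dask cluster sizing options (number of workers, partitions and threads per worker) from the distributed-learner section of the configuration. +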
n_workers = int(config[""advance""] + [""distributedlearner_config""] + [""n_workers""]) + npartitions = int(config[""advance""] + [""distributedlearner_config""] + [""npartitions""]) + + threads_per_worker = int(config[""advance""] + [""distributedlearner_config""] + [""threads_per_worker""]) + predict_config['modelName'] = config[""basic""][""modelName""] + predict_config['modelVersion'] = config[""basic""][""modelVersion""] + predict_config['targetFeature'] = config[""basic""][""targetFeature""] + predict_config['trainingFeatures'] = config[""basic""][""trainingFeatures""] + predict_config['dataLocation'] = config[""basic""][""dataLocation""] + predict_config['n_workers'] = n_workers + predict_config['npartitions'] = npartitions + predict_config['threads_per_worker'] = threads_per_worker + if config['basic']['analysisType'][""classification""] == ""True"": + problemType = ""classification"" + oProblemType = ""Distributed Classification"" + if config['basic']['analysisType'][""regression""] == ""True"": + problemType = ""regression"" + oProblemType = ""Distributed Regression"" + predict_config['analysisType'] = problemType + predict_config['scoringCriteria'] = '' + target_feature = config[""basic""][""targetFeature""] + training_features = config[""basic""][""trainingFeatures""] + deploy_location = config[""basic""][""deployLocation""] + + is_xgb_class = config[""basic""] \\ + [""algorithms""][""classification""] \\ + [""Distributed Extreme Gradient Boosting (XGBoost)""] + + is_lgbm_class = config[""basic""] \\ + [""algorithms""][""classification""] \\ + [""Distributed Light Gradient Boosting (LightGBM)""] + + is_xgb_regress = config[""basic""] \\ + [""algorithms""][""regression""] \\ + [""Distributed Extreme Gradient Boosting (XGBoost)""] + + is_lgbm_regress = config[""basic""] \\ + [""algorithms""][""regression""] \\ + [""Distributed Light Gradient Boosting (LightGBM)""] + + if is_xgb_class==""True"" or is_xgb_regress==""True"": + algorithm = ""Distributed Extreme Gradient Boosting (XGBoost)"" + predict_config['algorithm'] = algorithm + if is_lgbm_class==""True"" or is_lgbm_regress==""True"": + algorithm = ""Distributed Light Gradient Boosting (LightGBM)"" + predict_config['algorithm'] = algorithm + + cluster = dask.distributed.LocalCluster(n_workers=n_workers, + threads_per_worker=threads_per_worker, + # dashboard_address=""127.0.0.1:8787"" + ) + client = dask.distributed.Client(cluster) + df_dask = load_data_dask(data_dir, npartitions=npartitions) + deployFolder = config[""basic""][""deployLocation""] + modelName = config[""basic""][""modelName""] + modelName = modelName.replace("" "", ""_"") + modelVersion = config[""basic""][""modelVersion""] + modelLocation = os.path.join(deployFolder,modelName) + os.makedirs(modelLocation,exist_ok = True) + deployLocation = os.path.join(modelLocation,modelVersion) + predict_config['deployLocation'] = deployLocation + try: + os.makedirs(deployLocation) + except OSError as e: + shutil.rmtree(deployLocation) + time.sleep(2) + os.makedirs(deployLocation) + modelFolderLocation = os.path.join(deployLocation,'model') + try: + os.makedirs(modelFolderLocation) + except OSError as e: + print(""\\nModel Folder Already Exists"") + etcFolderLocation = os.path.join(deployLocation,'etc') + try: + os.makedirs(etcFolderLocation) + except OSError as e: + print(""\\ETC Folder Already Exists"") + logFolderLocation = os.path.join(deployLocation,'log') + try: + os.makedirs(logFolderLocation) + except OSError as e: + print(""\\nLog Folder Already Exists"") + 
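# Replace any existing file handlers on the 'eion' logger so this run's status messages are written to model_training_logs.log. +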
logFileName=os.path.join(logFolderLocation,'model_training_logs.log') + outputjsonFile=os.path.join(deployLocation,'etc','output.json') + filehandler = logging.FileHandler(logFileName, 'w','utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('eion') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + log.info('Status:-|... Distributed Learning Started') + config['basic']['modelLocation'] = deployLocation + # Get input for EDA + # descr, corr = get_dask_eda(df_dask=df_dask) + #print(descr) + # print(corr) + #print(df_dask.columns) + #print(""target feature"", target_feature) + df_dask = df_dask.dropna(subset=[target_feature]) + if is_xgb_class == ""True"" or is_lgbm_class == ""True"": + df_dask = df_dask.categorize(columns=[target_feature]) + df_dask[target_feature] = df_dask[target_feature].astype('category') + df_dask[target_feature] = df_dask[target_feature].cat.as_known() + label_mapping = dict(enumerate(df_dask[target_feature].cat.categories)) + df_dask[target_feature] = df_dask[target_feature].cat.codes + label_mapping_file =os.path.join(deployLocation,'etc','label_mapping.json') + with open(label_mapping_file, 'w') as f: + json.dump(label_mapping, f) + + if config[""advance""][""profiler""][""removeDuplicate""] == ""True"": + df_dask = df_dask.drop_duplicates() + + # Need to dropna for case of categoricalFillMethod + # if config[""advance""][""profiler""][""numericalFillMethod""][""Drop""] == ""True"": + # df_dask = df_dask.dropna() + trainingFeatures = config[""basic""][""trainingFeatures""].split(',') + if target_feature not in trainingFeatures: + trainingFeatures.append(target_feature) + df_dask = df_dask[trainingFeatures] + y = df_dask[target_feature] + X = df_dask.drop(target_feature, axis=1) + + print(""after X.shape, y.shape"", X.shape, y.shape) + + X_train, X_test, y_train, y_test = dask_ml.model_selection.train_test_split(X, y, + test_size=0.2, random_state=0) + trainingFeatures = config[""basic""][""trainingFeatures""].split(',') + + outputJson = None + conf_matrix_dict = {} + train_conf_matrix_dict = {} + try: + if is_xgb_class == ""True"": + modelName = 'Distributed Extreme Gradient Boosting (XGBoost)' + dask_model, X_train, X_test = train_xgb_classification(client, X_train, y_train, X_test, config) + class_names = list(label_mapping.values()) + _, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config, + X_train, y_train, class_names) + scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config, + X_test, y_test, class_names) + for i in range(len(conf_matrix)): + conf_matrix_dict_1 = {} + for j in range(len(conf_matrix[i])): + conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j]) + conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1 + + for i in range(len(train_conf_matrix)): + train_conf_matrix_dict_1 = {} + for j in range(len(train_conf_matrix[i])): + train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j]) + train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1 + # print(roc_auc) + outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\ + 
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\ + 'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\\ + 'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\\ + 'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}], + 'LogFile':logFileName}} + if is_lgbm_class == ""True"": + modelName = 'Distributed Light Gradient Boosting (LightGBM)' + dask_model, X_train, X_test = train_lgbm_classification(client, X_train, y_train, X_test, config) + class_names = list(label_mapping.values()) + _, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config, + X_train, y_train, class_names) + scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config, + X_test, y_test, class_names) + for i in range(len(conf_matrix)): + conf_matrix_dict_1 = {} + for j in range" +"(len(conf_matrix[i])): + conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j]) + conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1 + + for i in range(len(train_conf_matrix)): + train_conf_matrix_dict_1 = {} + for j in range(len(train_conf_matrix[i])): + train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j]) + train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1 + + outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\ + 'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\ + 'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\\ + 'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\\ + 'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}], + 'LogFile':logFileName}} + if is_xgb_regress == ""True"": + modelName = 'Distributed Extreme Gradient Boosting (XGBoost)' + dask_model, X_train, X_test = train_xgb_regression(client, X_train, y_train, X_test, config) + _, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config, + X_train, y_train) + scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config, + X_test, y_test) + outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\ + 'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\ + 'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \\ + 'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \\ + 'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}], + 'LogFile':logFileName}} + if is_lgbm_regress == ""True"": + modelName = 'Distributed Light Gradient Boosting (LightGBM)' + dask_model, X_train, X_test = train_lgbm_regression(client, X_train, y_train, X_test, config) + _, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config, + X_train, y_train) + scoringCreteria, score, mse, rmse, 
norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config, + X_test, y_test) + outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\ + 'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\ + 'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \\ + 'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \\ + 'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}], + 'LogFile':logFileName}} + src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','dl_aion_predict.py') + shutil.copy2(src,deployLocation) + os.rename(os.path.join(deployLocation,'dl_aion_predict.py'),os.path.join(deployLocation,'aion_predict.py')) + + except Exception as e: + outputJson = {""status"":""FAIL"",""message"":str(e)} + print(e) + client.close() + cluster.close() + log.info('Status:-|... Distributed Learning Completed') + with open(outputjsonFile, 'w') as f: + json.dump(outputJson, f) + f.close() + output_json = json.dumps(outputJson) + log.info('aion_learner_status:'+str(output_json)) + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + hdlr.close() + log.removeHandler(hdlr) + print(""\\n"") + print(""aion_learner_status:"",output_json) + print(""\\n"") + end_time = time.time() + print(""--- %s processing time (sec) ---"" % (end_time - start_time)) import autograd +import autograd.numpy as np +import scipy.optimize +from autograd import grad +from autograd.scipy.special import logsumexp +from sklearn.cluster import KMeans + + +class HMM: + """""" + A Hidden Markov Model with Gaussian observations with + unknown means and known precisions. + """""" + def __init__(self, X, config_dict=None): + self.N, self.T, self.D = X.shape + self.K = config_dict['K'] # number of HMM states + self.I = np.eye(self.K) + self.Precision = np.zeros([self.D, self.D, self.K]) + self.X = X + if config_dict['precision'] is None: + for k in np.arange(self.K): + self.Precision[:, :, k] = np.eye(self.D) + else: + self.Precision = config_dict['precision'] + self.dParams_dWeights = None + self.alphaT = None # Store the final beliefs. + self.beta1 = None # store the first timestep beliefs from the beta recursion. + self.forward_trellis = {} # stores \\alpha + self.backward_trellis = {} # stores \\beta + + def initialize_params(self, seed=1234): + np.random.seed(seed) + param_dict = {} + A = np.random.randn(self.K, self.K) + # use k-means to initialize the mean parameters + X = self.X.reshape([-1, self.D]) + kmeans = KMeans(n_clusters=self.K, random_state=seed, + n_init=15).fit(X) + labels = kmeans.labels_ + _, counts = np.unique(labels, return_counts=True) + pi = counts + phi = kmeans.cluster_centers_ + + param_dict['A'] = np.exp(A) + param_dict['pi0'] = pi + param_dict['phi'] = phi + return self.pack_params(param_dict) + + def unpack_params(self, params): + param_dict = dict() + K = self.K + # For unpacking simplex parameters: have packed them as + # log(pi[:-1]) - log(pi[-1]). 
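+ # Append a zero logit to each row before exponentiating, then normalise the rows to recover the K x K transition matrix A.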
+ unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1), + np.zeros((K, 1)), + axis=1) + ) + Z = np.sum(unnorm_A[:, :-1], axis=1) + unnorm_A /= Z[:, np.newaxis] + norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True) + param_dict['A'] = norm_A + + unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0)) + Z = np.sum(unnorm_pi[:-1]) + unnorm_pi /= Z + param_dict['pi0'] = unnorm_pi / unnorm_pi.sum() + param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K) + return param_dict + + def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False): + """""" + Computes the weighted marginal probability of the sequence xseq given parameters; + weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B) + :param xseq: T * D + :param pi: K * 1 + :param phi: D * K + :param wseq: T * 1 + :param A: + :return: + """""" + ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma) + alpha = np.log(pi.ravel()) + wseq[0] * ll[0] + if wseq[0] == 0: + self.forward_trellis[0] = alpha[:, np.newaxis] + for t in np.arange(1, self.T): + alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t] + if wseq[t] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.forward_trellis[t] = alpha[:, np.newaxis] + if store_belief: + # store the final belief + self.alphaT = alpha + return logsumexp(alpha) + + def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False): + """""" + Runs beta recursion; + weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B) + :param xseq: T * D + :param pi: K * 1 + :param phi: D * K + :param wseq: T * 1 + :param A: + :return: + """""" + ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma) + beta = np.zeros_like(pi.ravel()) # log(\\beta) of all ones. + max_t = ll.shape[0] + if wseq[max_t - 1] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.backward_trellis[max_t - 1] = beta[:, np.newaxis] + for i in np.arange(1, max_t): + t = max_t - i - 1 + beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1) + if wseq[t] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.backward_trellis[t] = beta[:, np.newaxis] + # account for the init prob + beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel()) + if store_belief: + # store the final belief + self.beta1 = beta + return logsumexp(beta) + + def weighted_loss(self, params, weights): + """""" + For LOOCV / IF computation within a single sequence. 
Uses weighted alpha recursion + :param params: + :param weights: + :return: + """""" + param_dict = self.unpack_params(params) + logp = self.get_prior_contrib(param_dict) + logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights) + return -logp + + def loss_at_missing_timesteps(self, weights, params): + """""" + :param weights: zeroed out weights indicate missing values + :param params: packed parameters + :return: + """""" + # empty forward and backward trellis + self.clear_trellis() + param_dict = self.unpack_params(params) + # populate forward and backward trellis + lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights, + store_belief=True ) + lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights, + store_belief=True) + assert np.allclose(lpx, lpx_alt) # sanity check + test_ll = [] + # compute loo likelihood + ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision) + # compute posterior p(z_t | x_1,...t-1, t+1,...T) \\forall missing t + tsteps = [] + for t in self.forward_trellis.keys(): + lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx + test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel())) + tsteps.append(t) + # empty forward and backward trellis + self.clear_trellis() + return -np.array(test_ll) + + def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None): + if maxiter: + options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter} + else: + options_dict = {'disp': verbose, 'gtol': 1e-10} + + # Define a function that returns gradients of training loss using Autograd. 
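+ # weighted_loss closes over the supplied weights, so only the packed HMM parameters are optimised here.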
+ training_loss_fun =" +"lambda params: self.weighted_loss(params, weights) + training_gradient_fun = grad(training_loss_fun, 0) + if init_params is None: + init_params = self.initialize_params() + if verbose: + print(""Initial loss: "", training_loss_fun(init_params)) + res = scipy.optimize.minimize(fun=training_loss_fun, + jac=training_gradient_fun, + x0=init_params, + tol=1e-10, + options=options_dict) + if verbose: + print('grad norm =', np.linalg.norm(res.jac)) + return res.x + + def clear_trellis(self): + self.forward_trellis = {} + self.backward_trellis = {} + + #### Required for IJ computation ### + def compute_hessian(self, params_one, weights_one): + return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one) + + def compute_jacobian(self, params_one, weights_one): + return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\\ + (params_one, weights_one).squeeze() + ################################################### + + @staticmethod + def log_obs_lik(x, phi, Sigma): + """""" + :param x: T*D*1 + :param phi: 1*D*K + :param Sigma: D*D*K --- precision matrices per state + :return: ll + """""" + centered_x = x - phi + ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma ) + return ll + + @staticmethod + def pack_params(params_dict): + param_list = [(np.log(params_dict['A'][:, :-1]) - + np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(), + np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]), + params_dict['phi'].ravel()] + return np.concatenate(param_list) + + @staticmethod + def get_prior_contrib(param_dict): + logp = 0.0 + # Prior + logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum() + logp += (1.1 - 1) * np.log(param_dict['A']).sum() + logp += (1.1 - 1) * np.log(param_dict['pi0']).sum() + return logp + + @staticmethod + def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False): + """""" + :param T: length of the sequence + :param pct_to_drop: % of T in the held out fold + :param contiguous: if True generate a block of indices to drop else generate indices by iid sampling + :return: o (the set of indices in the fold) + """""" + if contiguous: + l = np.floor(pct_to_drop / 100. * T) + anchor = np.random.choice(np.arange(l + 1, T)) + o = np.arange(anchor - l, anchor).astype(int) + else: + # i.i.d LWCV + o = np.random.choice(T - 2, size=np.int(pct_to_drop / 100. 
* T), replace=False) + 1 + return o + + @staticmethod + def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, varainces_of_mean=1.0, + diagonal_upweight=False): + """""" + :param K: Number of HMM states + :param T: length of the sequence + """""" + N = 1 # For structured IJ we will remove data / time steps from a single sequence + np.random.seed(seed) + if sigma0 is None: + sigma0 = np.eye(D) + + A = np.random.dirichlet(alpha=np.ones(K), size=K) + if diagonal_upweight: + A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions + A = A / A.sum(axis=1) + + pi0 = np.random.dirichlet(alpha=np.ones(K)) + mus = np.random.normal(size=(K, D), scale=np.sqrt(varainces_of_mean)) + zs = np.empty((N, T), dtype=np.int) + X = np.empty((N, T, D)) + + for n in range(N): + zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0)) + X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0) + for t in range(1, T): + zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :])) + X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0) + + return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus} + from builtins import range + +import autograd.numpy as np + + +def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False): + """"""Adapted from autograd.misc.optimizers"""""" + m = np.zeros(len(x)) + v = np.zeros(len(x)) + for i in range(num_iters): + g = grad(x, i) + if callback: callback(x, i, g, polyak) + m = (1 - b1) * g + b1 * m # First moment estimate. + v = (1 - b2) * (g**2) + b2 * v # Second moment estimate. + mhat = m / (1 - b1**(i + 1)) # Bias correction. + vhat = v / (1 - b2**(i + 1)) + x = x - step_size*mhat/(np.sqrt(vhat) + eps) + return x import matplotlib.pyplot as plt +import numpy as np +import numpy.random as npr +import torch as torch + + +def make_data_gap(seed, data_count=100): + import GPy + npr.seed(0) + x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))]) + x = x[:, np.newaxis] + k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.) + K = k.K(x) + L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count)) + + # draw a noise free random function from a GP + eps = np.random.randn(data_count) + f = L @ eps + + + # use a homoskedastic Gaussian noise model N(f(x)_i, \\sigma^2). 
\\sigma^2 = 0.1 + eps_noise = np.sqrt(0.1) * np.random.randn(data_count) + y = f + eps_noise + y = y[:, np.newaxis] + + plt.plot(x, f, 'ko', ms=2) + plt.plot(x, y, 'ro') + plt.title(""GP generated Data"") + plt.pause(1) + return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y) + + +def make_data_sine(seed, data_count=450): + # fix the random seed + np.random.seed(seed) + noise_var = 0.1 + + X = np.linspace(-4, 4, data_count) + y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count) + + train_count = int (0.2 * data_count) + idx = npr.permutation(range(data_count)) + X_train = X[idx[:train_count], np.newaxis ] + X_test = X[ idx[train_count:], np.newaxis ] + y_train = y[ idx[:train_count] ] + y_test = y[ idx[train_count:] ] + + mu = np.mean(X_train, 0) + std = np.std(X_train, 0) + X_train = (X_train - mu) / std + X_test = (X_test - mu) / std + mu = np.mean(y_train, 0) + std = np.std(y_train, 0) + # mu = 0 + # std = 1 + y_train = (y_train - mu) / std + y_test = (y_test -mu) / std + train_stats = dict() + train_stats['mu'] = torch.FloatTensor([mu]) + train_stats['sigma'] = torch.FloatTensor([std]) + return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\\ + train_stats import autograd +import autograd.numpy as np +import numpy.random as npr +import scipy.optimize + +sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1) +get_num_train = lambda inputs: inputs.shape[0] +logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params)) + + +class LogisticRegression: + def __init__(self): + self.params = None + + def set_parameters(self, params): + self.params = params + + def predict(self, X): + if self.params is not None: + # Outputs probability of a label being true according to logistic model + return np.atleast_2d(sigmoid(np.dot(X, self.params))).T + else: + raise RuntimeError(""Params need to be fit before predictions can be made."") + + def loss(self, params, weights, inputs, targets): + # Training loss is the negative log-likelihood of the training labels. + preds = logistic_predictions(params, inputs) + label_probabilities = preds * targets + (1 - preds) * (1 - targets) + return -np.sum(weights * np.log(label_probabilities + 1e-16)) + + def fit(self, weights, init_params, inputs, targets, verbose=True): + training_loss_fun = lambda params: self.loss(params, weights, inputs, targets) + # Define a function that returns gradients of training loss using Autograd. 
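+ # The loss and its autograd gradient are then passed to scipy.optimize.minimize below.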
+ training_gradient_fun = autograd.grad(training_loss_fun, 0) + # optimize params + if verbose: + print(""Initial loss:"", self.loss(init_params, weights, inputs, targets)) + # opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1) + res = scipy.optimize.minimize(fun=training_loss_fun, + jac=training_gradient_fun, + x0=init_params, + tol=1e-10, + options={'disp': verbose}) + opt_params = res.x + if verbose: + print(""Trained loss:"", self.loss(opt_params, weights, inputs, targets)) + self.params = opt_params + return opt_params + + def get_test_acc(self, params, test_targets, test_inputs): + preds = np.round(self.predict(test_inputs).T).astype(np.int) + err = np.abs(test_targets - preds).sum() + return 1 - err/ test_targets.shape[1] + + #### Required for IJ computation ### + def compute_hessian(self, params_one, weights_one, inputs, targets): + return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets) + + def compute_jacobian(self, params_one, weights_one, inputs, targets): + return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\\ + (params_one, weights_one, inputs, targets).squeeze() + ################################################### + + @staticmethod + def synthetic_lr_data(N=10000, D=10): + x = 1. * npr.randn(N, D) + x_test = 1. * npr.randn(int(0.3 * N), D) + w = npr.randn(D, 1) + y = sigmoid((x @ w)).ravel() + y = npr.binomial(n=1, p=y) # corrupt labels + y_test = sigmoid(x_test @ w).ravel() + # y_test = np.round(y_test) + y_test = npr.binomial(n=1, p=y_test) + return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test) + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + +from copy import deepcopy + +import numpy as np +import numpy.random as npr + + +def make_batches(n_data, batch_size): + return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)] + + +def generate_regression_data(seed, data_count=500): + """""" + Generate data from a noisy sine wave. + :param seed: random number seed + :param data_count: number of data points. 
+ :return: + """""" + np.random.seed(seed) + noise_var = 0.1 + + x = np.linspace(-4, 4, data_count) + y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count) + + train_count = int (0.2 * data_count) + idx = npr.permutation(range(data_count)) + x_train = x[idx[:train_count], np.newaxis ] + x_test = x[ idx[train_count:], np.newaxis ] + y_train = y[ idx[:train_count] ] + y_test = y[ idx[train_count:] ] + + mu = np.mean(x_train, 0) + std = np.std(x_train, 0) + x_train = (" +"x_train - mu) / std + x_test = (x_test - mu) / std + mu = np.mean(y_train, 0) + std = np.std(y_train, 0) + y_train = (y_train - mu) / std + train_stats = dict() + train_stats['mu'] = mu + train_stats['sigma'] = std + + return x_train, y_train, x_test, y_test, train_stats + + +def form_D_for_auucc(yhat, zhatl, zhatu): + # a handy routine to format data as needed by the UCC fit() method + D = np.zeros([yhat.shape[0], 3]) + D[:, 0] = yhat.squeeze() + D[:, 1] = zhatl.squeeze() + D[:, 2] = zhatu.squeeze() + return D + + +def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper): + """""" + Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference + :param y_pred_lower: + :param y_pred_mean: + :param y_pred_upper: + :param y_true: + :return: ucc object fitted for two systems: target + null reference + """""" + # form matrix for ucc: + X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(), + y_pred_mean.squeeze() - y_pred_lower.squeeze(), + y_pred_upper.squeeze() - y_pred_mean.squeeze()) + # form matrix for a 'null' system (constant band) + X_null = deepcopy(X_for_ucc) + X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC) + # create an instance of ucc and fit data + from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc + u = ucc() + u.fit([X_for_ucc, X_null], y_true.squeeze()) + return u + + +def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs): + """""" + + Args: + task_type: (str) regression or classification. + metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for + classification and [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] for regression. + greater_is_better: is False the scores are negated before returning. + **kwargs: additional arguments specific to some metrics. + + Returns: + sklearn compatible scorer function. + + """""" + + from uq360.metrics.classification_metrics import compute_classification_metrics + from uq360.metrics.regression_metrics import compute_regression_metrics + + def sklearn_compatible_score(model, X, y_true): + """""" + + Args: + model: The model being scored. Currently uq360 and sklearn models are supported. + X: Input features. + y_true: ground truth values for the target. + + Returns: + Computed score of the model. 
+ + """""" + + from uq360.algorithms.builtinuq import BuiltinUQ + from uq360.algorithms.posthocuq import PostHocUQ + if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ): + # uq360 models + if task_type == ""classification"": + score = compute_classification_metrics( + y_true=y_true, + y_prob=model.predict(X).y_prob, + option=metric, + **kwargs + )[metric] + elif task_type == ""regression"": + y_mean, y_lower, y_upper = model.predict(X) + score = compute_regression_metrics( + y_true=y_true, + y_mean=y_mean, + y_lower=y_lower, + y_upper=y_upper, + option=metric, + **kwargs + )[metric] + else: + raise NotImplementedError + + else: + # sklearn models + if task_type == ""classification"": + score = compute_classification_metrics( + y_true=y_true, + y_prob=model.predict_proba(X), + option=metric, + **kwargs + )[metric] + else: + if metric in [""rmse"", ""r2""]: + score = compute_regression_metrics( + y_true=y_true, + y_mean=model.predict(X), + y_lower=None, + y_upper=None, + option=metric, + **kwargs + )[metric] + else: + raise NotImplementedError(""{} is not supported for sklearn regression models"".format(metric)) + + if not greater_is_better: + score = -score + return score + return sklearn_compatible_score + + +class DummySklearnEstimator(ABC): + def __init__(self, num_classes, base_model_prediction_fn): + self.base_model_prediction_fn = base_model_prediction_fn + self.classes_ = [i for i in range(num_classes)] + + def fit(self): + pass + + def predict_proba(self, X): + return self.base_model_prediction_fn(X) + # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py +# Utilization target is kept as a continuous target. +import os + +import pandas as pd + + +def default_preprocessing(df): + """""" + 1.Create a new column, RACE that is 'White' if RACEV2X = 1 and HISPANX = 2 i.e. non Hispanic White + and 'non-White' otherwise + 2. Restrict to Panel 19 + 3. RENAME all columns that are PANEL/ROUND SPECIFIC + 4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1 + 5. Compute UTILIZATION. 
+ """""" + def race(row): + if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE + return 'White' + return 'Non-White' + + df['RACEV2X'] = df.apply(lambda row: race(row), axis=1) + df = df.rename(columns = {'RACEV2X' : 'RACE'}) + + df = df[df['PANEL'] == 19] + + # RENAME COLUMNS + df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH', + 'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT', + 'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM', + 'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE', + 'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'}) + + df = df[df['REGION'] >= 0] # remove values -1 + df = df[df['AGE'] >= 0] # remove values -1 + + df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9 + + df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9 + + df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG', + 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX', + 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM', + 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42', + 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1 + + def utilization(row): + return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15'] + + df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1) + + df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'}) + + df = df[['REGION','AGE','SEX','RACE','MARRY', + 'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX', + 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX', + 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM', + 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42', + 'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']] + + return df + + +class MEPSDataset(): + """""" + The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals, + medical providers, and employers, and collects data on health services used, costs & frequency of services, + demographics, health status and conditions, etc., of the respondents. + This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19, + and rounds 1, 2, and 3 of Panel 20. + See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data. + References: + .. 
[#] `Medical Expenditure Panel Survey data `_ + """""" + + def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None): + self._dirpath = dirpath + if not self._dirpath: + self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data') + + self._filepath = os.path.join(self._dirpath, 'h181.csv') + try: + df = pd.read_csv(self._filepath, sep=',', na_values=[]) + except IOError as err: + print(""IOError: {}"".format(err)) + print(""To use this class, please place the heloc_dataset.csv:"") + print(""file, as-is, in the folder:"") + print(""\\n\\t{}\\n"".format(os.path.abspath(os.path.join( + os.path.abspath(__file__), 'data', 'meps_data')))) + import sys + sys.exit(1) + + if custom_preprocessing: + self._data = custom_preprocessing(df) + + def data(self): + return self._data from .meps_dataset import MEPSDataset + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class BuiltinUQ(ABC): + """""" BuiltinUQ is the base class for any algorithm that has UQ built into it. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize a BuiltinUQ object. + """""" + + @abc.abstractmethod + def fit(self, *argv, **kwargs): + """""" Learn the UQ related parameters.. + """""" + raise NotImplementedError + + @abc.abstractmethod + def predict(self, *argv, **kwargs): + """""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric + uncertainty in the predictions. + """""" + raise NotImplementedError + + def set_params(self, **parameters): + for parameter, value in parameters.items(): + setattr(self, parameter, value) + return self + + + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class PostHocUQ(ABC): + """""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize a BuiltinUQ object. + """""" + + @abc.abstractmethod + def _process_pretrained_model(self, *argv, **kwargs): + """""" Method to process the pretrained model that requires UQ. + """""" + raise NotImplementedError + + @abc.abstractmethod + def predict(self, *argv, **kwargs): + """""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric + uncertainty in the predictions. + """""" + raise NotImplementedError + + def set_params(self, **parameters): + for parameter, value in parameters.items(): + setattr(self, parameter, value) + return self + + def get_params(self): + """""" + This method should not take any arguments and returns a dict of the __init__ parameters. + + """""" + raise NotImplementedError + from collections import namedtuple + +import numpy as np +import torch +from scipy.stats import norm +from torch.utils.data import DataLoader +from torch.utils.data import TensorDataset +from uq360.algorithms.builtinuq import BuiltinUQ +from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet + +np.random.seed(42) +torch.manual_seed(42) + +class HeteroscedasticReg" +"ression(BuiltinUQ): + """""" Wrapper for heteroscedastic regression. We learn to predict targets given features, + assuming that the targets are noisy and that the amount of noise varies between data points. 
+ https://en.wikipedia.org/wiki/Heteroscedasticity + """""" + + def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True): + """""" + Args: + model_type: The base model architecture. Currently supported values are [mlp]. + mlp modeltype learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the + mean and variance of the Gaussian are functions of the data point ->git N(y_n | mlp_mu(x_n), mlp_var(x_n)) + model: (optional) The prediction model. Currently support pytorch models that returns mean and log variance. + config: dictionary containing the config parameters for the model. + device: device used for pytorch models ignored otherwise. + verbose: if True, print statements with the progress are enabled. + """""" + + super(HeteroscedasticRegression).__init__() + self.config = config + self.device = device + self.verbose = verbose + if model_type == ""mlp"": + self.model_type = model_type + self.model = _MLPNet( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + + elif model_type == ""custom"": + self.model_type = model_type + self.model = model + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config, ""model"": self.model, + ""device"": self.device, ""verbose"": self.verbose} + + def _loss(self, y_true, y_pred_mu, y_pred_log_var): + return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 + + 0.5 * y_pred_log_var) + + def fit(self, X, y): + """""" Fit the Heteroscedastic Regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + X = torch.from_numpy(X).float().to(self.device) + y = torch.from_numpy(y).float().to(self.device) + + dataset_loader = DataLoader( + TensorDataset(X,y), + batch_size=self.config[""batch_size""] + ) + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config[""lr""]) + + for epoch in range(self.config[""num_epochs""]): + avg_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.model.train() + batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x) + loss = self.model.loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + avg_loss += loss.item()/len(dataset_loader) + + if self.verbose: + print(""Epoch: {}, loss = {}"".format(epoch, avg_loss)) + + return self + + def predict(self, X, return_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. 
+ dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. + """""" + + self.model.eval() + + X = torch.from_numpy(X).float().to(self.device) + dataset_loader = DataLoader( + X, + batch_size=self.config[""batch_size""] + ) + + y_mean_list = [] + y_log_var_list = [] + for batch_x in dataset_loader: + batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x) + y_mean_list.append(batch_y_pred_mu.data.cpu().numpy()) + y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy()) + + y_mean = np.concatenate(y_mean_list) + y_log_var = np.concatenate(y_log_var_list) + y_std = np.sqrt(np.exp(y_log_var)) + y_lower = y_mean - 2.0*y_std + y_upper = y_mean + 2.0*y_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + return res + from .heteroscedastic_regression import HeteroscedasticRegression from collections import namedtuple + +import numpy as np +import torch +import torch.nn.functional as F +from scipy.stats import norm +from torch.utils.data import DataLoader +from torch.utils.data import TensorDataset + +from uq360.algorithms.builtinuq import BuiltinUQ + +np.random.seed(42) +torch.manual_seed(42) + + +class _MLPNet_Main(torch.nn.Module): + def __init__(self, num_features, num_outputs, num_hidden): + super(_MLPNet_Main, self).__init__() + self.fc = torch.nn.Linear(num_features, num_hidden) + self.fc_mu = torch.nn.Linear(num_hidden, num_outputs) + self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs) + + def forward(self, x): + x = F.relu(self.fc(x)) + mu = self.fc_mu(x) + log_var = self.fc_log_var(x) + return mu, log_var + + +class _MLPNet_Aux(torch.nn.Module): + def __init__(self, num_features, num_outputs, num_hidden): + super(_MLPNet_Aux, self).__init__() + self.fc = torch.nn.Linear(num_features, num_hidden) + self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs) + + def forward(self, x): + x = F.relu(self.fc(x)) + log_var = self.fc_log_var(x) + return log_var + + +class AuxiliaryIntervalPredictor(BuiltinUQ): + """""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model. + + References: + .. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep + models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on + Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079 + """""" + + def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True): + """""" + Args: + model_type: The model type used to build the main model and the auxiliary model. Currently supported values + are [mlp, custom]. `mlp` modeltype learns a mlp neural network using pytorch framework. For `custom` the user + provide `main_model` and `aux_model`. + main_model: (optional) The main prediction model. Currently support pytorch models that return mean and log variance. + aux_model: (optional) The auxiliary prediction model. Currently support pytorch models that return calibrated log variance. + config: dictionary containing the config parameters for the model. + device: device used for pytorch models ignored otherwise. 
+ verbose: if True, print statements with the progress are enabled. + """""" + + super(AuxiliaryIntervalPredictor).__init__() + self.config = config + self.device = device + self.verbose = verbose + if model_type == ""mlp"": + self.model_type = model_type + self.main_model = _MLPNet_Main( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + self.aux_model = _MLPNet_Aux( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + elif model_type == ""custom"": + self.model_type = model_type + self.main_model = main_model + self.aux_model = aux_model + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config, ""main_model"": self.main_model, + ""aux_model"": self.aux_model, ""device"": self.device, ""verbose"": self.verbose} + + def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux): + r = torch.abs(y_true - y_pred_mu) + # + 0.5 * y_pred_log_var + + loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \\ + self.config[""lambda_match""] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux))) + return loss + + def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux): + deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux) + upper = y_pred_mu + deltau + lower = y_pred_mu - deltal + width = upper - lower + r = torch.abs(y_true - y_pred_mu) + + emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000)) + + loss_emce = torch.abs(self.config[""calibration_alpha""]-emce) + loss_noise = torch.mean(torch.abs(0.5 * width - r)) + loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true)) + + #print(emce) + return loss_emce + self.config[""lambda_noise""] * loss_noise + self.config[""lambda_sharpness""] * loss_sharpness + + def fit(self, X, y): + """""" Fit the Auxiliary Interval Predictor model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. 
+ y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + X = torch.from_numpy(X).float().to(self.device) + y = torch.from_numpy(y).float().to(self.device) + + dataset_loader = DataLoader( + TensorDataset(X,y), + batch_size=self.config[""batch_size""] + ) + optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config[""lr""]) + optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config[""lr""]) + + for it in range(self.config[""num_outer_iters""]): + + # Train the main model + for epoch in range(self.config[""num_main_iters""]): + avg_mean_model_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.main_model.train() + self.aux_model.eval() + batch_y_pred_log_var_aux = self.aux_model(batch_x) + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux) + optimizer_main_model.zero_grad() + main_loss.backward() + optimizer_main_model.step() + + avg_mean_model_loss += main_loss.item()/len(dataset_loader) + + if self.verbose: + print(""Iter: {}, Epoch: {}, main_model_loss = {}"".format(it, epoch, avg_mean_model_loss)) + + # Train the auxiliary model + for epoch in range(self.config[""num_aux_iters""]): + avg_aux_model_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.aux_model.train() + self.main_model.eval() + batch_y_pred_log_var_aux = self.aux_model(batch_x) + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux) + optimizer_aux_model.zero_grad() + aux" +"_loss.backward() + optimizer_aux_model.step() + + avg_aux_model_loss += aux_loss.item() / len(dataset_loader) + + if self.verbose: + print(""Iter: {}, Epoch: {}, aux_model_loss = {}"".format(it, epoch, avg_aux_model_loss)) + + return self + + def predict(self, X, return_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. 
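+        A small end-to-end sketch (synthetic data; the config keys are the ones consumed
+        by ``__init__``, ``fit`` and the two loss terms, with illustrative values)::
+
+            import numpy as np
+            config = {'num_features': 4, 'num_outputs': 1, 'num_hidden': 16,
+                      'batch_size': 16, 'lr': 1e-3,
+                      'num_outer_iters': 2, 'num_main_iters': 5, 'num_aux_iters': 5,
+                      'lambda_match': 1.0, 'lambda_noise': 1.0, 'lambda_sharpness': 1.0,
+                      'calibration_alpha': 0.9}
+            uq = AuxiliaryIntervalPredictor(model_type='mlp', config=config, device='cpu')
+            X = np.random.randn(64, 4).astype('float32')
+            y = np.random.randn(64, 1).astype('float32')
+            uq.fit(X, y)
+            res = uq.predict(X)  # res.y_mean, res.y_lower, res.y_upper
+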
+ """""" + + self.main_model.eval() + + X = torch.from_numpy(X).float().to(self.device) + dataset_loader = DataLoader( + X, + batch_size=self.config[""batch_size""] + ) + + y_mean_list = [] + y_log_var_list = [] + for batch_x in dataset_loader: + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + y_mean_list.append(batch_y_pred_mu.data.cpu().numpy()) + y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy()) + + y_mean = np.concatenate(y_mean_list) + y_log_var = np.concatenate(y_log_var_list) + y_std = np.sqrt(np.exp(y_log_var)) + y_lower = y_mean - 2.0*y_std + y_upper = y_mean + 2.0*y_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + return res + from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor + from .infinitesimal_jackknife import InfinitesimalJackknife + from collections import namedtuple + +import numpy as np + +from uq360.algorithms.posthocuq import PostHocUQ + + +class InfinitesimalJackknife(PostHocUQ): + """""" + Performs a first order Taylor series expansion around MLE / MAP fit. + Requires the model being probed to be twice differentiable. + """""" + def __init__(self, params, gradients, hessian, config): + """""" Initialize IJ. + Args: + params: MLE / MAP fit around which uncertainty is sought. d*1 + gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n + hessian: Hessian evaluated at the MLE / MAP fit. d*d + """""" + + super(InfinitesimalJackknife).__init__() + self.params_one = params + self.gradients = gradients + self.hessian = hessian + self.d, self.n = gradients.shape + self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients) + self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients) + self.w_one = np.ones([self.n]) + self.config = config + + def get_params(self, deep=True): + return {""params"": self.params, ""config"": self.config, ""gradients"": self.gradients, + ""hessian"": self.hessian} + + def _process_pretrained_model(self, *argv, **kwargs): + pass + + def get_parameter_uncertainty(self): + if (self.config['resampling_strategy'] == ""jackknife"") or (self.config['resampling_strategy'] == ""jackknife+""): + w_query = np.ones_like(self.w_one) + resampled_params = np.zeros([self.n, self.d]) + for i in np.arange(self.n): + w_query[i] = 0 + resampled_params[i] = self.ij(w_query) + w_query[i] = 1 + return np.cov(resampled_params), resampled_params + elif self.config['resampling_strategy'] == ""bootstrap"": + pass + else: + raise NotImplementedError(""Only jackknife, jackknife+, and bootstrap resampling strategies are supported"") + + def predict(self, X, model): + """""" + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + model: model object, must implement a set_parameters function + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. 
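+        A toy sketch of the jackknife resampling path (random curvature and gradients;
+        ``base_model`` in the last line is a hypothetical object exposing ``predict``
+        and ``set_parameters`` as required by this method)::
+
+            import numpy as np
+            d, n = 3, 50
+            params = np.zeros(d)
+            gradients = np.random.randn(d, n)
+            hessian = n * np.eye(d)
+            config = {'resampling_strategy': 'jackknife', 'alpha': 0.05}
+            ij = InfinitesimalJackknife(params, gradients, hessian, config)
+            params_cov, resampled = ij.get_parameter_uncertainty()
+            # res = ij.predict(X_test, base_model)  # uncertainty in model predictions
+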
+ + """""" + n, _ = X.shape + y_all = model.predict(X) + _, d_out = y_all.shape + params_cov, params = self.get_parameter_uncertainty() + if d_out > 1: + print(""Quantiles are computed independently for each dimension. May not be accurate."") + y = np.zeros([params.shape[0], n, d_out]) + for i in np.arange(params.shape[0]): + model.set_parameters(params[i]) + y[i] = model.predict(X) + y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0) + y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0) + y_mean = y.mean(axis=0) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + return res + + def ij(self, w_query): + """""" + Args: + w_query: A n*1 vector to query parameters at. + Return: + new parameters at w_query + """""" + assert w_query.shape[0] == self.n + return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T + + def approx_ij(self, w_query): + """""" + Args: + w_query: A n*1 vector to query parameters at. + Return: + new parameters at w_query + """""" + assert w_query.shape[0] == self.n + return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T import copy +from collections import namedtuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader +import torch.utils.data as data_utils +from scipy.stats import norm +from sklearn.preprocessing import StandardScaler + +from uq360.algorithms.builtinuq import BuiltinUQ +from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp + + +class BnnRegression(BuiltinUQ): + """""" + Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression. + + References: + .. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. ""Structured variational learning of Bayesian neural + networks with horseshoe priors."" International Conference on Machine Learning. PMLR, 2018. + """""" + def __init__(self, config, prior=""Gaussian""): + """""" + + Args: + config: a dictionary specifying network and learning hyperparameters. + prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe + """""" + super(BnnRegression, self).__init__() + self.config = config + if prior == ""Gaussian"": + self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers']) + self.config['use_reg_hshoe'] = None + elif prior == ""Hshoe"": + self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale']) + self.config['use_reg_hshoe'] = False + elif prior == ""RegHshoe"": + self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale'], + use_reg_hshoe=config['use_reg_hshoe']) + self.config['use_reg_hshoe'] = True + else: + raise NotImplementedError(""'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe"") + + def get_params(self, deep=True): + return {""prior"": self.prior, ""config"": self.config} + + def fit(self, X, y): + """""" Fit the BNN regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. 
+ y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + torch.manual_seed(1234) + optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size']) + neg_elbo = torch.zeros([self.config['num_epochs'], 1]) + params_store = {} + for epoch in range(self.config['num_epochs']): + loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0] + optimizer.zero_grad() + loss.backward() + optimizer.step() + if hasattr(self.net, 'fixed_point_updates'): + # for hshoe or regularized hshoe nets + self.net.fixed_point_updates() + neg_elbo[epoch] = loss.item() + if (epoch + 1) % 10 == 0: + # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0]) + print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}' + .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0], + self.net.get_noise_var())) + params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all. + best_model_id = neg_elbo.argmin() # loss_val_store.argmin() # + self.net.load_state_dict(params_store[best_model_id.item()]) + + return self + + def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + mc_samples: Number of Monte-Carlo samples. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + return_epistemic: if True, the epistemic upper and lower bounds are returned. + return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions + is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. 
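+        A minimal sketch (illustrative config; note that ``fit`` and ``predict`` expect
+        torch tensors, since they call ``y.float()`` and ``self.net(X)`` directly)::
+
+            import torch
+            config = {'ip_dim': 4, 'op_dim': 1, 'num_nodes': 32, 'num_layers': 2,
+                      'step_size': 1e-2, 'num_epochs': 20}
+            bnn = BnnRegression(config, prior='Gaussian')
+            X, y = torch.randn(64, 4), torch.randn(64)
+            bnn.fit(X, y)
+            res = bnn.predict(X, mc_samples=50)  # res.y_mean, res.y_lower, res.y_upper
+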
+ """""" + epistemic_out = np.zeros([mc_samples, X.shape[0]]) + total_out = np.zeros([mc_samples, X.shape[0]]) + for s in np.arange(mc_samples): + pred = self.net(X).data.numpy().ravel() + epistemic_out[s] = pred + total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0]) + y" +"_total_std = np.std(total_out, axis=0) + y_epi_std = np.std(epistemic_out, axis=0) + y_mean = np.mean(total_out, axis=0) + y_lower = y_mean - 2 * y_total_std + y_upper = y_mean + 2 * y_total_std + y_epi_lower = y_mean - 2 * y_epi_std + y_epi_upper = y_mean + 2 * y_epi_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_epistemic: + Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',)) + res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + if return_epistemic_dists: + epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_epistemic_dists',)) + res = Result(*res, y_epistemic_dists=epi_dists) + + return res + + +class BnnClassification(BuiltinUQ): + """""" + Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification. + """""" + def __init__(self, config, prior=""Gaussian"", device=None): + """""" + + Args: + config: a dictionary specifying network and learning hyperparameters. + prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe + """""" + super(BnnClassification, self).__init__() + self.config = config + self.device = device + if prior == ""Gaussian"": + self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers']) + self.config['use_reg_hshoe'] = None + elif prior == ""Hshoe"": + self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale']) + self.config['use_reg_hshoe'] = False + elif prior == ""RegHshoe"": + self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale'], + use_reg_hshoe=config['use_reg_hshoe']) + self.config['use_reg_hshoe'] = True + else: + raise NotImplementedError(""'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe"") + if ""batch_size"" not in self.config: + self.config[""batch_size""] = 50 + self.net = self.net.to(device) + + def get_params(self, deep=True): + return {""prior"": self.prior, ""config"": self.config, ""device"": self.device} + + def fit(self, X=None, y=None, train_loader=None): + """""" Fits BNN regression model. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + Ignored if train_loader is not None. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + Ignored if train_loader is not None. + train_loader: pytorch train_loader object. 
+ + Returns: + self + + """""" + if train_loader is None: + train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long()) + train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True) + + torch.manual_seed(1234) + optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size']) + neg_elbo = torch.zeros([self.config['num_epochs'], 1]) + params_store = {} + for epoch in range(self.config['num_epochs']): + avg_loss = 0.0 + for batch_x, batch_y in train_loader: + loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if hasattr(self.net, 'fixed_point_updates'): + # for hshoe or regularized hshoe nets + self.net.fixed_point_updates() + + avg_loss += loss.item() + + neg_elbo[epoch] = avg_loss / len(train_loader) + + if (epoch + 1) % 10 == 0: + # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0]) + print('Epoch[{}/{}], neg elbo: {:.6f}' + .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item())) + params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all. + best_model_id = neg_elbo.argmin() # loss_val_store.argmin() # + self.net.load_state_dict(params_store[best_model_id.item()]) + + return self + + def predict(self, X, mc_samples=100): + """""" + Obtain calibrated predictions for the test points. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + mc_samples: Number of Monte-Carlo samples. + + Returns: + namedtuple: A namedtupe that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_prob: ndarray of shape (n_samples, n_classes) + Predicted probability scores of the classes. + y_prob_var: ndarray of shape (n_samples,) + Variance of the prediction on the test points. + y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes) + Samples from the predictive distribution. + + """""" + + X = torch.Tensor(X) + y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)] + + y_prob_samples_stacked = np.stack(y_prob_samples) + prob_mean = np.mean(y_prob_samples_stacked, 0) + prob_var = np.std(y_prob_samples_stacked, 0) ** 2 + + if len(np.shape(prob_mean)) == 1: + y_pred_labels = prob_mean > 0.5 + + else: + y_pred_labels = np.argmax(prob_mean, axis=1) + + Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples']) + res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples) + + return res + import inspect +from collections import namedtuple + +import numpy as np +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.exceptions import NotFittedError +from uq360.algorithms.posthocuq import PostHocUQ + + +class BlackboxMetamodelClassification(PostHocUQ): + """""" Extracts confidence scores from black-box classification models using a meta-model [4]_ . + + References: + .. [4] Chen, Tongfei, et al. ""Confidence scoring using whitebox meta-models with linear classifier probes."" + The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019. + """""" + + def _create_named_model(self, mdltype, config): + """""" Instantiates a model by name passed in 'mdltype'. 
+ + Args: + mdltype: string with name (must be supported) + config: dict with args passed in the instantiation call + Returns: + mdl instance + """""" + assert (isinstance(mdltype, str)) + if mdltype == 'lr': + mdl = LogisticRegression(**config) + elif mdltype == 'gbm': + mdl = GradientBoostingClassifier(**config) + else: + raise NotImplementedError(""ERROR: Requested model type unknown: \\""%s\\"""" % mdltype) + return mdl + + def _get_model_instance(self, model, config): + """""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or + (c) passed in instance. + + :param model: string, class, or instance. Class and instance must have certain methods callable. + :param config: dict with args passed in during the instantiation + :return: model instance + """""" + assert (model is not None and config is not None) + if isinstance(model, str): # 'model' is a name, create it + mdl = self._create_named_model(model, config) + elif inspect.isclass(model): # 'model' is a class, instantiate it + mdl = model(**config) + else: # 'model' is an instance, register it + mdl = model + if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]): + raise ValueError(""ERROR: Passed model/method failed the interface test. Methods required: %s"" % + ','.join(self.callable_keys)) + return mdl + + def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42): + """""" + + :param base_model: Base model. Can be: + (1) None (default mdl will be set up), + (2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'), + (3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate. + (4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods. + Note: user-supplied classes and models must have certain callable methods ('predict', 'fit') + and be capable of raising NotFittedError. + :param meta_model: Meta model. 
Same values possible as with 'base_model' + :param base_config: None or a params dict to be passed to 'base_model' at instantiation + :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation + :param random_seed: seed used in the various pipeline steps + """""" + super(BlackboxMetamodelClassification).__init__() + self.random_seed = random_seed + self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in + self.base_model_default = 'gbm' + self.meta_model_default = 'lr' + self.base_config_default = {'n_estimators': 300, 'max_depth': 10, + 'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10, + 'random_state': self.random_seed} + self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed} + self.base_config = base_config if base_config is not None else self.base_config_default + self.meta_config = meta_config if meta_config is not None else self.meta_config_default + self.base_model = None + self.meta_model = None + self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default, + self.base_config) + self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default, + self.meta_config) + + def get_params(self, deep=True): + return {""base_model"": self.base_model, ""meta_model"": self.meta_model, ""base_config"": self.base_config, + ""meta_config"": self.meta_config, ""random_seed"": self.random_seed} + + def _process_pretrained_model(self, X, y_hat_proba): + """""" + Given the original input features and the base output probabilities, generate input features + to train a meta model. Current implementation copies all input features and appends. + + :param X: numpy [nsamples, dim] + :param y_hat_proba: [nsamples, nclasses] + :return: array with new features [nsamples, newdim] + """""" + assert (len(y_hat_proba.shape) == 2) + assert (X.shape[0] == y_hat_proba.shape[0]) + # sort the probs sample by sample + faux1 = np.sort(y_hat_proba, axis=-1) + # add delta between top and second candidate + faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1) + return np.hstack([X, faux1, faux2]) + + def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False, + meta_train_data=(None, None)): + """""" + Fit base and meta models. + + :param X: input to the base model, + array-like of shape (n_samples, n_features). + Features vectors of the training data. + :param y: ground truth for the base model, + array-like of shape (n_samples,) + :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model + (complement will be used to train the base model) + :param randomize_samples: use shuffling when creating partitions + " +":param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been + instantiated outside/by the user and are already fitted. + :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used + with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate. + Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode. 
+ :return: self + """""" + X = np.asarray(X) + y = np.asarray(y) + assert (len(meta_train_data) == 2) + if meta_train_data[0] is None: + X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction, + random_state=self.random_seed) + else: + if not base_is_prefitted: + raise ValueError(""ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option"") + X_base = y_base = None + X_meta = meta_train_data[0] + y_meta = meta_train_data[1] + # fit the base model + if not base_is_prefitted: + self.base_model.fit(X_base, y_base) + # get input for the meta model from the base + try: + y_hat_meta_proba = self.base_model.predict_proba(X_meta) + # determine correct-incorrect outcome - these are targets for the meta model trainer + + # y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8) + y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int) + + except NotFittedError as e: + raise RuntimeError(""ERROR: fit(): The base model appears not pre-fitted (%s)"" % repr(e)) + # get input features for meta training + X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba) + # train meta model to predict 'correct' vs. 'incorrect' of the base + self.meta_model.fit(X_meta_in, y_hat_meta_targets) + return self + + def predict(self, X): + """""" + Generate a base prediction along with uncertainty/confidence for data X. + + :param X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + :return: namedtuple: A namedtuple that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_score: ndarray of shape (n_samples,) + Confidence score the test points. + + """""" + y_hat_proba = self.base_model.predict_proba(X) + y_hat = np.argmax(y_hat_proba, axis=-1) + X_meta_in = self._process_pretrained_model(X, y_hat_proba) + z_hat = self.meta_model.predict_proba(X_meta_in) + index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0] # class 1 corresponds to probab of positive/correct outcome + Result = namedtuple('res', ['y_pred', 'y_score']) + res = Result(y_hat, z_hat[:, index_of_class_1]) + + return res + from .blackbox_metamodel_regression import BlackboxMetamodelRegression +from .blackbox_metamodel_classification import BlackboxMetamodelClassification + import inspect +from collections import namedtuple + +import numpy as np +from sklearn.ensemble import GradientBoostingRegressor +from sklearn.model_selection import train_test_split +from sklearn.exceptions import NotFittedError +from uq360.algorithms.posthocuq import PostHocUQ + + +class BlackboxMetamodelRegression(PostHocUQ): + """""" Extracts confidence scores from black-box regression models using a meta-model [2]_ . + + References: + .. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes. + The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019. 
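+    A minimal sketch with synthetic data, using the default 'gbr' base and meta models
+    (illustrative; it assumes an sklearn version that still accepts the loss names set
+    in the default configs below)::
+
+        import numpy as np
+        X = np.random.randn(200, 4)
+        y = X[:, 0] + 0.1 * np.random.randn(200)
+        uq = BlackboxMetamodelRegression()
+        uq.fit(X, y)
+        res = uq.predict(X)  # res.y_mean, res.y_lower, res.y_upper
+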
+ + """""" + + def _create_named_model(self, mdltype, config): + """""" + Instantiates a model by name passed in 'mdltype' + + :param mdltype: string with name (must be supprted) + :param config: dict with args passed in the instantiation call + :return: mdl instance + """""" + assert (isinstance(mdltype, str)) + if mdltype == 'gbr': + mdl = GradientBoostingRegressor(**config) + else: + raise NotImplementedError(""ERROR: Requested model type unknown: \\""%s\\"""" % mdltype) + return mdl + + def _get_model_instance(self, model, config): + """""" + Returns an instance of a model based on (a) a desired name or (b) passed in class, or + (c) passed in instance + + :param model: string, class, or instance. Class and instance must have certain methods callable. + :param config: dict with args passed in during the instantiation + :return: model instance + """""" + assert (model is not None and config is not None) + if isinstance(model, str): # 'model' is a name, create it + mdl = self._create_named_model(model, config) + elif inspect.isclass(model): # 'model' is a class, instantiate it + mdl = model(**config) + else: # 'model' is an instance, register it + mdl = model + if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]): + raise ValueError(""ERROR: Passed model/method failed the interface test. Methods required: %s"" % + ','.join(self.callable_keys)) + return mdl + + def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42): + """""" + + :param base_model: Base model. Can be: + (1) None (default mdl will be set up), + (2) Named model (e.g., 'gbr'), + (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate. + (4) Model instance (instantiated outside). Will be re-used. Must have required callable methods. + Note: user-supplied classes and models must have certain callable methods ('predict', 'fit') + and be capable of raising NotFittedError. + :param meta_model: Meta model. 
Same values possible as with 'base_model' + :param base_config: None or a params dict to be passed to 'base_model' at instantiation + :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation + :param random_seed: seed used in the various pipeline steps + """""" + super(BlackboxMetamodelRegression).__init__() + self.random_seed = random_seed + self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in + self.base_model_default = 'gbr' + self.meta_model_default = 'gbr' + self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001, + 'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed} + self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10, + 'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10, + 'random_state': self.random_seed} + self.base_config = base_config if base_config is not None else self.base_config_default + self.meta_config = meta_config if meta_config is not None else self.meta_config_default + self.base_model = None + self.meta_model = None + self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default, + self.base_config) + self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default, + self.meta_config) + + def get_params(self, deep=True): + return {""base_model"": self.base_model, ""meta_model"": self.meta_model, ""base_config"": self.base_config, + ""meta_config"": self.meta_config, ""random_seed"": self.random_seed} + + def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False, + meta_train_data=(None, None)): + """""" + Fit base and meta models. + + :param X: input to the base model + :param y: ground truth for the base model + :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model + (complement will be used to train the base model) + :param randomize_samples: use shuffling when creating partitions + :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been + instantiated outside/by the user and are already fitted. + :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used + with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate. + Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode. 
+ :return: self + """""" + X = np.asarray(X) + y = np.asarray(y) + assert(len(meta_train_data)==2) + if meta_train_data[0] is None: + X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction, + random_state=self.random_seed) + else: + if not base_is_prefitted: + raise ValueError(""ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option"") + X_base = y_base = None + X_meta = meta_train_data[0] + y_meta = meta_train_data[1] + # fit the base model + if not base_is_prefitted: + self.base_model.fit(X_base, y_base) + # get input for the meta model from the base + try: + y_hat_meta = self.base_model.predict(X_meta) + except NotFittedError as e: + raise RuntimeError(""ERROR: fit(): The base model appears not pre-fitted (%s)"" % repr(e)) + # used base input and output as meta input + X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta) + # train meta model to predict abs diff + self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta)) + return self + + def _process_pretrained_model(self, X, y_hat): + """""" + Given the original input features and the base output probabilities, generate input features + to train a meta model. Current implementation copies all input features and appends. + + :param X: numpy [nsamples, dim] + :param y_hat: [nsamples,] + :return: array with new features [nsamples, newdim] + """""" + y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat + X_meta_in = np.hstack([X, y_hat_meta_prime]) + return X_meta_in + + def predict(self, X): + """""" + Generate prediction and uncertainty bounds for data X. + + :param X: input features + :return: namedtuple: A namedtuple that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + + """""" + y_hat = self.base_model.predict(X) + y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat + X_meta_in = np.hstack([X, y_hat_prime]) + z_hat = self.meta_model.predict(X_meta_in) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_hat, y_hat - z_hat, y_hat + z_hat) + + return res + from .quantile_regression import QuantileRegression + from collections import namedtuple + +from sklearn.ensemble import GradientBoostingRegressor + +from uq360.algorithms.builtinuq import BuiltinUQ + + +class QuantileRegression(BuiltinUQ): + """"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile + to obtain the prediction intervals. + """""" + + def __init__(self, model_type=""gbr"", config=None): + """""" + Args: + model_type: The base model used for predicting a quantile. Currently supported values are [gbr]. + gbr is sklearn GradientBoostingRegressor. + config: dictionary containing the config parameters for the model. 
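+        Only ``alpha`` receives a default below, so every other GBR hyperparameter must
+        be present in ``config``. A minimal sketch with illustrative values (assuming an
+        sklearn version that accepts the loss names configured by this class)::
+
+            import numpy as np
+            config = {'n_estimators': 100, 'max_depth': 3, 'learning_rate': 0.05,
+                      'min_samples_leaf': 5, 'min_samples_split': 10, 'alpha': 0.9}
+            qr = QuantileRegression(model_type='gbr', config=config)
+            X = np.random.randn(200, 3)
+            y = X[:, 0] + 0.1 * np.random.randn(200)
+            qr.fit(X, y)
+            res = qr.predict(X)  # res.y_mean, res.y_lower, res.y_upper
+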
+ """""" + + super(QuantileRegression).__init__() + if config is not None: + self.config = config + else: + self.config = {} + if ""alpha"" not in self.config: + self.config[""alpha""] = 0.95 + if model_type == ""gbr"": + self.model_type = model_type + self.model_mean = GradientBoostingRegressor( + loss='ls', + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""] + ) + self.model_upper = GradientBoostingRegressor( + loss='quantile', + alpha=self.config[""alpha""], + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + " +"min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""] + ) + self.model_lower = GradientBoostingRegressor( + loss='quantile', + alpha=1.0 - self.config[""alpha""], + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""]) + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config} + + def fit(self, X, y): + """""" Fit the Quantile Regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + self.model_mean.fit(X, y) + self.model_lower.fit(X, y) + self.model_upper.fit(X, y) + + return self + + def predict(self, X): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + """""" + + y_mean = self.model_mean.predict(X) + y_lower = self.model_lower.predict(X) + y_upper = self.model_upper.predict(X) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + return res + from collections import namedtuple + +import botorch +import gpytorch +import numpy as np +import torch +from botorch.models import SingleTaskGP +from botorch.utils.transforms import normalize +from gpytorch.constraints import GreaterThan +from scipy.stats import norm +from sklearn.preprocessing import StandardScaler + +from uq360.algorithms.builtinuq import BuiltinUQ + +np.random.seed(42) +torch.manual_seed(42) + + +class HomoscedasticGPRegression(BuiltinUQ): + """""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise. + + References: + .. 
[1] https://botorch.org/api/models.html#singletaskgp + + """""" + + def __init__(self, + kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()), + likelihood=None, + config=None): + """""" + Args: + kernel: gpytorch kernel function with default set to `RBFKernel` with output scale. + likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`. + config: dictionary containing the config parameters for the model. + """""" + + super(HomoscedasticGPRegression).__init__() + self.config = config + self.kernel = kernel + self.likelihood = likelihood + self.model = None + self.scaler = StandardScaler() + self.X_bounds = None + + def get_params(self, deep=True): + return {""kernel"": self.kernel, ""likelihood"": self.likelihood, ""config"": self.config} + + def fit(self, X, y, **kwargs): + """""" + Fit the GP Regression model. + + Additional arguments relevant for SingleTaskGP fitting can be passed to this function. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + **kwargs: Additional arguments relevant for SingleTaskGP fitting. + + Returns: + self + + """""" + y = self.scaler.fit_transform(y) + X, y = torch.tensor(X), torch.tensor(y) + self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]), + X.max() * torch.ones(X.shape[1])]) + + X = normalize(X, X_bounds) + + model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs) + model_homo.likelihood.noise_covar.register_constraint(""raw_noise"", GreaterThan(1e-5)) + model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo) + botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik) + + model_homo_marginal_log_lik.eval() + + self.model = model_homo_marginal_log_lik + self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze() + + return self + + def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + return_epistemic: if True, the epistemic upper and lower bounds are returned. + return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions + is returned. + + Returns: + namedtuple: A namedtuple that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. 
+ y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. + """""" + X = torch.tensor(X) + + X_test_norm = normalize(X, self.X_bounds) + + self.model.eval() + with torch.no_grad(): + posterior = self.model.model.posterior(X_test_norm) + y_mean = posterior.mean + #y_epi_std = torch.sqrt(posterior.variance) + y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region() + + predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True) + #y_std = torch.sqrt(predictive_posterior.variance) + y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region() + + y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \\ + self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze() + + y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0 + y_std = (y_upper_total - y_lower_total) / 4.0 + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_epistemic: + Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',)) + res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + if return_epistemic_dists: + epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_epistemic_dists',)) + res = Result(*res, y_epistemic_dists=epi_dists) + + return res + + from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression from .ucc_recalibration import UCCRecalibration + from collections import namedtuple + +from uq360.algorithms.posthocuq import PostHocUQ +from uq360.utils.misc import form_D_for_auucc +from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve + + +class UCCRecalibration(PostHocUQ): + """""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve. + """""" + + def __init__(self, base_model): + """""" + Args: + base_model: pretrained model to be recalibrated. + """""" + super(UCCRecalibration).__init__() + self.base_model = self._process_pretrained_model(base_model) + self.ucc = None + + def get_params(self, deep=True): + return {""base_model"": self.base_model} + + def _process_pretrained_model(self, base_model): + return base_model + + def fit(self, X, y): + """""" + Fit the Uncertainty Characteristics Curve. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. 
+ y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] + bwu = y_pred_upper - y_pred_mean + bwl = y_pred_mean - y_pred_lower + self.ucc = UncertaintyCharacteristicsCurve() + self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze()) + + return self + + def predict(self, X, missrate=0.05): + """""" + Generate prediction and uncertainty bounds for data X. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + missrate: desired missrate of the new operating point, set to 0.05 by default. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + """""" + C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False) + new_scale = C['modvalue'] + + y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] + bwu = y_pred_upper - y_pred_mean + bwl = y_pred_mean - y_pred_lower + + if C['operation'] == 'bias': + calib_y_pred_upper = y_pred_mean + (new_scale + bwu) # lower bound width + calib_y_pred_lower = y_pred_mean - (new_scale + bwl) # Upper bound width + else: + calib_y_pred_upper = y_pred_mean + (new_scale * bwu) # lower bound width + calib_y_pred_lower = y_pred_mean - (new_scale * bwl) # Upper bound width + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper) + + return res + from collections import namedtuple + +import numpy as np + +from sklearn.calibration import CalibratedClassifierCV +from sklearn.preprocessing import LabelEncoder + +from uq360.utils.misc import DummySklearnEstimator +from uq360.algorithms.posthocuq import PostHocUQ + + +class ClassificationCalibration(PostHocUQ): + """"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows + non-sklearn models to be calibrated" +". + + """""" + def __init__(self, num_classes, fit_mode=""features"", method='isotonic', base_model_prediction_func=None): + """""" + + Args: + num_classes: number of classes. + fit_mode: features or probs. If probs the `fit` and `predict` operate on the base models probability scores, + useful when these are precomputed. + method: isotonic or sigmoid. + base_model_prediction_func: the function that takes in the input features and produces base model's + probability scores. This is ignored when operating in `probs` mode. + """""" + super(ClassificationCalibration).__init__() + if fit_mode == ""probs"": + # In this case, the fit assumes that it receives the probability scores of the base model. 
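+ # the identity lambda passed below makes the wrapper return those scores unchanged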
+ # create a dummy estimator + self.base_model = DummySklearnEstimator(num_classes, lambda x: x) + else: + self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func) + self.method = method + + def get_params(self, deep=True): + return {""num_classes"": self.num_classes, ""fit_mode"": self.fit_mode, ""method"": self.method, + ""base_model_prediction_func"": self.base_model_prediction_func} + + def _process_pretrained_model(self, base_model): + return base_model + + def fit(self, X, y): + """""" Fits calibration model using the provided calibration set. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + self.base_model.label_encoder_ = LabelEncoder().fit(y) + self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model, + cv=""prefit"", + method=self.method) + self.calib_model.fit(X, y) + + return self + + def predict(self, X): + """""" + Obtain calibrated predictions for the test points. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + + Returns: + namedtuple: A namedtupe that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_prob: ndarray of shape (n_samples, n_classes) + Predicted probability scores of the classes. + + """""" + y_prob = self.calib_model.predict_proba(X) + if len(np.shape(y_prob)) == 1: + y_pred_labels = y_prob > 0.5 + + else: + y_pred_labels = np.argmax(y_prob, axis=1) + + Result = namedtuple('res', ['y_pred', 'y_prob']) + res = Result(y_pred_labels, y_prob) + + return res + from .classification_calibration import ClassificationCalibration + import numpy as np +from scipy.stats import norm +from sklearn.metrics import mean_squared_error, r2_score + +from ..utils.misc import fitted_ucc_w_nullref + + +def picp(y_true, y_lower, y_upper): + """""" + Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the grounds truth lies + within predicted interval. Measures the prediction interval calibration for regression. + + Args: + y_true: Ground truth + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: the fraction of samples for which the grounds truth lies within predicted interval. + """""" + satisfies_upper_bound = y_true <= y_upper + satisfies_lower_bound = y_true >= y_lower + return np.mean(satisfies_upper_bound * satisfies_lower_bound) + + +def mpiw(y_lower, y_upper): + """""" + Mean Prediction Interval Width (MPIW). Computes the average width of the the prediction intervals. Measures the + sharpness of intervals. + + Args: + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: the average width the prediction interval across samples. + """""" + return np.mean(np.abs(y_lower - y_upper)) + + +def auucc_gain(y_true, y_mean, y_lower, y_upper): + """""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference + with constant band. 
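+ The gain is the relative difference between the two areas (the model curve versus a constant-band null reference fitted on the same data).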
+ + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: AUUCC gain + + """""" + u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper) + auucc = u.get_AUUCC() + assert(isinstance(auucc, list) and len(auucc) == 2), ""Failed to calculate auucc gain"" + assert (not np.isclose(auucc[1], 0.)), ""Failed to calculate auucc gain"" + auucc_gain = (auucc[1]-auucc[0])/auucc[0] + return auucc_gain + + +def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper): + """""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean. + + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: nll + + """""" + y_std = (y_upper - y_lower) / 4.0 + nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze())) + return nll + + +def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option=""all"", nll_fn=None): + """""" + Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes + the [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] metrics. + + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + option: string or list of string contained the name of the metrics to be computed. + nll_fn: function that evaluates NLL, if None, then computes Gaussian NLL using y_mean and y_lower. + + Returns: + dict: dictionary containing the computed metrics. + """""" + + assert y_true.shape == y_mean.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_mean.shape) + assert y_true.shape == y_lower.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_lower.shape) + assert y_true.shape == y_upper.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_upper.shape) + + results = {} + if not isinstance(option, list): + if option == ""all"": + option_list = [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] + else: + option_list = [option] + + if ""rmse"" in option_list: + results[""rmse""] = mean_squared_error(y_true, y_mean, squared=False) + if ""nll"" in option_list: + if nll_fn is None: + nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper) + results[""nll""] = nll + else: + results[""nll""] = np.mean(nll_fn(y_true)) + if ""auucc_gain"" in option_list: + gain = auucc_gain(y_true, y_mean, y_lower, y_upper) + results[""auucc_gain""] = gain + if ""picp"" in option_list: + results[""picp""] = picp(y_true, y_lower, y_upper) + if ""mpiw"" in option_list: + results[""mpiw""] = mpiw(y_lower, y_upper) + if ""r2"" in option_list: + results[""r2""] = r2_score(y_true, y_mean) + + return results + + +def _check_not_tuple_of_2_elements(obj, obj_name='obj'): + """"""Check object is not tuple or does not have 2 elements."""""" + if not isinstance(obj, tuple) or len(obj) != 2: + raise TypeError('%s must be a tuple of 2 elements.' % obj_name) + + +def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7, + ax=None, figsize=None, dpi=None, + title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs): + """""" + Plot the uncertainty distribution for a single distribution. + + Args: + dist: scipy.stats._continuous_distns. + A scipy distribution object. + show_quantile_dots: boolean. 
+ Whether to show quantil dots on top of the density plot. + qd_sample: int. + Number of dots for the quantile dot plot. + qd_bins: int. + Number of bins for the quantile dot plot. + ax: matplotlib.axes.Axes or None, optional (default=None). + Target axes instance. If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). + Figure size. + dpi : int or None, optional (default=None). + Resolution of the figure. + title : string or None, optional (default=Prediction Distribution) + Axes title. + If None, title is disabled. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + xlabel : string or None, optional (default=Prediction) + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional (default=Density) + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with prediction distribution. + """""" + + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100) + ax.plot(x, dist.pdf(x), **kwargs) + + if show_quantile_dots: + from matplotlib.patches import Circle + from matplotlib.collections import PatchCollection + import matplotlib.ticker as ticker + + data = dist.rvs(size=10000) + p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample) + x_ = np.percentile(data, p_less_than_x * 100) # Inverce CDF (ppf) + # Create bins + hist = np.histogram(x_, bins=qd_bins) + bins, edges = hist + radius = (edges[1] - edges[0]) / 2 + + ax2 = ax.twinx() + patches = [] + max_y = 0 + for i in range(qd_bins): + x_bin = (edges[i + 1] + edges[i]) / 2 + y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])] + + max_y = max(y_bins) if max(y_bins) > max_y else max_y + + for _, y_bin in enumerate(y_bins): + circle = Circle((x_bin, y_bin), radius) + patches.append(circle) + + p = PatchCollection(patches, alpha=0.4) + ax2.add_collection(p) + + # Axis tweek + y_scale = (max_y + radius) / max(dist.pdf(x)) + ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x_ / y_scale)) + ax2.yaxis.set_major_formatter(ticks_y) + ax2.set_yticklabels([]) + if xlims is not None: + ax2.set_xlim(left=xlims[0], right=xlims[1]) + else: + ax2.set_xlim([min(x_) - radius, max(x) + radius]) + ax2.set_ylim([0, max_y + radius]) + ax2.set_aspect(1) + + if title is not None: + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + + return ax + + +def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10, + ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale=""linear"", + title=None, xlabel=None, ylabel=None): + """""" + Plot how prediction uncertainty varies across the entire range of a feature. + + Args: + x_test: One dimensional ndarray. + Feature column of the test dataset. + y_test: One dimensional ndarray. + Ground truth label of the test dataset. + y_test_pred_lower_total: One dimensional ndarray. + Lower bound of the total uncertainty range. + y_test_pred_upper_total: One dimensional ndarray. + Upper bound of the total uncertainty range. + num_bins: int. + Number of bins used to discritize x_test into equal-sample-sized bins. + ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. 
If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). Figure size. + dpi : int or None, optional (default=None). Resolution of the figure. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``. + xscale: Passed to ``ax.set_xscale()``. + title : string or None, optional + Axes title. + If None, title is disabled. + xlabel : string or None" +", optional + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature. + + """""" + from scipy.stats.mstats import mquantiles + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x_uniques_sorted = np.sort(np.unique(x_test)) + + num_unique = len(x_uniques_sorted) + sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test) + if len(x_uniques_sorted) > 10: # bin the values + q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:]) + q_sample_bin_ids = np.digitize(x_test, q_bins) + picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin], + y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)]) + unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins) + picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)] + picp_replicated = np.array([item for sublist in picp_replicated for item in sublist]) + else: + picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin], + y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)]) + picp_replicated = picps + + ax.plot(x_uniques_sorted, picp_replicated, label='PICP') + ax.axhline(0.95, linestyle='--', label='95%') + ax.set_ylabel('PICP') + + ax.legend(loc='best') + + if title is None: + title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format( + picp(y_test, + y_test_pred_lower_total, + y_test_pred_upper_total), + mpiw(y_test_pred_lower_total, + y_test_pred_upper_total)) + + if xlims is not None: + ax.set_xlim(left=xlims[0], right=xlims[1]) + + if ylims is not None: + ax.set_ylim(bottom=ylims[0], top=ylims[1]) + + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + if xscale is not None: + ax.set_xscale(xscale) + + return ax + + +def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total, + y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None, + ax=None, figsize=None, dpi=None, xlims=None, xscale=""linear"", + title=None, xlabel=None, ylabel=None): + """""" + Plot how prediction uncertainty varies across the entire range of a feature. + + Args: + x_test: one dimensional ndarray. + Feature column of the test dataset. + y_test_pred_mean: One dimensional ndarray. + Model prediction for the test dataset. + y_test_pred_lower_total: One dimensional ndarray. + Lower bound of the total uncertainty range. + y_test_pred_upper_total: One dimensional ndarray. + Upper bound of the total uncertainty range. + y_test_pred_lower_epistemic: One dimensional ndarray. 
+ Lower bound of the epistemic uncertainty range. + y_test_pred_upper_epistemic: One dimensional ndarray. + Upper bound of the epistemic uncertainty range. + ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). Figure size. + dpi : int or None, optional (default=None). Resolution of the figure. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + xscale: Passed to ``ax.set_xscale()``. + title : string or None, optional + Axes title. + If None, title is disabled. + xlabel : string or None, optional + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature. + + """""" + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x_uniques_sorted = np.sort(np.unique(x_test)) + + y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2 + agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted]) + agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted]) + + ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction') + ax.fill_between(x_uniques_sorted, + agg_y_mean - 2.0 * agg_y_std, + agg_y_mean + 2.0 * agg_y_std, + alpha=0.3, label='total uncertainty') + + if y_test_pred_lower_epistemic is not None: + y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2 + agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted]) + ax.fill_between(x_uniques_sorted, + agg_y_mean - 2.0 * agg_y_std_epistemic, + agg_y_mean + 2.0 * agg_y_std_epistemic, + alpha=0.3, label='model uncertainty') + + ax.legend(loc='best') + + if xlims is not None: + ax.set_xlim(left=xlims[0], right=xlims[1]) + + if title is not None: + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + if xscale is not None: + ax.set_xscale(xscale) + + return ax + import numpy as np +import pandas as pd +from scipy.stats import entropy +from sklearn.metrics import roc_auc_score, log_loss, accuracy_score + + +def entropy_based_uncertainty_decomposition(y_prob_samples): + """""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components. + + References: + .. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of + uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference + on Machine Learning (pp. 1184-1193). PMLR. + + Args: + y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities + corresponding to samples from the model posterior. + + Returns: + tuple: + - total_uncertainty: entropy of the predictive distribution. + - aleatoric_uncertainty: aleatoric component of the total_uncertainty. + - epistemic_uncertainty: epistemic component of the total_uncertainty. 
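+
+ Note: by construction the aleatoric and epistemic components sum to the total uncertainty for every sample.
+
+ A minimal illustrative sketch (the sampling model and names are placeholders, not part of this module):
+ posterior_probs = [stochastic_model.predict_proba(X_test) for _ in range(20)]  # e.g. MC-dropout passes
+ total, aleatoric, epistemic = entropy_based_uncertainty_decomposition(posterior_probs)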
+ + """""" + y_preds_samples_stacked = np.stack(y_prob_samples) + preds_mean = np.mean(y_preds_samples_stacked, 0) + + total_uncertainty = entropy(preds_mean, axis=1) + aleatoric_uncertainty = np.mean( + np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1), + axis=1) + epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty + + return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty + + +def multiclass_brier_score(y_true, y_prob): + """"""Brier score for multi-class. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + + Returns: + float: Brier score. + + """""" + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + + y_target = np.zeros_like(y_prob) + y_target[:, y_true] = 1.0 + return np.mean(np.sum((y_target - y_prob) ** 2, axis=1)) + + +def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score, + attributes=None, num_bins=10, subgroup_ids=None, + return_counts=False): + """""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where + coverage instead of rejection rate is used. + + References: + .. [3] Franc, Vojtech, and Daniel Prusa. ""On discriminative learning of prediction uncertainty."" + In International Conference on Machine Learning, pp. 1963-1971. 2019. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like of shape (n_samples,) + predicted labels. + selection_scores: scores corresponding to certainty in the predicted labels. + risk_func: risk function under consideration. + attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. + num_bins: number of bins. + subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids. + return_counts: set to True to return counts also. + + Returns: + float or tuple: + - aurrrc (float): area under risk rejection rate curve. + - rejection_rates (list): rejection rates for each bin (returned only if return_counts is True). + - selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True). + - risks (list): risk in each bin (returned only if return_counts is True). 
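+
+ Note: samples are ranked by selection score (most confident first); each bin keeps the top fraction of samples above its threshold, the risk is evaluated on that retained set, and aurrrc is the mean risk across the num_bins rejection rates.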
+ + """""" + + if selection_scores is None: + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)] + + if y_pred is None: + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + y_pred = np.argmax(y_prob, axis=1) + + order = np.argsort(selection_scores)[::-1] + + rejection_rates = [] + selection_thresholds = [] + risks = [] + for bin_id in range(num_bins): + samples_in_bin = len(y_true) // num_bins + selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]] + selection_thresholds.append(selection_threshold) + ids = selection_scores >= selection_threshold + if sum(ids) > 0: + if attributes is None: + if isinstance(y_true, pd.Series): + y_true_numpy = y_true.values + else: + y_true_numpy = y_true + if subgroup_ids is None: + risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids]) + else: + if sum(subgroup_ids & ids) > 0: + risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids]) + else: + risk_value = 0.0 + else: + risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes) + else: + risk_value = 0.0 + risks.append(risk_value) + rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true)) + + aurrrc = np.nanmean(risks) + + if not return_counts: + return aurrrc + else: + return aurrrc, rejection_rates, selection_thresholds, risks + + +def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False): + """""" Computes the reliability curve and the expected calibration error [1]_ . + + References: + .. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference + on Machine Learning, PMLR 70:1321-1330, 2017. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y" +"_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like of shape (n_samples,) + predicted labels. + num_bins: number of bins. + return_counts: set to True to return counts also. + + Returns: + float or tuple: + - ece (float): expected calibration error. + - confidences_in_bins: average confidence in each bin (returned only if return_counts is True). + - accuracies_in_bins: accuracy in each bin (returned only if return_counts is True). + - frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True). 
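+
+ The summary value follows the standard definition: ece = sum over bins of (n_bin / n_samples) * |accuracy_bin - confidence_bin|, where the bins partition the top predicted-class scores.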
+ + """""" + + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + num_samples, num_classes = y_prob.shape + top_scores = np.max(y_prob, axis=1) + + if y_pred is None: + y_pred = np.argmax(y_prob, axis=1) + + if num_classes == 2: + bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0)) + else: + bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0)) + + non_boundary_bin_edges = bins_edges[1:-1] + bin_centers = (bins_edges[1:] + bins_edges[:-1])/2 + + sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges) + + num_samples_in_bins = np.zeros(num_bins) + accuracies_in_bins = np.zeros(num_bins) + confidences_in_bins = np.zeros(num_bins) + + for bin in range(num_bins): + num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin]) + if num_samples_in_bins[bin] > 0: + accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin] + confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin] + + ece = np.sum( + num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples + ) + frac_samples_in_bins = num_samples_in_bins / num_samples + + if not return_counts: + return ece + else: + return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers + + +def compute_classification_metrics(y_true, y_prob, option='all'): + """""" + Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes + the [aurrrc, ece, auroc, nll, brier, accuracy] metrics. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + option: string or list of string contained the name of the metrics to be computed. + + Returns: + dict: a dictionary containing the computed metrics. + """""" + results = {} + if not isinstance(option, list): + if option == ""all"": + option_list = [""aurrrc"", ""ece"", ""auroc"", ""nll"", ""brier"", ""accuracy""] + else: + option_list = [option] + + if ""aurrrc"" in option_list: + results[""aurrrc""] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob) + if ""ece"" in option_list: + results[""ece""] = expected_calibration_error(y_true=y_true, y_prob=y_prob) + if ""auroc"" in option_list: + results[""auroc""], _ = roc_auc_score(y_true=y_true, y_score=y_prob) + if ""nll"" in option_list: + results[""nll""] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) + if ""brier"" in option_list: + results[""brier""] = multiclass_brier_score(y_true=y_true, y_prob=y_prob) + if ""accuracy"" in option_list: + results[""accuracy""] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) + + return results + + +def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""""], num_bins=10): + """""" + Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves + can be plot by passing data as lists. + + Args: + y_true: array-like or or a list of array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like or or a list of array-like of shape (n_samples,) + predicted labels. + plot_label: (optional) list of names identifying each curve. + num_bins: number of bins. 
+ + Returns: + tuple: + - ece_list: ece: list containing expected calibration error for each curve. + - accuracies_in_bins_list: list containing binned average accuracies for each curve. + - frac_samples_in_bins_list: list containing binned sample frequencies for each curve. + - confidences_in_bins_list: list containing binned average confidence for each curve. + """""" + import matplotlib.pyplot as plt + + if not isinstance(y_true, list): + y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred] + if len(plot_label) != len(y_true): + raise ValueError('y_true and plot_label should be of same length.') + + ece_list = [] + accuracies_in_bins_list = [] + frac_samples_in_bins_list = [] + confidences_in_bins_list = [] + + for idx in range(len(plot_label)): + ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx], + y_prob[idx], + y_pred[idx], + num_bins=num_bins, + return_counts=True) + ece_list.append(ece) + accuracies_in_bins_list.append(accuracies_in_bins) + frac_samples_in_bins_list.append(frac_samples_in_bins) + confidences_in_bins_list.append(confidences_in_bins) + + fig = plt.figure(figsize=(12, 5)) + + plt.subplot(1, 2, 1) + for idx in range(len(plot_label)): + plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx]) + plt.title(""Confidence Histogram"") + plt.xlabel(""Confidence"") + plt.ylabel(""Fraction of Samples"") + plt.grid() + plt.ylim([0.0, 1.0]) + plt.legend() + + plt.subplot(1, 2, 2) + for idx in range(len(plot_label)): + plt.plot(bins, accuracies_in_bins_list[idx], 'o-', + label=""{} ECE = {:.2f}"".format(plot_label[idx], ece_list[idx])) + plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label=""Perfect Calibration"") + plt.title(""Reliability Plot"") + plt.xlabel(""Confidence"") + plt.ylabel(""Accuracy"") + plt.grid() + plt.legend() + + plt.show() + + return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list + + +def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""""], risk_func=None, + attributes=None, num_bins=10, subgroup_ids=None): + """""" + Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves + can be plot by passing data as lists. + + Args: + y_true: array-like or or a list of array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like or or a list of array-like of shape (n_samples,) + predicted labels. + selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels. + risk_func: risk function under consideration. + attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. + num_bins: number of bins. + subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a + subgroup of the samples specified by subgroup_ids. + + Returns: + tuple: + - aurrrc_list: list containing the area under risk rejection rate curves. + - rejection_rate_list: list containing the binned rejection rates. + - selection_thresholds_list: list containing the binned selection thresholds. + - risk_list: list containing the binned risks. 
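+
+ Note: when the inputs are passed as lists (one entry per curve), selection_scores and subgroup_ids must be lists of the same length as well; individual entries may be None.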
+ """""" + import matplotlib.pyplot as plt + + if not isinstance(y_true, list): + y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids] + if len(plot_label) != len(y_true): + raise ValueError('y_true and plot_label should be of same length.') + + aurrrc_list = [] + rejection_rate_list = [] + risk_list = [] + selection_thresholds_list = [] + + for idx in range(len(plot_label)): + aursrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve( + y_true[idx], + y_prob[idx], + y_pred[idx], + selection_scores=selection_scores[idx], + risk_func=risk_func, + attributes=attributes, + num_bins=num_bins, + subgroup_ids=subgroup_ids[idx], + return_counts=True + ) + + aurrrc_list.append(aursrc) + rejection_rate_list.append(rejection_rates) + risk_list.append(risks) + selection_thresholds_list.append(selection_thresholds) + + plt.figure(figsize=(12, 5)) + + plt.subplot(1, 2, 1) + for idx in range(len(plot_label)): + plt.plot(rejection_rate_list[idx], risk_list[idx], label=""{} AURRRC={:.5f}"".format(plot_label[idx], aurrrc_list[idx])) + + plt.legend(loc=""best"") + plt.xlabel(""Rejection Rate"") + if risk_func is None: + ylabel = ""Prediction Error Rate"" + else: + if 'accuracy' in risk_func.__name__: + ylabel = ""1.0 - "" + risk_func.__name__ + else: + ylabel = risk_func.__name__ + + plt.ylabel(ylabel) + plt.title(""Risk vs Rejection Rate Plot"") + plt.grid() + + plt.subplot(1, 2, 2) + for idx in range(len(plot_label)): + plt.plot(selection_thresholds_list[idx], risk_list[idx], label=""{}"".format(plot_label[idx])) + + plt.legend(loc=""best"") + plt.xlabel(""Selection Threshold"") + if risk_func is None: + ylabel = ""Prediction Error Rate"" + else: + if 'accuracy' in risk_func.__name__: + ylabel = ""1.0 - "" + risk_func.__name__ + else: + ylabel = risk_func.__name__ + + plt.ylabel(ylabel) + plt.title(""Risk vs Selection Threshold Plot"") + plt.grid() + + plt.show() + + return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list + from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \\ + compute_classification_metrics, entropy_based_uncertainty_decomposition +from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \\ + plot_uncertainty_by_feature, plot_picp_by_feature +from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve + from copy import deepcopy + +import matplotlib.pyplot as plt +import numpy as np +from scipy.integrate import simps, trapz +from sklearn.isotonic import IsotonicRegression + +DEFAULT_X_AXIS_NAME = 'excess' +DEFAULT_Y_AXIS_NAME = 'missrate' + + +class UncertaintyCharacteristicsCurve: + """""" + Class with main functions of the Uncertainty Characteristics Curve (UCC). + + """""" + + def __init__(self, normalize=True, precompute_bias_data=True): + """""" + :param normalize: set initial axes normalization flag (can be changed via set_coordinates()) + :param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based + UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call + if bias-based UCC is not needed. 
+ + """""" + self.axes_name2idx = {""missrate"": 1, ""bandwidth"": 2, ""excess"": 3, ""deficit"": 4} + self.axes_idx2descr = {1: ""Missrate"", 2: ""Bandwidth"", 3: ""Excess"", 4: ""Deficit""} + self.x_axis_idx = None + self.y_axis_idx = None + self.norm_x_axis = False + self.norm_y_axis = False + self.std_unit = None + self." +"normalize = normalize + self.d = None + self.gt = None + self.lb = None + self.ub = None + self.precompute_bias_data = precompute_bias_data + self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize) + + def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None): + """""" + Assigns user-specified type to the axes and normalization behavior (sticky). + + :param x_axis_name: None-> unchanged, or name from self.axes_name2idx + :param y_axis_name: ditto + :param normalize: True/False will activate/deactivate norming for specified axes. Behavior for + Axes_name that are None will not be changed. + Value None will leave norm status unchanged. + Note, axis=='missrate' will never get normalized, even with normalize == True + :return: none + """""" + normalize = self.normalize if normalize is None else normalize + if x_axis_name is None and self.x_axis_idx is None: + raise ValueError(""ERROR(UCC): x-axis has not been defined."") + if y_axis_name is None and self.y_axis_idx is None: + raise ValueError(""ERROR(UCC): y-axis has not been defined."") + if x_axis_name is None and y_axis_name is None and normalize is not None: + # just set normalization on/off for both axes and return + self.norm_x_axis = False if x_axis_name == 'missrate' else normalize + self.norm_y_axis = False if y_axis_name == 'missrate' else normalize + return + if x_axis_name is not None: + self.x_axis_idx = self.axes_name2idx[x_axis_name] + self.norm_x_axis = False if x_axis_name == 'missrate' else normalize + if y_axis_name is not None: + self.y_axis_idx = self.axes_name2idx[y_axis_name] + self.norm_y_axis = False if y_axis_name == 'missrate' else normalize + + def set_std_unit(self, std_unit=None): + """""" + Sets the UCC's unit to be used when displaying normalized axes. + + :param std_unit: if None, the unit will be calculated as stddev of the ground truth data + (ValueError raised if data has not been set at this point) + or set to the user-specified value. + :return: + """""" + if std_unit is None: # set it to stddev of data + if self.gt is None: + raise ValueError(""ERROR(UCC): No data specified - cannot set stddev unit."") + self.std_unit = np.std(self.gt) + + if np.isclose(self.std_unit, 0.): + print(""WARN(UCC): data-based stddev is zero - resetting axes unit to 1."") + self.std_unit = 1. + else: + self.std_unit = float(std_unit) + + def fit(self, X, gt): + """""" + Calculates internal arrays necessary for other methods (plotting, auc, cost minimization). + Re-entrant. + + :param X: [numsamples, 3] numpy matrix, or list of numpy matrices. + Col 1: predicted values + Col 2: lower band (deviate) wrt predicted value (always positive) + Col 3: upper band wrt predicted value (always positive) + If list is provided, all methods will output corresponding metrics as lists as well! 
+ :param gt: Ground truth array (i.e.,the 'actual' values corresponding to predictions in X + :return: self + + """""" + if not isinstance(X, list): + X = [X] + newX = [] + for x in X: + assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt)) + newX.append(self._sanitize_input(x)) + self.d = [gt - x[:, 0] for x in newX] + self.lb = [x[:, 1] for x in newX] + self.ub = [x[:, 2] for x in newX] + self.gt = gt + self.set_std_unit() + self.plotdata_for_scale = [] + self.plotdata_for_bias = [] + # precompute plotdata: + for i in range(len(self.d)): + self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False)) + if self.precompute_bias_data: + self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True)) + + return self + + def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True, + search=('scale', 'bias')): + """""" + Find minima of a linear cost function for each component. + Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value. + A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg). + The function returns a 'recipe' how to achieve the corresponding minimum, for each component. + + :param x_axis_cost: weight of one unit on x_axis + :param y_axis_cost: weight of one unit on y_axis + :param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be + pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes. + :param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'. + + :return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are - + 'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to + add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that + minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point). + + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if augment_cost_by_normfactor: + if self.norm_x_axis: + x_axis_cost /= self.std_unit + if self.norm_y_axis: + y_axis_cost /= self.std_unit + print(""INFO(UCC): Pre-norming costs by corresp. 
std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f"" % + (x_axis_cost, y_axis_cost)) + if isinstance(search, tuple): + search = list(search) + if not isinstance(search, list): + search = [search] + + min_costs = [] + for d in range(len(self.d)): + # original OP cost + m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d]) + original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][ + self.y_axis_idx] + + plotdata = self.plotdata_for_scale[d] + cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, + x_axis_cost, y_axis_cost) + mcf_scale_multiplier = plotdata[minidx_scale][0] + mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx] + mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx] + + if 'bias' in search: + if not self.precompute_bias_data: + raise ValueError( + ""ERROR(UCC): Cannot perform minimization - instantiated without bias data computation"") + plotdata = self.plotdata_for_bias[d] + cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, + x_axis_cost, y_axis_cost) + mcf_bias_add = plotdata[minidx_bias][0] + mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx] + mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx] + + if 'bias' in search and 'scale' in search: + if cost_bias < cost_scale: + min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, + 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) + else: + min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, + 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) + elif 'scale' in search: + min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, + 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) + elif 'bias' in search: + min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, + 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) + else: + raise ValueError(""(ERROR): Unknown search element (%s) requested."" % "","".join(search)) + + if len(min_costs) < 2: + return min_costs[0] + else: + return min_costs + + def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None, + req_critical_value=None, vary_bias=False): + """""" + Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns + a list of recipes how to achieve the point (x,y), for each component. If there is only one component, + returns a single recipe dict. + + :param req_x_axis_value: requested x value on UCC (normalization status is taken from current display) + :param req_y_axis_value: requested y value on UCC (normalization status is taken from current display) + :param vary_bias: set to True when referring to bias-induced UCC (scale UCC default) + :return: list of dicts (recipes), or a single dict + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1: + raise ValueError(""ERROR(UCC): exactly one axis value must be requested at a time."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + xnorm = self.std_unit if self.norm_x_axis else 1. 
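+ # xnorm/ynorm translate between the displayed (possibly std-normalized) axis units and the raw units stored in plotdata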
+ ynorm = self.std_unit if self.norm_y_axis else 1. + recipe = [] + for dc in range(len(self.d)): + plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc] + if req_x_axis_value is not None: + tgtidx = self.x_axis_idx + req_value = req_x_axis_value * xnorm + elif req_y_axis_value is not None: + tgtidx = self.y_axis_idx + req_value = req_y_axis_value * ynorm + elif req_critical_value is not None: + req_value = req_critical_value + tgtidx = 0 # first element in plotdata is always the critical value (scale of bias) + else: + raise RuntimeError(""Unhandled case"") + closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata])) + recipe.append({'operation': ('bias' if vary_bias else 'scale'), + 'modvalue': plotdata[closestidx][0], + 'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm, + 'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm}) + if len(recipe) < 2: + return recipe[0] + else: + return recipe + + + def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2): + """""" + Find s minimum cost function value and corresp. position index in plotdata + + :param plotdata: liste of tuples + :param idx1: idx of x-axis item within the tuple + :param idx2: idx of y-axis item within the tuple + :param cost1: cost factor for x-axis unit + :param cost2: cost factor for y-axis unit + :return: min cost value, index within plotdata where minimum occurs + """""" + raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata] + minidx = np.argmin(raw) + return raw[minidx], minidx + + def _sanitize_input(self, x): + """""" + Replaces problematic values in input data (e.g, zero error bars) + + :param x: single matrix of input data [n, 3] + :return: sanitized version of x + """""" + if np.isclose(np.sum(x[:, 1]), 0.): + raise ValueError(""ERROR(UCC): Provided lower bands are all zero."") + if np.isclose(np.sum(x[:, 2]), 0.): + raise ValueError(""ERROR(UCC): Provided upper bands are all zero."") + for i in [1, 2]: + if any(np.isclose(x[:, i], 0.)): + print(""WARN(UCC): some band values are 0. 
- REPLACING with positive minimum"") + m = np.min(x[x[:, i] > 0, i]) + x = np.where(np.isclose(x, 0.), m, x) + return x + + def _calc_avg_excess(self, d, lb, ub): + """""" + Excess is amount an error bar overshoots actual + + :param d: pred-actual array + :param lb: lower band + :param ub: upper band + :return: average excess over array +" +" """""" + excess = np.zeros(d.shape) + posidx = np.where(d >= 0)[0] + excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx]) + negidx = np.where(d < 0)[0] + excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx]) + return np.mean(excess) + + def _calc_avg_deficit(self, d, lb, ub): + """""" + Deficit is error bar insufficiency: bar falls short of actual + + :param d: pred-actual array + :param lb: lower band + :param ub: upper band + :return: average deficit over array + """""" + deficit = np.zeros(d.shape) + posidx = np.where(d >= 0)[0] + deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx]) + negidx = np.where(d < 0)[0] + deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx]) + return np.mean(deficit) + + def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0): + """""" + Calculates recall at a given scale/bias, average bandwidth and average excess + + :param d: delta + :param lb: lower band + :param ub: upper band + :param scale: scale * (x + bias) + :param bias: + :return: miss rate, average bandwidth, avg excess, avg deficit + """""" + abslband = scale * np.where((lb + bias) < 0., 0., lb + bias) + absuband = scale * np.where((ub + bias) < 0., 0., ub + bias) + recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d) + avgbandwidth = np.mean([absuband, abslband]) + avgexcess = self._calc_avg_excess(d, abslband, absuband) + avgdeficit = self._calc_avg_deficit(d, abslband, absuband) + return 1 - recall, avgbandwidth, avgexcess, avgdeficit + + def _calc_plotdata(self, d, lb, ub, vary_bias=False): + """""" + Generates data necessary for various UCC metrics. + + :param d: delta (predicted - actual) vector + :param ub: upper uncertainty bandwidth (above predicted) + :param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth) + :param vary_bias: True will switch to additive bias instead of scale + :return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit) + """""" + + # step 1: collect critical scale or bias values + critval = [] + for i in range(len(d)): + if not vary_bias: + if d[i] >= 0: + critval.append(d[i] / ub[i]) + else: + critval.append(-d[i] / lb[i]) + else: + if d[i] >= 0: + critval.append(d[i] - ub[i]) + else: + critval.append(-lb[i] - d[i]) + critval = sorted(critval) + plotdata = [] + for i in range(len(critval)): + if not vary_bias: + missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, + scale=critval[i]) + else: + missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, + bias=critval[i]) + plotdata.append((critval[i], missrate, bandwidth, excess, deficit)) + + return plotdata + + def get_AUUCC(self, vary_bias=False, aucfct=""trapz"", partial_x=None, partial_y=None): + """""" + returns approximate area under the curve on current coordinates, for each component. 
+ + :param vary_bias: False == varies scale, True == varies bias + :param aucfct: specifies AUC integrator (can be ""trapz"", ""simps"") + :param partial_x: tuple (x_min, x_max) defining interval on x to calc a a partial AUC. + The interval bounds refer to axes as visualized (ie. potentially normed) + :param partial_y: tuple (y_min, y_max) defining interval on y to calc a a partial AUC. partial_x must be None. + :return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component. + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + if partial_x is not None and partial_y is not None: + raise ValueError(""ERROR(UCC): partial_x and partial_y can not be specified at the same time."") + assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2)) + assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2)) + + # find starting point (where the x axis value starts to actually change) + rv = [] + # do this for individual streams + xind = self.x_axis_idx + aucfct = simps if aucfct == ""simps"" else trapz + for s in range(len(self.d)): + plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s] + prev = plotdata[0][xind] + t = 1 + cval = plotdata[t][xind] + while cval == prev and t < len(plotdata) - 1: + t += 1 + prev = cval + cval = plotdata[t][xind] + startt = t - 1 # from here, it's a valid function + endtt = len(plotdata) + + if startt >= endtt - 2: + rvs = 0. # no area + else: + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. + y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)] + x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)] + if partial_x is not None: + from_i = self._find_closest_index(partial_x[0], x) + to_i = self._find_closest_index(partial_x[1], x) + 1 + elif partial_y is not None: + from_i = self._find_closest_index(partial_y[0], y) + to_i = self._find_closest_index(partial_y[1], y) + if from_i > to_i: # y is in reverse order + from_i, to_i = to_i, from_i + to_i += 1 # as upper bound in array indexing + else: + from_i = 0 + to_i = len(x) + to_i = min(to_i, len(x)) + if to_i < from_i: + raise ValueError(""ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data."") + if to_i - from_i < 2: + raise RuntimeError(""ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified"") + rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i]) + rv.append(rvs) + if len(rv) < 2: + return rv[0] + else: + return rv + + @ staticmethod + def _find_closest_index(value, array): + """""" + Returns an index of the 'array' element closest in value to 'value' + + :param value: + :param array: + :return: + """""" + return np.argmin(np.abs(np.asarray(array)-value)) + + def _get_single_OP(self, d, lb, ub, scale=1., bias=0.): + """""" + Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias. + + :param scale: + :param bias: + :return: single tuple (x point, y point, unit of x, unit of y) + """""" + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. + auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias) + op = [0.] 
+ [i for i in auxop] # mimic plotdata (first element ignored here) + return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm) + + def get_OP(self, scale=1., bias=0.): + """""" + Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias. + + :param scale: + :param bias: + :return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only + 1 component. + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + op = [] + for dc in range(len(self.d)): + op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias)) + if len(op) < 2: + return op[0] + else: + return op + + def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None, + xlim=None, ylim=None, **kwargs): + """""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown + if there are multiple data components (via fit()) + + :param titlestr: Plot title string + :param syslabel: list is label strings to appear in the plot legend. Can be single, if one component. + :param outfn: base name of an image file to be created (will append .png before creating) + :param vary_bias: True will switch to varying additive bias (default is multiplicative scale) + :param markers: None or a list of marker styles to be used for each curve. + List must be same or longer than number of components. + Markers can be one among these ['o', 's', 'v', 'D', '+']. + :param xlim: tuples or lists of specifying the range for the x axis, or None (auto) + :param ylim: tuples or lists of specifying the range for the y axis, or None (auto) + :param `**kwargs`: Additional arguments passed to the main plot call. + + :return: list of areas under the curve (or single area, if one data component) + list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit) + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + if not isinstance(syslabel, list): + syslabel = [syslabel] + assert (len(syslabel) == len(self.d)) + assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d))) + # main plot of (possibly multiple) datasets + plt.figure() + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. 
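+ # draw one curve per fitted data component, labelling each with its AUUCC, and mark its original operating point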
+ op_info = [] + auucc = self.get_AUUCC(vary_bias=vary_bias) + auucc = [auucc] if not isinstance(auucc, list) else auucc + for s in range(len(self.d)): + # original operating point + x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s]) + op_info.append((x_op, y_op, x_unit, y_unit)) + # display chart + plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s] + axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata] + axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata] + marker = None + if markers is not None: marker = markers[s] + p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + ("" (AUC=%.3f)"" % auucc[s]), marker=marker, **kwargs) + if s + 1 == len(self.d): + oplab = 'OP' + else: + oplab = None + plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w', + markeredgewidth=1.5, markeredgecolor=p[0].get_color()) + axisX_label = self.axes_idx2descr[self.x_axis_idx] + axisY_label = self.axes_idx2descr[self.y_axis_idx] + axisX_units = ""(raw)"" if np.isclose(xnorm, 1.0) else ""[in std deviations]"" + axisY_units = ""(raw)"" if np.isclose(ynorm, 1.0) else ""[in std deviations]"" + axisX_label += ' ' + axisX_units + axisY_label += ' ' + axisY_units + if ylim is not None: + plt.ylim(ylim) + if xlim is not None: + plt.xlim(xlim) + plt.xlabel(axisX_label) + plt.ylabel(axisY_label) + plt.legend() + plt.title(titlestr) + plt.grid() + if outfn is None: + plt.show() + else: + plt.savefig(outfn) + if len(auucc) < 2: + auucc = auucc[0] + op_info = op_info[0] + return auucc, op_info + from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve + import torch +import torch.nn.functional as F +from uq3" +"60.models.noise_models.heteroscedastic_noise_models import GaussianNoise + +class GaussianNoiseMLPNet(torch.nn.Module): + + def __init__(self, num_features, num_outputs, num_hidden): + super(GaussianNoiseMLPNet, self).__init__() + self.fc = torch.nn.Linear(num_features, num_hidden) + self.fc_mu = torch.nn.Linear(num_hidden, num_outputs) + self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs) + self.noise_layer = GaussianNoise() + + def forward(self, x): + x = F.relu(self.fc(x)) + mu = self.fc_mu(x) + log_var = self.fc_log_var(x) + return mu, log_var + + def loss(self, y_true=None, mu_pred=None, log_var_pred=None): + return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True) """""" + Contains implementations of various utilities used by Horseshoe Bayesian layers +"""""" +import numpy as np +import torch +from torch.nn import Parameter + +td = torch.distributions +gammaln = torch.lgamma + + +def diag_gaussian_entropy(log_std, D): + return 0.5 * D * (1.0 + torch.log(2 * np.pi)) + torch.sum(log_std) + + +def inv_gamma_entropy(a, b): + return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a)) + + +def log_normal_entropy(log_std, mu, D): + return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi) + + +class InvGammaHalfCauchyLayer(torch.nn.Module): + """""" + Uses the inverse Gamma parameterization of the half-Cauchy distribution. + a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an + auxiliary latent variable. + Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat). + This layer places a half Cauchy prior on the scales of each output node of the layer. 
+ """""" + def __init__(self, out_features, b): + """""" + :param out_fatures: number of output nodes in the layer. + :param b: scale of the half Cauchy + """""" + super(InvGammaHalfCauchyLayer, self).__init__() + self.b = b + self.out_features = out_features + # variational parameters for q(ln a^2) + self.mu = Parameter(torch.FloatTensor(out_features)) + self.log_sigma = Parameter(torch.FloatTensor(out_features)) + # self.log_sigma = torch.FloatTensor(out_features) + # variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters. + self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1. + self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2) + self.const = torch.FloatTensor([0.5]) + self.initialize_from_prior() + + def initialize_from_prior(self): + """""" + Initializes variational parameters by sampling from the prior. + """""" + # sample from half cauchy and log to initialize the mean of the log normal + sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features))) + self.mu.data = torch.FloatTensor(np.log(sample)) + self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.) + + def expectation_wrt_prior(self): + """""" + Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)] + """""" + expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + ( + -0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat) + expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * ( + torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat) + return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda) + + def entropy(self): + """""" + Computes entropy of q(ln a^2) and q(lambda) + """""" + return self.entropy_lambda() + self.entropy_a2() + + def entropy_lambda(self): + return inv_gamma_entropy(self.ahat, self.bhat) + + def entropy_a2(self): + return log_normal_entropy(self.log_sigma, self.mu, self.out_features) + + def kl(self): + """""" + Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2)) + """""" + return -self.expectation_wrt_prior() - self.entropy() + + def fixed_point_updates(self): + # update lambda moments + self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. / self.b ** 2) + + +class InvGammaLayer(torch.nn.Module): + """""" + Approximates the posterior of c^2 with prior IGamma(c^2 | a , b) + using a log Normal approximation q(ln c^2) = N(mu, sigma^2) + """""" + + def __init__(self, a, b, out_features=1): + super(InvGammaLayer, self).__init__() + self.a = torch.FloatTensor([a]) + self.b = torch.FloatTensor([b]) + # variational parameters for q(ln c^2) + self.mu = Parameter(torch.FloatTensor(out_features)) + self.log_sigma = Parameter(torch.FloatTensor(out_features)) + self.out_features = out_features + self.initialize_from_prior() + + def initialize_from_prior(self): + """""" + Initializes variational parameters by sampling from the prior. + """""" + self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode + self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.) 
+ + def expectation_wrt_prior(self): + """""" + Computes E[ln p(c^2 | a, b)] + """""" + # return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + ( + # - self.c_a - 1) * c_mu - self.c_b * Ecinv + return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \\ + * self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + + def entropy(self): + return log_normal_entropy(self.log_sigma, self.mu, 1) + + def kl(self): + """""" + Computes KL(q(ln(c^2) || IG(c^2 | a, b)) + """""" + return -self.expectation_wrt_prior().sum() - self.entropy() + """""" + Contains implementations of various Bayesian layers +"""""" +import numpy as np +import torch +import torch.nn.functional as F +from torch.nn import Parameter + +from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer + +td = torch.distributions + + +def reparam(mu, logvar, do_sample=True, mc_samples=1): + if do_sample: + std = torch.exp(0.5 * logvar) + eps = torch.FloatTensor(std.size()).normal_() + sample = mu + eps * std + for _ in np.arange(1, mc_samples): + sample += mu + eps * std + return sample / mc_samples + else: + return mu + + +class BayesianLinearLayer(torch.nn.Module): + """""" + Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and + fully factorized variational Gaussian approximation + """""" + + def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None): + super(BayesianLinearLayer, self).__init__() + self.cuda = cuda + self.in_features = in_features + self.out_features = out_features + + # weight mean params + self.weights = Parameter(torch.Tensor(out_features, in_features)) + self.bias = Parameter(torch.Tensor(out_features)) + # weight variance params + self.weights_logvar = Parameter(torch.Tensor(out_features, in_features)) + self.bias_logvar = Parameter(torch.Tensor(out_features)) + + # numerical stability + self.fudge_factor = 1e-8 + if not prior_stdv: + # We will use a N(0, 1/num_inputs) prior over weights + self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))]) + else: + self.prior_stdv = torch.FloatTensor([prior_stdv]) + # self.prior_stdv = torch.Tensor([1. 
/ np.sqrt(1e+3)]) + self.prior_mean = torch.FloatTensor([0.]) + # for Bias use a prior of N(0, 1) + self.prior_bias_stdv = torch.FloatTensor([1.]) + self.prior_bias_mean = torch.FloatTensor([0.]) + + # init params either random or with pretrained net + self.init_parameters(init_weight, init_bias) + + def init_parameters(self, init_weight, init_bias): + # init means + if init_weight is not None: + self.weights.data = torch.Tensor(init_weight) + else: + self.weights.data.normal_(0, np.float(self.prior_stdv.numpy()[0])) + + if init_bias is not None: + self.bias.data = torch.Tensor(init_bias) + else: + self.bias.data.normal_(0, 1) + + # init variances + self.weights_logvar.data.normal_(-9, 1e-2) + self.bias_logvar.data.normal_(-9, 1e-2) + + def forward(self, x, do_sample=True, scale_variances=False): + # local reparameterization trick + mu_activations = F.linear(x, self.weights, self.bias) + var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) + if scale_variances: + activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample) + else: + activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample) + return activ + + def kl(self): + """""" + KL divergence (q(W) || p(W)) + :return: + """""" + weights_logvar = self.weights_logvar + kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \\ + (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / ( + 2 * self.prior_stdv.pow(2)) - 0.5 + kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \\ + (self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / ( + 2 * self.prior_bias_stdv.pow(2)) \\ + - 0.5 + return kld_weights.sum() + kld_bias.sum() + + +class HorseshoeLayer(BayesianLinearLayer): + """""" + Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k + are vectors of all weights incident into the unit + """""" + def __init__(self, in_features, out_features, cuda=False, scale=1.): + super(HorseshoeLayer, self).__init__(in_features, out_features) + self.cuda = cuda + self.in_features = in_features + self.out_features = out_features + self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.) 
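+ # nodescales carries the per-output-unit scales tau_k, while layerscale on the next line is the
+ # single layer-wide scale v of the w_k = v * tau_k * beta_k factorisation described in the class
+ # docstring; both get half-Cauchy priors via the inverse-Gamma construction in layer_utils.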
+ self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale) + # prior on beta is N(0, I) when employing non centered parameterization + self.prior_stdv = torch.Tensor([1]) + self.prior_mean = torch.Tensor([0.]) + + def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None): + # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample + # sample scales + scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu) + scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2) + scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() + # sample preactivations + mu_activations = F.linear(x, self.weights, self.bias) + var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) + activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) + " +"return scale_sample * activ_sample + + def kl(self): + return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl() + + def fixed_point_updates(self): + self.nodescales.fixed_point_updates() + self.layerscale.fixed_point_updates() + + +class RegularizedHorseshoeLayer(HorseshoeLayer): + """""" + Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. + For all weights w_k incident upon node k in the layer we have: + w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, b). + c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe. + """""" + + def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): + super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) + self.c = InvGammaLayer(a=c_a, b=c_b) + + def forward(self, x, do_sample=True, **kwargs): + # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample + # sample regularized scales + scale_mean = self.nodescales.mu + self.layerscale.mu + scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2 + scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() + c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp() + regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample) + # sample preactivations + mu_activations = F.linear(x, self.weights, self.bias) + var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) + activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) + return torch.sqrt(regularized_scale_sample) * activ_sample + + def kl(self): + return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl() + + +class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer): + """""" + Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. + For all weights w_k incident upon node k in the layer we have: + w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b). + c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe + Note that we now have a per-node c_k. 
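+ The only change relative to RegularizedHorseshoeLayer is that the InvGammaLayer below is built
+ with out_features=out_features, so every output node gets its own regularising scale c_k instead
+ of sharing a single c.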
+ """""" + + def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): + super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) + self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features) + + + + + import numpy as np +import torch +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision + +def compute_test_ll(y_test, y_pred_samples, std_y=1.): + """""" + Computes test log likelihoods = (1 / Ntest) * \\sum_n p(y_n | x_n, D_train) + :param y_test: True y + :param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples + q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train) + :param std_y: True std of y (assumed known) + """""" + S, _ = y_pred_samples.shape + noise = GaussianNoiseFixedPrecision(std_y=std_y) + ll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False) + ll = torch.logsumexp(ll, dim=0) - np.log(S) # mean over num samples + return torch.mean(ll) # mean over test points + + + from abc import ABC +import torch +from torch import nn +from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision +import numpy as np +td = torch.distributions + + +class BayesianNN(nn.Module, ABC): + """""" + Bayesian neural network with zero mean Gaussian priors over weights. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, + activation_type='relu', num_layers=1): + super(BayesianNN, self).__init__() + self.num_layers = num_layers + if activation_type == 'relu': + # activation + self.activation = nn.ReLU() + elif activation_type == 'tanh': + self.activation = nn.Tanh() + else: + print(""Activation Type not supported"") + self.fc_hidden = [] + self.fc1 = layer(ip_dim, num_nodes,) + for _ in np.arange(self.num_layers - 1): + self.fc_hidden.append(layer(num_nodes, num_nodes, )) + self.fc_out = layer(num_nodes, op_dim, ) + self.noise_layer = None + + def forward(self, x, do_sample=True): + x = self.fc1(x, do_sample=do_sample) + x = self.activation(x) + for layer in self.fc_hidden: + x = layer(x, do_sample=do_sample) + x = self.activation(x) + return self.fc_out(x, do_sample=do_sample, scale_variances=True) + + def kl_divergence_w(self): + kld = self.fc1.kl() + self.fc_out.kl() + for layer in self.fc_hidden: + kld += layer.kl() + return kld + + def prior_predictive_samples(self, n_sample=100): + n_eval = 1000 + x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] + y = np.zeros([n_sample, n_eval]) + for i in np.arange(n_sample): + y[i] = self.forward(x).data.numpy().ravel() + return x.data.numpy(), y + + ### get and set weights ### + def get_weights(self): + assert len(self.fc_hidden) == 0 # only works for one layer networks. 
+ weight_dict = {} + weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + return weight_dict + + def set_weights(self, weight_dict): + assert len(self.fc_hidden) == 0 # only works for one layer networks. + to_param = lambda x: nn.Parameter(torch.Tensor(x)) + self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) + self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) + self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) + self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) + + self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) + self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) + self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) + self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) + + +class BayesianRegressionNet(BayesianNN, ABC): + """""" + Bayesian neural net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1): + super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + ) + self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer.loss(y_pred=out, y_true=y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik + return neg_elbo + + def mse(self, x, y): + """""" + scaled rmse (scaled by 1 / std_y**2) + """""" + E_noise_precision = 1. / self.noise_layer.get_noise_var() + return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() + + def get_noise_var(self): + return self.noise_layer.get_noise_var() + + +class BayesianClassificationNet(BayesianNN, ABC): + """""" + Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1): + super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers) + self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer(out, y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. 
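+ # per minibatch: neg_elbo = KL(q(w) || p(w)) / num_batches - E_q[log p(y_batch | x_batch, w)],
+ # so summing over all minibatches recovers the full negative ELBO in expectation.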
+ Elik = self.likelihood(x, y) + neg_elbo = self.kl_divergence_w() / num_batches - Elik + return neg_elbo + + + + + from abc import ABC + +import numpy as np +import torch +from torch import nn + +from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision +import numpy as np +td = torch.distributions + + +class HshoeBNN(nn.Module, ABC): + """""" + Bayesian neural network with Horseshoe layers. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1, + hshoe_scale=1e-1, use_reg_hshoe=False): + if use_reg_hshoe: + layer = RegularizedHorseshoeLayer + else: + layer = HorseshoeLayer + super(HshoeBNN, self).__init__() + self.num_layers = num_layers + if activation_type == 'relu': + # activation + self.activation = nn.ReLU() + elif activation_type == 'tanh': + self.activation = nn.Tanh() + else: + print(""Activation Type not supported"") + self.fc_hidden = [] + self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale) + for _ in np.arange(self.num_layers - 1): + self.fc_hidden.append(layer(num_nodes, num_nodes)) + self.fc_out = BayesianLinearLayer(num_nodes, op_dim) + self.noise_layer = None + + def forward(self, x, do_sample=True): + x = self.fc1(x, do_sample=do_sample) + x = self.activation(x) + for layer in self.fc_hidden: + x = layer(x, do_sample=do_sample) + x = self.activation(x) + return self.fc_out(x, do_sample=do_sample, scale_variances=True) + + def kl_divergence_w(self): + kld = self.fc1.kl() + self.fc_out.kl() + for layer in self.fc_hidden: + kld += layer.kl() + return kld + + def fixed_point_updates(self): + if hasattr(self.fc1, 'fixed_point_updates'): + self.fc1.fixed_point_updates() + if hasattr(self.fc_out, '" +"fixed_point_updates'): + self.fc_out.fixed_point_updates() + for layer in self.fc_hidden: + if hasattr(layer, 'fixed_point_updates'): + layer.fixed_point_updates() + + def prior_predictive_samples(self, n_sample=100): + n_eval = 1000 + x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] + y = np.zeros([n_sample, n_eval]) + for i in np.arange(n_sample): + y[i] = self.forward(x).data.numpy().ravel() + return x.data.numpy(), y + + ### get and set weights ### + def get_weights(self): + assert len(self.fc_hidden) == 0 # only works for one layer networks. + weight_dict = {} + weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + return weight_dict + + def set_weights(self, weight_dict): + assert len(self.fc_hidden) == 0 # only works for one layer networks. 
+ to_param = lambda x: nn.Parameter(torch.Tensor(x)) + self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) + self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) + self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) + self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) + + self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) + self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) + self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) + self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) + + +class HshoeRegressionNet(HshoeBNN, ABC): + """""" + Horseshoe net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): + super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + hshoe_scale=hshoe_scale, + use_reg_hshoe=use_reg_hshoe) + self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer.loss(y_pred=out, y_true=y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik + return neg_elbo + + def mse(self, x, y): + """""" + scaled rmse (scaled by 1 / std_y**2) + """""" + E_noise_precision = 1. / self.noise_layer.get_noise_var() + return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() + + def get_noise_var(self): + return self.noise_layer.get_noise_var() + + +class HshoeClassificationNet(HshoeBNN, ABC): + """""" + Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): + super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + hshoe_scale=hshoe_scale, + use_reg_hshoe=use_reg_hshoe) + self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer(out, y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w()) / num_batches - Elik + return neg_elbo + + + + + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class AbstractNoiseModel(ABC): + """""" Abstract class. All noise models inherit from here. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize an AbstractNoiseModel object. 
+ """""" + + @abc.abstractmethod + def loss(self, *argv, **kwargs): + """""" Compute loss given predictions and groundtruth labels + """""" + raise NotImplementedError + + @abc.abstractmethod + def get_noise_var(self, *argv, **kwargs): + """""" + Return the current estimate of noise variance + """""" + raise NotImplementedError + import math + +import numpy as np +import torch +from scipy.special import gammaln +from uq360.models.noise_models.noisemodel import AbstractNoiseModel +from torch.nn import Parameter + +td = torch.distributions + + +def transform(a): + return torch.log(1 + torch.exp(a)) + + +class GaussianNoise(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f_\\mu(x, w), f_\\sigma^2(x, w)) + """""" + + def __init__(self, cuda=False): + super(GaussianNoise, self).__init__() + self.cuda = cuda + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + + def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True): + """""" + computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred)) + :param y_true: + :param mu_pred: + :param log_var_pred: + + :return: + """""" + var_pred = transform(log_var_pred) + ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2) + if reduce_mean: + return -ll.mean(dim=0) + else: + return -ll.sum(dim=0) + + def get_noise_var(self, log_var_pred): + return transform(log_var_pred) + + + import math + +import numpy as np +import torch +from scipy.special import gammaln +from uq360.models.noise_models.noisemodel import AbstractNoiseModel +from torch.nn import Parameter + +td = torch.distributions + + +def transform(a): + return torch.log(1 + torch.exp(a)) + + +class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b). 
+ Uses a variational approximation; q(lambda) = Gamma(ahat, bhat) + """""" + + def __init__(self, a0=6, b0=6, cuda=False): + super(GaussianNoiseGammaPrecision, self).__init__() + self.cuda = cuda + self.a0 = a0 + self.b0 = b0 + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + # variational parameters + self.ahat = Parameter(torch.FloatTensor([10.])) + self.bhat = Parameter(torch.FloatTensor([3.])) + + def loss(self, y_pred=None, y_true=None): + """""" + computes -1 * E_q(\\lambda)[ln N (y_pred | y_true, \\lambda^-1)], where q(lambda) = Gamma(ahat, bhat) + :param y_pred: + :param y_true: + :return: + """""" + n = y_pred.shape[0] + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \\ + - 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum()) + + def kl(self): + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \\ + self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat + + def get_noise_var(self): + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return (bhat / ahat).data.numpy()[0] + + +class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f(x, w), sigma_y**2); known sigma_y + """""" + + def __init__(self, std_y=1., cuda=False): + super(GaussianNoiseFixedPrecision, self).__init__() + self.cuda = cuda + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + self.sigma_y = std_y + + def loss(self, y_pred=None, y_true=None): + """""" + computes -1 * ln N (y_pred | y_true, sigma_y**2) + :param y_pred: + :param y_true: + :return: + """""" + ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2) + return -ll.sum(dim=0) + + def get_noise_var(self): + return self.sigma_y ** 2 ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import numpy as np +import logging +import warnings +from sklearn.ensemble import VotingClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.tree import DecisionTreeClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import Ridge +from sklearn.preprocessing import binarize +from sklearn.ensemble import VotingRegressor +from sklearn.svm import SVC +from sklearn.linear_model import LinearRegression +from sklearn.ensemble import RandomForestRegressor +from sklearn.tree import DecisionTreeRegressor +from learner.aion_matrix import aion_matrix +warnings.filterwarnings('always') + + + + +class ensemble_voting(): + def __init__(self,ensemble_params,scoreParam): + self.ensemble_params = ensemble_params + self.scoreParam=scoreParam + self.final_estimator_r='' + self.final_estimator_c='' + self.log = logging.getLogger('eion') + ''' Read the aion config ""Ensemble-Voting"", parse the algorithm and associated params based on enable or True status.Not used now ''' + def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): + from learner.parameters import parametersDefine + paramObj=parametersDefine() + ensClass_algs_params={} + # algs_status={} + for key,val in ensembleConfig.items(): + for s,p in val.items(): + + if (s == ""enable"" and p == ""True""): + params = val['param'] + params_eval = paramObj.paramDefine(params,None)" +" + params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} + ensClass_algs_params[key]=params_eval + else: + pass + return ensClass_algs_params + + ''' To make array of voting algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. ''' + def listEnsembleClassVotingAlgs(self,ensClass_algs_params): + ensembleVotingClassList=list() + for key,val in ensClass_algs_params.items(): + if (key == 'Logistic Regression'): + lr=LogisticRegression() + lr=lr.set_params(**val) + ensembleVotingClassList.append(lr) + + elif (key == 'Support Vector Machine'): + svm=SVC() + svm=svm.set_params(**val) + ensembleVotingClassList.append(svm) + + elif (key == 'Naive Bayes'): + nb=GaussianNB() + nb=nb.set_params(**val) + ensembleVotingClassList.append(nb) + + elif (key == 'K Nearest Neighbors'): + knn=KNeighborsClassifier() + knn=knn.set_params(**val) + ensembleVotingClassList.append(knn) + + elif (key == 'Decision Tree'): + dt=DecisionTreeClassifier() + dt=dt.set_params(**val) + ensembleVotingClassList.append(dt) + + + elif (key == 'Random Forest'): + rf=RandomForestClassifier() + rf=rf.set_params(**val) + ensembleVotingClassList.append(rf) + + else: + ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. + ensembleVotingClassList=[] + pass + + return ensembleVotingClassList + + ''' To make array of voting regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
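+ (ensemble_voting_classifier and ensemble_voting__regressor below build their estimator lists
+ directly from modelList, so these helpers are only kept for possible future reuse along the lines
+ of the bagging ensemble.)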
''' + def listEnsembleRegVotingAlgs(self,ensReg_algs_params): + + ensembleVotingRegList=list() + for key,val in ensReg_algs_params.items(): + if (key == 'Linear Regression'): + lir=LinearRegression() + lir=lir.set_params(**val) + ensembleVotingRegList.append(lir) + elif (key == 'Decision Tree'): + dtr=DecisionTreeRegressor() + dtr=dtr.set_params(**val) + ensembleVotingRegList.append(dtr) + elif (key == 'Ridge'): + ridge=Ridge() + ridge=ridge.set_params(**val) + ensembleVotingRegList.append(ridge) + else: + ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. + ensembleVotingRegList=[] + + return ensembleVotingRegList + + + + def ensemble_voting_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): + #bug 12437 + status='ERROR' + model=None + estimator=None + score=None + params=None + threshold = -1 + precisionscore =-1 + recallscore = -1 + objClf = aion_matrix() + try: + lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) + rf = RandomForestClassifier(random_state=1) + gnb = GaussianNB() + svc = SVC(probability=True) #Need to keep probability=True, because cross_val_score,predict_proba fn calls + knn=KNeighborsClassifier(n_neighbors=5) + base_estimators = [] + if 'Logistic Regression' in modelList: + base_estimators.append(('LogisticRegression', lr)) + self.log.info('-------- Ensemble: Logistic Regression-------') + if 'Random Forest' in modelList: + base_estimators.append(('RandomForestClassifier', rf)) + self.log.info('-------- Ensemble: Random Forest-------') + if 'Naive Bayes' in modelList: + base_estimators.append(('GaussianNB', gnb)) + self.log.info('-------- Ensemble: Naive Bayes-------') + if 'Support Vector Machine' in modelList: + self.log.info('-------- Ensemble: Support Vector Machine-------') + base_estimators.append(('SVC', svc)) + if 'K Nearest Neighbors' in modelList: + base_estimators.append(('KNeighborsClassifier', knn)) + self.log.info('-------- Ensemble: K Nearest Neighbors-------') + if len(base_estimators) == 0: + self.log.info('-------- Ensemble Voting is only supported for Logistic Regression, Random Forest Classifier, Naive Bayes, SVM and KNN -------') + status = ""UNSUPPORTED"" + return status, estimator,params,score,model,threshold,precisionscore,recallscore + eclf1 = VotingClassifier(base_estimators, voting='soft') + eclf1.fit(X_train, y_train) + y_predict = eclf1.predict(X_test) + score = objClf.get_score(self.scoreParam,y_test,y_predict) + self.log.info('-------- Ensemble (VoteClassifier) Soft Score:'+str(score)) + if MakeFP0: + self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FP','') + self.log.info('-------- Calculate Threshold for FP End-------') + elif MakeFN0: + self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FN','') + self.log.info('-------- Calculate Threshold for FN End-------') + + if threshold != -1: + predictedData = eclf1.predict_proba(X_test) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 + score = 
objClf.get_score(self.scoreParam,y_test,predictedData) + + status = 'SUCCESS' + model =eclf1.__class__.__name__ + estimator=eclf1 + params = estimator.get_params() + + #bug 12437 - Removed ensemble hard voting as predict_proba in the later stages will break + except Exception as Inst: #bug 12437 + self.log.info('--------- Error in Ensemble Voting ---------\\n') + self.log.info(str(Inst)) + return status,estimator,params,score,model,threshold,precisionscore,recallscore + + def ensemble_voting__regressor(self,X_train,y_train, X_test, y_test,modelList): + scoredetails = '' + vr_predict=None + vr_model=None + try: + lr = LinearRegression() + rfr = RandomForestRegressor(n_estimators=10, random_state=1) + dtr=DecisionTreeRegressor() + base_estimators = [] + if 'Linear Regression' in modelList: + base_estimators.append(('LinearRegression', lr)) + if 'Decision Tree' in modelList: + base_estimators.append(('DecisionTreeRegressor', dtr)) + if 'Random Forest' in modelList: + base_estimators.append(('RandomForestRegressor', rfr)) + if len(base_estimators) == 0: + base_estimators = [('LinearRegression', lr), ('RandomForestRegressor', rfr),('DecisionTreeRegressor', dtr)] + voting_reg = VotingRegressor(base_estimators) + vr_model=voting_reg.fit(X_train,y_train) + vr_predict=voting_reg.predict(X_test) + best_vr_alg=voting_reg.__class__.__name__ + self.log.info('-----------> Voting regression Model '+str(best_vr_alg)) + + except Exception as e: + self.log.info(""voting regression Exception info: \\n"") + self.log.info(e) + + aion_matrixobj = aion_matrix() + score = aion_matrixobj.get_score(self.scoreParam,y_test,vr_predict) + + return voting_reg,voting_reg.get_params(),score,best_vr_alg + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import time +import os +import sys +import numpy as np +import pandas as pd +from sklearn import model_selection +from sklearn.model_selection import train_test_split, KFold, cross_val_score +from sklearn.model_selection import KFold +#Classification metrics lib +import logging +import warnings +warnings.filterwarnings('always') # ""error"", ""ignore"", ""always"", ""default"", ""module"" or ""once"" +from learner.aion_matrix import aion_matrix +from sklearn.preprocessing import binarize + + + +class ensemble_bagging(): + def __init__(self,ensemble_params,scoreParam,MakeFP0,MakeFN0): + self.ensemble_params = ensemble_params + self.scoreParam=scoreParam + self.MakeFP0 = MakeFP0 + self.MakeFN0 = MakeFN0 + self.log = logging.getLogger('eion') + + + def add_alg2dict(self,k,v): + b_dict={} + b_dict[k]=v + return b_dict + + def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): + from learner.parameters import parametersDefine + paramObj=parametersDefine() + ensClass_algs_params={} + algs_status={} + for key,val in ensembleConfig.items(): + for s,p in val.items(): + if (s == ""enable"" and p == ""True""): + params = val['param'] + params_eval = paramObj.paramDefine(params,None) + params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} + ensClass_algs_params[key]=params_eval + else: + pass + return ensClass_algs_params + + def listEnsembleClassBaggingAlgs(self,ensClass_algs_params): + from sklearn.linear_model import LogisticRegression + from sklearn.svm import SVC + from sklearn.naive_bayes import GaussianNB + from sklearn.neighbors import KNeighborsClassifier + from sklearn.tree import DecisionTreeClassifier + from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier + ensembleBaggingClassList=list() + for key,val in ensClass_algs_params.items(): + if (key == 'Logistic Regression'): + lr=LogisticRegression() + lr=lr.set_params(**val) + ensembleBaggingClassList.append(lr) + + elif (key == 'Support Vector Machine'): + svm=SVC() + svm=svm.set_params(**val) + ensembleBaggingClassList.append(svm) + + elif (key == 'Naive Bayes'): + nb=GaussianNB() + nb=nb.set_params(**val) + ensembleBaggingClassList.append(nb) + + elif (key == 'K Nearest Neighbors'): + knn=KNeighborsClassifier() + knn=knn.set_params(**val) + ensembleBaggingClassList.append(knn) + + elif (key == 'Decision Tree'): + dt=DecisionTreeClassifier() + dt=dt.set_params(**val) + ensembleBaggingClassList.append(dt) + + + elif (key == 'Random Forest'): + rf=RandomForestClassifier() + rf=rf.set_params(**val) + ensembleBaggingClassList.append(rf) + + else: + pass + + + return ensembleBaggingClassList + + def listEnsembleRegBaggingAlgs(self,ensReg_algs_params): + from sklearn.linear_model import Ridge + from sklearn.linear_model import LinearRegression + from sklearn.ensemble import RandomForestRegressor + from sklearn.tree import DecisionTreeRegressor + ensembleBaggingRegList=list() + for key,val in ensReg_algs_" +"params.items(): + if (key == 'Linear Regression'): + lir=LinearRegression() + lir=lir.set_params(**val) + ensembleBaggingRegList.append(lir) + + elif (key == 'Decision Tree'): + dtr=DecisionTreeRegressor() + dtr=dtr.set_params(**val) + ensembleBaggingRegList.append(dtr) + + elif (key == 'Ridge'): + ridge=Ridge() + ridge=ridge.set_params(**val) + ensembleBaggingRegList.append(ridge) + + else: + ensembleBaggingRegList=[] + + return ensembleBaggingRegList + + def ensemble_bagging_classifier(self,X_train,y_train, X_test, y_test): + ## New 
changes + from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier + + ensemble_method = ""Bagging_classifier"" + problemType='classification' + ensembleType='bagging' + model_dict=self.ensemble_params + ensClass_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) + ensembleBaggingList = self.listEnsembleClassBaggingAlgs(ensClass_algs_params) + + # clf_array = model_list + clf_array=ensembleBaggingList + # no. of base classifier + num_trees = len(clf_array) + # max_samples=float(max_samples) + n_estimators = num_trees + # random_state=seed + bagging_mean={} + bagging_std={} + accuracy_basealgs_train={} + accuracy_basealgs_test={} + blable="""" + accuracy_score_test=0 + kfold = model_selection.KFold(n_splits=10, random_state=None) + bestScore=-0xFFFF + scoredetails = '' + threshold = -1 + bestthreshold = -1 + precisionscore =-1 + bestprecisionscore=-1 + recallscore = -1 + bestrecallscore=-1 + objClf = aion_matrix() + + if (ensemble_method == ""Bagging_classifier""): + #bagging ensemble of base classifier .e.g. KNeighborsClassifier base estimators, each built on random subsets of 40% of the samples and 50% of the features. + for clf in clf_array: + self.log.info('-----------> Ensemble Algorithm '+str(clf.__class__.__name__)) + clf.fit(X_train, y_train) + bagging_clf = BaggingClassifier(clf,n_estimators = num_trees, random_state=10) + bagging_clf.fit(X_train, y_train) + bagging_scores = cross_val_score(bagging_clf, X_train, y_train, cv=kfold,n_jobs=-1) + #bagging_ensemble_t=bagging_clf.fit(X_train, y_train) + if not X_test.empty: + bag_predict=bagging_clf.predict(X_test) + accuracy_score_test = objClf.get_score(self.scoreParam,y_test,bag_predict) + else: + accuracy_score_test = bagging_scores + MakeFP0 = False + MakeFN0 = False + if self.MakeFP0: + self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FP','') + MakeFP0 = True + self.log.info('-------- Calculate Threshold for FP End-------') + if self.MakeFN0: + self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FN','') + MakeFN0 = True + self.log.info('-------- Calculate Threshold for FN End-------') + + if threshold != -1: + if not X_test.empty: + predictedData = bagging_clf.predict_proba(X_test) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 + accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) + + status,bscore,bthres,brscore,bpscore = objClf.getBestModel(MakeFP0,MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,accuracy_score_test,bestScore) + if status: + bestScore =bscore + bestModel =bagging_clf.__class__.__name__ + bestEstimator=bagging_clf + bestthreshold = bthres + bestBaseModel = clf.__class__.__name__ + bestrecallscore = brscore + bestprecisionscore = bpscore + else: + pass + + best_alg_name=bestEstimator.__class__.__name__ + self.log.info('-----------> Best Bagging Classifier Model '+str(bestBaseModel)) + self.log.info('-----------> Best Score '+str(bestScore)) + # 
self.log.info('-----------> Threshold '+str(bestthreshold)) #bug 12438 + if bestthreshold != -1: + if not X_test.empty: + predictedData_test = bestEstimator.predict_proba(X_test) + predictedData_test = binarize(predictedData_test[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 + predictedData_train = bestEstimator.predict_proba(X_train) + predictedData_train = binarize(predictedData_train[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 + else: + if not X_test.empty: + predictedData_test = bestEstimator.predict(X_test) + predictedData_train = bestEstimator.predict(X_train) + + return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name,bestthreshold,bestprecisionscore,bestrecallscore + + + def ensemble_bagging__regressor(self,X_train,y_train, X_test, y_test): + from sklearn.ensemble import BaggingRegressor + ensemble_method='Bagging_regressor' + problemType='regression' + ensembleType='bagging' + model_dict=self.ensemble_params + ensReg_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) + ensembleBaggingList = self.listEnsembleRegBaggingAlgs(ensReg_algs_params) + + scoredetails = '' + aion_matrixobj = aion_matrix() + reg_array = ensembleBaggingList + + num_trees = len(reg_array) + #self.log.info(num_trees) + # max_samples=float(max_samples) + n_estimators = num_trees + r_state=10 + + bestModel='' + bestParams={} + bestScore=-sys.float_info.max #extension of bugfix 11656 + objClf = aion_matrix() + for reg in reg_array: + self.log.info('-----------> Ensemble Algorithm '+str(reg.__class__.__name__)) + nmodel=reg.fit(X_train, y_train) + model = reg.__class__.__name__ + estimator = BaggingRegressor(base_estimator=reg, random_state=r_state) + bagging_ensemble_t=estimator.fit(X_train, y_train) + predictedData = estimator.predict(X_test) + score = objClf.get_score(self.scoreParam,y_test,predictedData) + if self.scoreParam == ""r2"": + if score > bestScore: + bestScore =score + bestModel =model + bestEstimator=estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: #extension of bugfix 11656 + bestScore =abs(score) + bestModel =model + bestEstimator=estimator + best_alg_name=bestEstimator.__class__.__name__ + self.log.info('-----------> Best Ensemble Algorithm '+str(bestModel)) + return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import numpy as np +#Classification metrics lib + +import logging +import warnings +from sklearn.neighbors import KNeighborsClassifier +from sklearn.tree import DecisionTreeClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.preprocessing import binarize +from sklearn.svm import SVC +from sklearn.ensemble import StackingClassifier +from sklearn.linear_model import LinearRegression +from sklearn.ensemble import RandomForestRegressor +from sklearn.tree import DecisionTreeRegressor +from sklearn.ensemble import StackingRegressor +from sklearn.svm import LinearSVR +from sklearn.linear_model import RidgeCV +from sklearn.linear_model import LassoCV +from learner.aion_matrix import aion_matrix + +warnings.filterwarnings('always') # ""error"", ""ignore"", ""always"", ""default"", ""module"" or ""once"" + + + +class ensemble_stacking(): + def __init__(self,ensemble_params,scoreParam): + self.ensemble_params = ensemble_params + self.scoreParam=scoreParam + self.final_estimator_r='' + self.final_estimator_c='' + self.log = logging.getLogger('eion') + + + ## Read the aion config ""Ensemble-Stacking"", parse the algorithm and associated params based on enable or True status. + def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): + from learner.parameters import parametersDefine + paramObj=parametersDefine() + ensClass_algs_params={} + # algs_status={} + for key,val in ensembleConfig.items(): + for s,p in val.items(): + if (s == ""enable"" and p == ""True""): + params = val['param'] + params_eval = paramObj.paramDefine(params,None) + params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} + ensClass_algs_params[key]=params_eval + else: + pass + + return ensClass_algs_params + + ## To make array of stacking algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. + + def listEnsembleClassStackingAlgs(self,ensClass_algs_params): + + ensembleBaggingClassList=list() + for key,val in ensClass_algs_params.items(): + # print(key) + if (key == 'Logistic Regression'): + lr=LogisticRegression() + lr=lr.set_params(**val) + ensembleBaggingClassList.append(lr) + + elif (key == 'Support Vector Machine'): + svm=SVC() + svm=svm.set_params(**val) + ensembleBaggingClassList.append(svm) + + elif (key == 'Naive Bayes'): + nb=GaussianNB() + nb=nb.set_params(**val) + ensembleBaggingClassList.append(nb) + + elif (key == 'K Nearest Neighbors'): + knn=KNeighborsClassifier() + knn=knn.set_params(**val) + ensembleBaggingClassList.append(knn) + + elif (key == 'Decision Tree'): + dt=DecisionTreeClassifier() + dt=dt.set_params(**val) + ensembleBaggingClassList.append(dt) + + + elif (key == 'Random Forest'): + rf=RandomForestClassifier() + rf=rf.set_params(**val) + ensembleBaggingClassList.append(rf) + + else: + ensembleBaggingClassList=[] + pass + + + return ensembleBaggingClassList + + + ## To make array of stacking regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
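+ ## Illustrative input shape (names are examples only): the raw ensemble config looks like
+ ## {'RidgeCV': {'enable': 'True', 'param': {...}}, 'LinearSVR': {'enable': 'False', ...}};
+ ## getSelected_algs_params keeps only the enabled entries and reduces each param to its first
+ ## candidate value, and that {alg_name: {param: value}} dict is what this builder receives.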
+ + def listEnsembleRegStackingAlgs(self,ensReg_algs_params): + + ensembleBaggingRegList=list() + for key,val in ensReg_algs_params.items(): + if (key == 'LinearSVR'): + lir=LinearSVR() + lir=lir.set_params(**val) + ensembleBaggingRegList.append(lir) + elif (key == 'LinearRegression'): + lr=LinearRegression() + lr=lr.set_params(**val) + ensembleBaggingRegList.append(lr) + elif (key == 'LassoCV'): + lcv=LassoCV() + lcv=lcv.set_params(**val) + ensembleBaggingRegList.append(lcv) + elif (key == 'RandomForestRegressor'): + rfr=RandomForestRegressor() + rfr=rfr.set_params(**val) + ensembleBaggingRegList.append(rfr) + elif (key == 'RidgeCV'): + ridge=RidgeCV() + ridge=ridge.set_params(**val) + ensembleBaggingRegList.append(ridge) + + else: + ## NO algorithms found in configuration settings, instead of sending empty array,we can add any one of algorithms. + ensembleBaggingRegList=[] + + return ensembleBaggingRegList + + def extract_params(self,dict): + self.dict=dict + for k,v in self.dict.items(): + return k,v + + def stacking_params(self): + for k,v in" +"self.ensemble_params.items(): + try: + if (k == ""max_features_percentage""): + max_features_percentage=float(v) + elif (k == ""max_samples""): + max_samples=float(v) + elif (k == ""seed""): + seed=int(v) + elif (k == ""final_estimator_stack_c""): + final_estimator_c=str(v) + elif (k == ""final_estimator_stack_r""): + final_estimator_r=str(v) + else: + self.log.info(""Invalid Param in ensemble advanced configuration.\\n"") + except Exception as e: + self.log.info(""\\n Ensemble config param parsing error""+str(e)) + continue + return final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage + + def ensemble_stacking_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): + + final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() + final_estimator_c="""" + final_estimator=final_estimator_c + scoredetails='' + lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) + rf = RandomForestClassifier(random_state=2) + gnb = GaussianNB() + svc = SVC(probability=True) #Need to keep probability=True, because of cross_val_score,predict_proba fn calls + knn=KNeighborsClassifier(n_neighbors=5) + + try: + if (final_estimator == 'LogisticRegression'): + final_estimator_a=lr + elif (final_estimator == 'RandomForestClassifier'): + final_estimator_a=rf + elif (final_estimator == 'GaussianNB'): + final_estimator_a=gnb + elif (final_estimator == 'SVC'): + final_estimator_a=svc + elif (final_estimator == 'KNeighborsClassifier'): + final_estimator_a=knn + else: + final_estimator_a=lr + except Exception as e: + final_estimator_a=lr + self.log.info(""Given stacking regression final estimator algorithm issue, using default one (LogisticRegression) as final_estimator now.\\n"") + self.log.info(e) + #stacking estimators + base_estimators = [] + if 'Logistic Regression' in modelList: + base_estimators.append(('LogisticRegression', lr)) + if 'Random Forest' in modelList: + base_estimators.append(('RandomForestClassifier', rf)) + if 'Naive Bayes' in modelList: + base_estimators.append(('GaussianNB', gnb)) + if 'Support Vector Machine' in modelList: + base_estimators.append(('SVC', svc)) + if 'K Nearest Neighbors' in modelList: + base_estimators.append(('KNeighborsClassifier', knn)) + if len(base_estimators) == 0: + base_estimators = [('LogisticRegression', lr),('RandomForestClassifier', rf),('GaussianNB', gnb),('SVC', svc),('KNeighborsClassifier', knn)] + stacking_c = 
StackingClassifier(estimators=base_estimators, final_estimator=final_estimator_a) + stacking_c.fit(X_train, y_train) + y_predict=stacking_c.predict(X_test) + objClf = aion_matrix() + accuracy_score_test = objClf.get_score(self.scoreParam,y_test,y_predict) + MakeFP0 = False + MakeFN0 = False + threshold = -1 + recallscore = -1 + precisionscore =-1 + if MakeFP0: + self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FP','') + MakeFP0 = True + self.log.info('-------- Calculate Threshold for FP End-------') + elif MakeFN0: + self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FN','') + MakeFN0 = True + self.log.info('-------- Calculate Threshold for FN End-------') + + if threshold != -1: + predictedData = stacking_c.predict_proba(X_test) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 + accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) + + best_alg_stacking=stacking_c.__class__.__name__ + self.log.info('-----------> Best Stacking Classifier Model '+str(best_alg_stacking)) + self.log.info('-----------> Best Score '+str(accuracy_score_test)) + + + return stacking_c,stacking_c.get_params(),accuracy_score_test,best_alg_stacking,threshold,precisionscore,recallscore + + + + + def ensemble_stacking__regressor(self,X_train,y_train, X_test, y_test,modelList): + + final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() + final_estimator=final_estimator_r + final_estimator_a=None + scoredetails='' + lr=LinearRegression() + rcv=RidgeCV() + svr=LinearSVR() + lcv=LassoCV() + rf=RandomForestRegressor(random_state=42) + try: + if (final_estimator == 'LinearRegression'): + final_estimator_a=lr + if (final_estimator == 'RidgeCV'): + final_estimator_a=rcv + elif (final_estimator == 'LinearSVR'): + final_estimator_a=svr + elif (final_estimator == 'LassoCV'): + final_estimator_a=lcv + elif (final_estimator == 'RandomForestRegressor'): + final_estimator_a=rf + else: + #default is RidgeCV + final_estimator_a=rcv + except Exception as e: + self.log.info(""stacking regression Exception info: \\n"") + self.log.info(e) + final_estimator_a=rcv + base_estimators = [] + if 'Linear Regression' in modelList: + base_estimators.append(('LinearRegression', lr)) + if 'Ridge' in modelList: + base_estimators.append(('RidgeCV', rcv)) + if 'LinearSVR' in modelList: + base_estimators.append(('LinearSVR', svr)) + if 'Lasso' in modelList: + base_estimators.append(('LassoCV', lcv)) + if 'Random Forest' in modelList: + base_estimators.append(('RandomForestRegressor', rf)) + if len(base_estimators) == 0: + base_estimators = [('LinearRegression', lr),('RidgeCV', rcv),('LinearSVR', svr),('LassoCV', lcv),('RandomForestRegressor', rf)] + self.log.info(""Stacking Base Alogs :""+str(base_estimators)) + self.log.info(""Final Estimator :""+final_estimator) + stacking_regressor = StackingRegressor(estimators=base_estimators,final_estimator=final_estimator_a) + stacking_r_model=stacking_regressor.fit(X_train, y_train) + 
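+ # StackingRegressor fits each base estimator on the full training data and trains final_estimator
+ # on their out-of-fold predictions (5-fold cross_val_predict by default); predict below feeds the
+ # base estimators' predictions to the fitted final estimator.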
stacking_rpredict=stacking_regressor.predict(X_test) + + + + best_stacking_alg=stacking_regressor.__class__.__name__ + #Accuracy + accuracy_score_best=stacking_regressor.score(X_test, y_test) + aion_matrixobj = aion_matrix() + score = aion_matrixobj.get_score(self.scoreParam,y_test,stacking_rpredict) + return stacking_regressor,stacking_regressor.get_params(),score,best_stacking_alg + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import numpy as np +import pandas as pd +import talos +from talos import Evaluate +import json +import sys +import time +import os +import tensorflow.keras.utils as kutils +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense,Dropout,LSTM,GRU,SimpleRNN,Flatten,Input +from sklearn.model_selection import train_test_split +from tensorflow.keras.layers import Conv1D,MaxPooling1D +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +import logging +import tensorflow as tf +import tensorflow.keras.backend as K + + +def rmse_m(y_true, y_pred): + return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) + +def r_square(y_true, y_pred): + SS_res = K.sum(K.square(y_true-y_pred)) + SS_tot = K.sum(K.square(y_true-K.mean(y_true))) + return (1 - SS_res/(SS_tot+K.epsilon())) + +class DLRegressionModel: + + def __init__(self,modelList, modelParams, scoreParam, cvSplit, featuresData, + targetData,testX,testY, method,randomMethod,roundLimit,best_feature_model): + + + self.modelList =modelList + self.modelParams =modelParams + self.scoreParam = scoreParam + self.cvSplit =cvSplit + self.featuresData =featuresData + self.targetData = targetData + self.testX = testX + self.testY = testY + self.method =method + #self.logFile = logFile + self.randomMethod=randomMethod + self.roundLimit=roundLimit + self.log = logging.getLogger('eion') + self.best_feature_model = best_feature_model + + def RNNRegression(self,x_train,y_train,x_val,y_val,params): + tf.keras.backend.clear_session() + x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) + x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1)) + model = Sequential() + if params['RNNType'] == ""LSTM"" : + if params['numRNNLayers'] > 1: + model.add(LSTM(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1))) + for x in range(1,params['numRNNLayers']): + model.add(LSTM(params['first_neuron'])) + else: + model.add(LSTM(params['first_neuron'],input_shape=(x_train.shape[1],1))) + + elif params['RNNType'] == ""GRU"" : + if params['numRNNLayers'] > 1: + model.add(GRU(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1))) + for x in range(1,params['numRNNLayers']): + model.add(GRU(params['first_neuron'])) + else: + model.add(GRU(params['first_neuron'],input_shape=(x_train.shape[1],1))) + + + elif params['RNNType'] == ""SimpleRNN"" : + if params['numRNNLayers'] 
> 1: + model.add(SimpleRNN(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1))) + for x in range(1,params['numRNNLayers']): + model.add(SimpleRNN(params['first_neuron'])) + else: + model.add(SimpleRNN(params['first_neuron'],input_shape=(x_train.shape[1],1))) + + + talos.utils.hidden_layers(model, params, 1) + model.add(Dense(1,activation=params['activation'])) + model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square]) + out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'], + epochs=params['epochs'],verbose=0,shuffle=True) + + return out, model + + def SNNRegression(self,x_train,y_train,x_val,y_val,params): + tf.keras.backend.clear_session() + model = Sequential() + model.add(Dense(params['first_neuron'],input_dim=x_train.shape[1],activation=params['activation'])) + talos.utils.hidden_layers(model, params, 1) + model.add(Dense(1, activation=params['activation'])) + model.compile(loss=" +"params['losses'], optimizer=params['optimizer'], metrics=['mae','mse',rmse_m,r_square]) + out = model.fit(x=x_train, + y=y_train, + validation_data=(x_val, y_val), + epochs=params['epochs'], + batch_size=params['batch_size'], + verbose=0) + return out, model + + def CNNRegression(self,x_train,y_train,x_val,y_val,params): + tf.keras.backend.clear_session() + x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) + self.log.info(x_train.shape) + x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1)) + model = Sequential() + self.log.info(params['kernel_size']) + model.add(Conv1D(filters=params['first_neuron'], kernel_size=int(params['kernel_size']), activation=params['activation'], input_shape=(x_train.shape[1],1)) ) + if params['numConvLayers'] > 1: + for x in range(1,params['numConvLayers']): + if params['MaxPool'] == ""True"": + model.add(MaxPooling1D(pool_size=2)) + model.add(Conv1D(filters=8, kernel_size=int(params['kernel_size']), activation=params['activation'])) + + talos.utils.hidden_layers(model, params, 1) + model.add(Flatten()) + model.add(Dense(1)) + model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square]) + out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'], + epochs=params['epochs'],verbose=0,shuffle=True) + + return out, model + + + def TalosScan(self,modelObj): + + try: + #dataPath = pd.read_csv(self.dataLocation) + #X = dataPath.drop(self.targetData, axis=1) + X = self.featuresData + x = X.values + loss_matrix = 'mean_absolute_error' + optimizer='Nadam' + Y= self.targetData + y = Y.values + XSNN = X.values + X1 = np.expand_dims(X, axis=2) + scoredetails = '' + + kf = KFold(n_splits = self.cvSplit) + for train_index, test_index in kf.split(X): + X_train, X_test = x[train_index], x[test_index] + y_train, y_test = y[train_index], y[test_index] + + + data = self.modelParams + models = data.keys() + lstart = time.time() + scoreSNN = [] + scoreRNN = [] + scoreCNN = [] + scoreRNNGRU = [] + scoreRNNLSTM = [] + best_paramsSNN = {} + best_paramsRNN = {} + best_paramsRNNGRU = {} + best_paramsRNNLSTM = {} + best_paramsCNN = {} + + if ""Neural Network""in self.modelList: + self.log.info(""-------> Model Name: Neural Network"") + start = time.time() + data = self.modelParams[""Neural Network""] + p = {""activation"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) 
for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + + scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.SNNRegression,experiment_name='SNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod) + + matrix_type = 'val_loss' + if self.scoreParam.lower() == 'rmse': + matrix_type = 'val_rmse_m' + elif(self.scoreParam.lower() == 'r2'): + matrix_type = 'val_r_square' + elif(self.scoreParam.lower() == 'mae'): + matrix_type = 'val_mae' + elif(self.scoreParam.lower() == 'mse'): + matrix_type = 'val_mse' + + + analyze_objectSNN = talos.Analyze(scan_object) + highValAccSNN = analyze_objectSNN.low(matrix_type) + + dfSNN = analyze_objectSNN.data + + newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN] + + + best_paramsSNN[""activation""] = list(newdfSNN[""activation""])[0] + best_paramsSNN[""optimizer""] = list(newdfSNN[""optimizer""])[0] + best_paramsSNN[""losses""] = list(newdfSNN[""losses""])[0] + best_paramsSNN[""first_layer""] = list(newdfSNN[""first_neuron""])[0] + best_paramsSNN[""shapes""] = list(newdfSNN[""shapes""])[0] + best_paramsSNN[""hidden_layers""] = list(newdfSNN[""hidden_layers""])[0] + best_paramsSNN[""dropout""] = list(newdfSNN[""dropout""])[0] + best_paramsSNN[""batch_size""] = list(newdfSNN[""batch_size""])[0] + best_paramsSNN[""epochs""] = list(newdfSNN[""epochs""])[0] + best_paramsSNN[""lr""] = list(newdfSNN[""lr""])[0] + + + best_modelSNN = scan_object.best_model(metric=matrix_type, asc=True) + + loss_matrix = best_paramsSNN[""losses""] + optimizer = best_paramsSNN[""optimizer""] + batchsize = best_paramsSNN[""batch_size""] + + if self.scoreParam == 'rmse': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif self.scoreParam == 'r2': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square]) + elif self.scoreParam == 'mae': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae']) + else: + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse']) + scoreSNN = best_modelSNN.evaluate(XSNN,Y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelSNN.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreSNN)) + self.log.info(""----------> Model Params: ""+str(best_paramsSNN)) + executionTime=time.time() - start + self.log.info('----------> SNN Execution Time: '+str(executionTime)+'\\n') + XSNN = self.testX.values + predictedData = best_modelSNN.predict(XSNN) + if self.scoreParam.lower() == 'mse': + score = mean_squared_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'rmse': + score=mean_squared_error(self.testY,predictedData,squared=False) + elif self.scoreParam.lower() == 'mae': + score=mean_absolute_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'r2': + score=r2_score(self.testY,predictedData) + else: + score = scoreSNN[1] + self.log.info(""----------> Testing Score: ""+str(score)) + scoreSNN[1] = score + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Neural 
Network"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreSNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Neural Network') + self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network""] + p = {""RNNType"":[""SimpleRNN""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + + scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod) + + matrix_type = 'val_loss' + if self.scoreParam.lower() == 'rmse': + matrix_type = 'val_rmse_m' + elif(self.scoreParam.lower() == 'r2'): + matrix_type = 'val_r_square' + elif(self.scoreParam.lower() == 'mae'): + matrix_type = 'val_mae' + elif(self.scoreParam.lower() == 'mse'): + matrix_type = 'val_mse' + + analyze_objectRNN = talos.Analyze(scan_object) + highValAccRNN = analyze_objectRNN.low(matrix_type) + dfRNN = analyze_objectRNN.data + + newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN] + + + best_paramsRNN[""RNNType""] = ""SimpleRNN"" + best_paramsRNN[""numRNNLayers""] = list(newdfRNN[""numRNNLayers""])[0] + best_paramsRNN[""activation""] = list(newdfRNN[""activation""])[0] + best_paramsRNN[""optimizer""] = list(newdfRNN[""optimizer""])[0] + best_paramsRNN[""losses""] = list(newdfRNN[""losses""])[0] + best_paramsRNN[""first_layer""] = list(newdfRNN[""first_neuron""])[0] + best_paramsRNN[""shapes""] = list(newdfRNN[""shapes""])[0] + best_paramsRNN[""hidden_layers""] = list(newdfRNN[""hidden_layers""])[0] + best_paramsRNN[""dropout""] = list(newdfRNN[""dropout""])[0] + best_paramsRNN[""batch_size""] = list(newdfRNN[""batch_size""])[0] + best_paramsRNN[""epochs""] = list(newdfRNN[""epochs""])[0] + best_paramsRNN[""lr""] = list(newdfRNN[""lr""])[0] + + best_modelRNN = scan_object.best_model(metric=matrix_type, asc=True) + + loss_matrix = best_paramsRNN[""losses""] + optimizer = best_paramsRNN[""optimizer""] + batchsize = best_paramsRNN[""batch_size""] + + if self.scoreParam == 'rmse': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif self.scoreParam == 'r2': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[" +"r_square]) + elif self.scoreParam == 'mae': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae']) + else: + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse']) + + scoreRNN = best_modelRNN.evaluate(X1,Y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNN.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreRNN)) + self.log.info(""----------> Model Params: 
""+str(best_paramsRNN)) + executionTime=time.time() - start + self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n') + XSNN = np.expand_dims(self.testX, axis=2) + predictedData = best_modelRNN.predict(XSNN) + if self.scoreParam.lower() == 'mse': + score = mean_squared_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'rmse': + score=mean_squared_error(self.testY,predictedData,squared=False) + elif self.scoreParam.lower() == 'mae': + score=mean_absolute_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'r2': + score=r2_score(self.testY,predictedData) + else: + score = scoreRNN[1] + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNN[1] = score + + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network') + self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network (GRU)""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network (GRU)"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network (GRU)""] + p = {""RNNType"":[""GRU""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + + scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNGRU',params=p,round_limit=self.roundLimit,random_method=self.randomMethod) + + matrix_type = 'val_loss' + if self.scoreParam.lower() == 'rmse': + matrix_type = 'val_rmse_m' + elif(self.scoreParam.lower() == 'r2'): + matrix_type = 'val_r_square' + elif(self.scoreParam.lower() == 'mae'): + matrix_type = 'val_mae' + elif(self.scoreParam.lower() == 'mse'): + matrix_type = 'val_mse' + + analyze_objectRNNGRU = talos.Analyze(scan_object) + highValAccRNNGRU = analyze_objectRNNGRU.low(matrix_type) + dfRNNGRU = analyze_objectRNNGRU.data + + newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU] + + + best_paramsRNNGRU[""RNNType""] = ""GRU"" + best_paramsRNNGRU[""numRNNLayers""] = list(newdfRNNGRU[""numRNNLayers""])[0] + best_paramsRNNGRU[""activation""] = list(newdfRNNGRU[""activation""])[0] + best_paramsRNNGRU[""optimizer""] = list(newdfRNNGRU[""optimizer""])[0] + best_paramsRNNGRU[""losses""] = list(newdfRNNGRU[""losses""])[0] + best_paramsRNNGRU[""first_layer""] = list(newdfRNNGRU[""first_neuron""])[0] + best_paramsRNNGRU[""shapes""] = list(newdfRNNGRU[""shapes""])[0] + best_paramsRNNGRU[""hidden_layers""] = list(newdfRNNGRU[""hidden_layers""])[0] + best_paramsRNNGRU[""dropout""] = list(newdfRNNGRU[""dropout""])[0] + best_paramsRNNGRU[""batch_size""] = list(newdfRNNGRU[""batch_size""])[0] + best_paramsRNNGRU[""epochs""] = list(newdfRNNGRU[""epochs""])[0] + 
best_paramsRNNGRU[""lr""] = list(newdfRNNGRU[""lr""])[0] + + best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=True) + + loss_matrix = best_paramsRNNGRU[""losses""] + optimizer = best_paramsRNNGRU[""optimizer""] + batchsize = best_paramsRNNGRU[""batch_size""] + + if self.scoreParam == 'rmse': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif self.scoreParam == 'r2': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square]) + elif self.scoreParam == 'mae': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae']) + else: + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse']) + + scoreRNNGRU = best_modelRNNGRU.evaluate(X1,Y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNNGRU.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreRNNGRU)) + self.log.info(""----------> Model Params: ""+str(best_paramsRNNGRU)) + executionTime=time.time() - start + self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n') + XSNN = np.expand_dims(self.testX, axis=2) + predictedData = best_modelRNNGRU.predict(XSNN) + if self.scoreParam.lower() == 'mse': + score = mean_squared_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'rmse': + score=mean_squared_error(self.testY,predictedData,squared=False) + elif self.scoreParam.lower() == 'mae': + score=mean_absolute_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'r2': + score=r2_score(self.testY,predictedData) + else: + score = scoreRNNGRU[1] + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNNGRU[1] = score + + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network (GRU)"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNNGRU[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network (LSTM)""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network (LSTM)"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network (LSTM)""] + p = {""RNNType"":[""LSTM""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + + scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNLSTM',params=p,round_limit=self.roundLimit,random_method=self.randomMethod) + + matrix_type = 'val_loss' + if self.scoreParam.lower() == 'rmse': + matrix_type = 'val_rmse_m' + elif(self.scoreParam.lower() == 'r2'): + matrix_type = 'val_r_square' + elif(self.scoreParam.lower() == 'mae'): + matrix_type = 'val_mae' + elif(self.scoreParam.lower() == 'mse'): + matrix_type = 'val_mse' + + analyze_objectRNNLSTM = talos.Analyze(scan_object) + highValAccRNNLSTM = analyze_objectRNNLSTM.low(matrix_type) + dfRNNLSTM = analyze_objectRNNLSTM.data + + newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM] + + + best_paramsRNNLSTM[""RNNType""] = ""GRU"" + best_paramsRNNLSTM[""numRNNLayers""] = list(newdfRNNLSTM[""numRNNLayers""])[0] + best_paramsRNNLSTM[""activation""] = list(newdfRNNLSTM[""activation""])[0] + best_paramsRNNLSTM[""optimizer""] = list(newdfRNNLSTM[""optimizer""])[0] + best_paramsRNNLSTM[""losses""] = list(newdfRNNLSTM[""losses""])[0] + best_paramsRNNLSTM[""first_layer""] = list(newdfRNNLSTM[""first_neuron""])[0] + best_paramsRNNLSTM[""shapes""] = list(newdfRNNLSTM[""shapes""])[0] + best_paramsRNNLSTM[""hidden_layers""] = list(newdfRNNLSTM[""hidden_layers""])[0] + best_paramsRNNLSTM[""dropout""] = list(newdfRNNLSTM[""dropout""])[0] + best_paramsRNNLSTM[""batch_size""] = list(newdfRNNLSTM[""batch_size""])[0] + best_paramsRNNLSTM[""epochs""] = list(newdfRNNLSTM[""epochs""])[0] + best_paramsRNNLSTM[""lr""] = list(newdfRNNLSTM[""lr""])[0] + + + best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=True) + + loss_matrix = best_paramsRNNLSTM[""losses""] + optimizer = best_paramsRNNLSTM[""optimizer""] + batchsize = best_paramsRNNLSTM[""batch_size""] + + if self.scoreParam == 'rmse': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif self.scoreParam == 'r2': + best_modelRNNLSTM.compile(loss=loss" +"_matrix,optimizer=optimizer, metrics=[r_square]) + elif self.scoreParam == 'mae': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae']) + else: + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse']) + + scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,Y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNNLSTM.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreRNNLSTM)) + self.log.info(""----------> Model Params: 
""+str(best_paramsRNNLSTM)) + executionTime=time.time() - start + self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n') + XSNN = np.expand_dims(self.testX, axis=2) + predictedData = best_modelRNNLSTM.predict(XSNN) + if self.scoreParam.lower() == 'mse': + score = mean_squared_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'rmse': + score=mean_squared_error(self.testY,predictedData,squared=False) + elif self.scoreParam.lower() == 'mae': + score=mean_absolute_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'r2': + score=r2_score(self.testY,predictedData) + else: + score = scoreRNNLSTM[1] + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNNLSTM[1] = score + + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network (LSTM)"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNNLSTM[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)') + self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Convolutional Neural Network (1D)""in self.modelList: + self.log.info(""-------> Model Name: CNN"") + start = time.time() + data = self.modelParams[""Convolutional Neural Network (1D)""] + p = {""activation"":data[""activation""].split("",""), + ""kernel_size"":data[""kernel_size""].split("",""), + ""numConvLayers"":[int(n) for n in data[""numConvLayers""].split("","")], + ""MaxPool"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + scan_object = talos.Scan(x=X_train,y=y_train, x_val = X_test, y_val = y_test, model = modelObj.CNNRegression,experiment_name='CNN', params=p,round_limit=self.roundLimit,random_method=self.randomMethod) + + matrix_type = 'val_loss' + if self.scoreParam.lower() == 'rmse': + matrix_type = 'val_rmse_m' + elif(self.scoreParam.lower() == 'r2'): + matrix_type = 'val_r_square' + elif(self.scoreParam.lower() == 'mae'): + matrix_type = 'val_mae' + elif(self.scoreParam.lower() == 'mse'): + matrix_type = 'val_mse' + + analyze_objectCNN = talos.Analyze(scan_object) + highValAccCNN = analyze_objectCNN.low(matrix_type) + dfCNN = analyze_objectCNN.data + + newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN] + + + best_paramsCNN[""numConvLayers""] = list(newdfCNN[""numConvLayers""])[0] + best_paramsCNN[""MaxPool""] = list(newdfCNN[""MaxPool""])[0] + best_paramsCNN[""activation""] = list(newdfCNN[""activation""])[0] + best_paramsCNN[""optimizer""] = list(newdfCNN[""optimizer""])[0] + best_paramsCNN[""losses""] = list(newdfCNN[""losses""])[0] + best_paramsCNN[""first_layer""] = list(newdfCNN[""first_neuron""])[0] + best_paramsCNN[""shapes""] = list(newdfCNN[""shapes""])[0] + best_paramsCNN[""hidden_layers""] = list(newdfCNN[""hidden_layers""])[0] + best_paramsCNN[""dropout""] = list(newdfCNN[""dropout""])[0] + best_paramsCNN[""batch_size""] = list(newdfCNN[""batch_size""])[0] + best_paramsCNN[""epochs""] = 
list(newdfCNN[""epochs""])[0] + best_paramsCNN[""lr""] = list(newdfCNN[""lr""])[0] + + + best_modelCNN = scan_object.best_model(metric=matrix_type, asc=True) + + loss_matrix = best_paramsCNN[""losses""] + optimizer = best_paramsCNN[""optimizer""] + batchsize = best_paramsCNN[""batch_size""] + + if self.scoreParam == 'rmse': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif self.scoreParam == 'r2': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square]) + elif self.scoreParam == 'mae': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae']) + else: + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse']) + + scoreCNN = best_modelCNN.evaluate(X1,Y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelCNN.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreCNN)) + self.log.info(""----------> Model Params: ""+str(best_paramsCNN)) + executionTime=time.time() - start + self.log.info('----------> CNN Execution Time: '+str(executionTime)+'\\n') + + XSNN = np.expand_dims(self.testX, axis=2) + predictedData = best_modelCNN.predict(XSNN) + if self.scoreParam.lower() == 'mse': + score = mean_squared_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'rmse': + score=mean_squared_error(self.testY,predictedData,squared=False) + elif self.scoreParam.lower() == 'mae': + score=mean_absolute_error(self.testY,predictedData) + elif self.scoreParam.lower() == 'r2': + score=r2_score(self.testY,predictedData) + else: + score = scoreCNN[1] + self.log.info(""----------> Testing Score: ""+str(score)) + scoreCNN[1] = score + + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""CNN"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreCNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: CNN') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + + modelScore = [] + + if len(scoreSNN) != 0: + modelScore.append(scoreSNN[1]) + if len(scoreRNN) != 0: + modelScore.append(scoreRNN[1]) + if len(scoreRNNGRU) != 0: + modelScore.append(scoreRNNGRU[1]) + if len(scoreRNNLSTM) != 0: + modelScore.append(scoreRNNLSTM[1]) + if len(scoreCNN) != 0: + modelScore.append(scoreCNN[1]) + + selectedModel = """" + best_model = """" + if self.scoreParam == ""r2"": + if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]: + selectedModel = ""Neural Network"" + best_model = best_modelSNN + best_params = best_paramsSNN + + elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]: + selectedModel = ""Recurrent Neural Network"" + best_model = best_modelRNN + best_params = best_paramsRNN + + elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]: + selectedModel = ""Recurrent Neural Network (GRU)"" + best_model = best_modelRNNGRU + best_params = best_paramsRNNGRU + + elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]: + selectedModel = ""Recurrent Neural Network (LSTM)"" + best_model = best_modelRNNLSTM + best_params = best_paramsRNNLSTM + + elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]: + selectedModel = ""Convolutional Neural Network (1D)"" + best_model = best_modelCNN + best_params = best_paramsCNN + + modelScore = max(modelScore) + + else: + if len(scoreSNN) != 0 and min(modelScore) == scoreSNN[1]: + selectedModel = ""Neural Network"" + best_model = best_modelSNN + best_params = best_paramsSNN + + elif len(scoreRNN) != 0 and min(modelScore) == scoreRNN[1]: + selectedModel = ""Recurrent Neural Network"" + best_model = best_modelRNN + best_params = best_paramsRNN + + elif len(scoreRNNGRU) != 0 and min(modelScore) == scoreRNNGRU[1]: + selectedModel = ""Recurrent Neural Network (GRU)"" + best_model = best_modelRNNGRU + best_params = best_paramsRNNGRU + + elif len(scoreRNNLSTM) != 0 and min(modelScore) == scoreRNNLSTM[1]: + selectedModel = ""Recurrent Neural Network (LSTM)"" + best_model = best_modelRNNLSTM + best_params = best_paramsRNNLSTM + + elif len(scoreCNN) != 0 and min(modelScore) == scoreCNN[1]: + selectedModel = ""Convolutional Neural Network (1D)"" + best_model = best_modelCNN + best_params = best_paramsCNN + + modelScore = min(modelScore) + + executionTime=time.time() - lstart + self.log.info(""-------> Total Execution Time(sec):""+str(executionTime)) + self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2))) + + return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer + + except Exception as inst: + + self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Techn" +"ologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import logging +import numpy as np +import pandas as pd +from sklearn.metrics import confusion_matrix +from sklearn.metrics import classification_report +from sklearn.metrics import roc_curve, auc +from sklearn.metrics import roc_auc_score +from sklearn.preprocessing import LabelBinarizer +from imblearn.over_sampling import RandomOverSampler,SMOTE +from imblearn.under_sampling import RandomUnderSampler +from imblearn.under_sampling import TomekLinks +from sklearn.model_selection import train_test_split +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +from sklearn.metrics import log_loss +import tensorflow as tf +from tensorflow.keras import backend as K +from tensorflow.keras.models import load_model +from dlearning.Classification import DLClassificationModel +from dlearning.Regression import DLRegressionModel +from learner.machinelearning import machinelearning +from sklearn.metrics import matthews_corrcoef, brier_score_loss +import os +def recall_m(y_true, y_pred): + true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) + possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) + recall = true_positives / (possible_positives + K.epsilon()) + return recall + +def precision_m(y_true, y_pred): + true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) + predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) + precision = true_positives / (predicted_positives + K.epsilon()) + return precision + +def f1_m(y_true, y_pred): + precision = precision_m(y_true, y_pred) + recall = recall_m(y_true, y_pred) + return 2*((precision*recall)/(precision+recall+K.epsilon())) + + +def rmse_m(y_true, y_pred): + return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) + +def r_square(y_true, y_pred): + SS_res = K.sum(K.square(y_true-y_pred)) + SS_tot = K.sum(K.square(y_true-K.mean(y_true))) + return (1 - SS_res/(SS_tot+K.epsilon())) + + +class deeplearning(object): + def __init__(self): + self.log = logging.getLogger('eion') + + def getDLPredictionData(self,model_dl,hist_reloaded,X): + if model_dl == ""Neural Network"": + XSNN = X.values + predictedData = hist_reloaded.predict(XSNN) + else: + X1 = np.expand_dims(X, axis=2) + predictedData = hist_reloaded.predict(X1) + return(predictedData) + + def getPredictionData(self,model_dl,hist_reloaded,X): + if model_dl == ""Neural Network"": + XSNN = X.values + #predictedData = hist_reloaded.predict_classes(XSNN) + predict_x=hist_reloaded.predict(XSNN) + predictedData=np.argmax(predict_x,axis=1) + else: + X1 = np.expand_dims(X, axis=2) + #predictedData = hist_reloaded.predict_classes(X1) + predict_x=hist_reloaded.predict(X1) + predictedData=np.argmax(predict_x,axis=1) + return(predictedData, predict_x) + + def LoadDL_Regression_Model(self,filename_dl,scoreParam,loss_matrix,optimizer): + if(scoreParam.lower() == 'rmse'): + hist_reloaded = load_model(filename_dl,custom_objects={""rmse"": rmse_m},compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m]) + elif(scoreParam.lower() == 'r2'): + hist_reloaded = load_model(filename_dl,custom_objects={""r2"": r_square},compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square]) + else: + hist_reloaded = load_model(filename_dl) + return(hist_reloaded) + + + def startLearning(self,model_type,modelList, modelParams, scoreParam, cvSplit, 
xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,labelMaps,df_test,deployLocation,modelName,modelVersion,best_feature_model): + mlobj = machinelearning() + if model_type == 'Classification': + self.log.info('\\n------ Training DL: Classification ----') + objClf = DLClassificationModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model) + dftrain = xtrain.copy() + dftrain['Target'] = ytrain + model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objClf.TalosScan(objClf) + self.log.info('------ Training DL: Classification End----\\n') + saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav' + filename_dl = os.path.join(deployLocation,'model',saved_model_dl) + best_model_dl.save(filename_dl) + hist_reloaded = self.LoadDL_Classification_Model(filename_dl,scoreParam,loss_matrix,optimizer) + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtrain) + trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(ytrain, predictedData, prob,labelMaps) + self.log.info('\\n--------- Performance Matrix with Train Data End ---------') + predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtest) + df_test['predict'] = predictedData + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performancematrix = mlobj.getClassificationPerformaceMatrix(ytest, predictedData, prob,labelMaps) + self.log.info('\\n--------- Performance Matrix with Test Data End ---------') + return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix) + else: + objReg = DLRegressionModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model) + dftrain = xtrain.copy() + dftrain['Target'] = ytrain + model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objReg.TalosScan(objReg) + self.log.info('------ Training DL: Regression End----\\n') + self.log.info('\\n------- Best DL Model and its parameters -------------') + self.log.info('-------> Best Model: '+str(model_dl)) + self.log.info('-------> Best Score: '+str(score_dl)) + self.log.info('-------> Best Params: '+str(params_dl)) + self.log.info('------- Best DL Model and its parameters End-------------\\n') + saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav' + filename_dl = os.path.join(deployLocation,'model',saved_model_dl) + best_model_dl.save(filename_dl) + hist_reloaded=self.LoadDL_Regression_Model(filename_dl,scoreParam,loss_matrix,optimizer) + predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtrain) + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + trainingperformancematrix = mlobj.get_regression_matrix(ytrain, predictedData) + self.log.info('--------- Performance Matrix with Train Data End---------\\n') + predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtest) + df_test['predict'] = predictedData + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performancematrix = mlobj.get_regression_matrix(ytest, predictedData) + self.log.info('--------- Performance Matrix with Test Data End---------\\n') + 
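+ # Hand back the tuned DL regressor together with its best parameters, the
+ # reshaped inputs, the serialized model path and the train/test performance
+ # matrices.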
return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix) + + def LoadDL_Classification_Model(self,filename_dl,scoreParam,loss_matrix,optimizer): + if(scoreParam.lower() == 'recall'): + hist_reloaded = load_model(filename_dl,custom_objects={""recall"": recall_m},compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif(scoreParam.lower() == 'precision'): + hist_reloaded = load_model(filename_dl,custom_objects={""precision"": precision_m},compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif(scoreParam.lower() == 'roc_auc'): + hist_reloaded = load_model(filename_dl,compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif(scoreParam.lower() == 'f1_score'): + hist_reloaded = load_model(filename_dl,custom_objects={""f1_score"": f1_m},compile=False) + hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + else: + hist_reloaded = load_model(filename_dl) + return(hist_reloaded) + + def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps): + setOfyTrue = set(le_trainY) + unqClassLst = list(setOfyTrue) + if(str(labelMaps) != '{}'): + inv_mapping_dict = {v: k for k, v in labelMaps.items()} + unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) + unqClassLst2 = list(unqClassLst2) + else: + unqClassLst2 = unqClassLst + indexName = [] + columnName = [] + for item in unqClassLst2: + indexName.append(""true:""+str(item)) + columnName.append(str(item)) + matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) + self.log.info('\\n <--- Confusion Matrix --->') + self.log.info(matrixconfusion) + classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, output_dict=True)) + self.log.info('\\n <--- Classification Report --->') + self.log.info(classificationreport) + lb = LabelBinarizer() + lb.fit(le_trainY) + transformTarget= lb.transform(le_trainY) + if transformTarget.shape[-1] == 1: + transformTarget = le_trainY + prob = np.delete( prob, 0, 1) + rocaucscore = roc_auc_score(transformTarget,prob,average=""macro"") + brier_score = None + mcc_score = matthews_corrcoef(le_trainY,predictedData) + if len(unqClassLst) > 2: + brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1)) + else: + brier_score = brier_score_loss(transformTarget,prob) + self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) + self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}') + self.log.info(f'-------> BRIER SCORE : {brier_score}') + matrixconfusion = matrixconfusion.to_json(orient='index') + classificationreport = classificationreport.to_json(orient='index') + matrix = f'""ConfusionMatrix"": {matrixconfusion},""ClassificationReport"": {classificationreport},""ROC_AUC_SCORE"": {rocaucscore},""MCC_SCORE"": {mcc_score},""BRIER_SCORE"": {brier_score}' + return(matrix) + + def split_into_train_test_data(self,featureData,targetData,cvSplit,testPercentage,modelType='classification'): + ''' + if cvSplit == None: + ''' + testSize=testPercentage/100 + if modelType == 'regression': + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) + else: + try: + 
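+ # Prefer a stratified split for classification targets; if stratification
+ # fails (e.g. a class with too few samples), fall back to the plain random
+ # split in the except branch below.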
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True) + except: + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) + + self.log.info('\\n<-------------- Test Train Split -------------" +"--->\\n') + self.log.info('\\n<-------- Train Data Shape '+str(xtrain.shape)+' ---------->\\n') + self.log.info('\\n<-------- Test Data Shape '+str(xtest.shape)+' ---------->\\n') + ''' + else: + xtrain=featureData + ytrain=targetData + xtest=featureDa" +"Conv1D(filters=params['first_neuron'], kernel_size=(3), activation=params['activation'], input_shape=(x_train.shape[1],1),padding='same') ) + if params['numConvLayers'] > 1: + for x in range(1,params['numConvLayers']): + if params['MaxPool'] == ""True"": + model.add(MaxPooling1D(pool_size=2,padding='same')) + model.add(Conv1D(filters=8, kernel_size=3, activation=params['activation'],padding='same')) + talos.utils.hidden_layers(model, params, x_train.shape[1]) + model.add(MaxPooling1D(pool_size=2,padding='same')) + model.add(Flatten()) + model.add(Dense(y_train.shape[1],activation=params['last_activation'])) + model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['acc',f1_m,precision_m,recall_m,tf.keras.metrics.AUC()]) + out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'], + epochs=params['epochs'],verbose=0,shuffle=True) + + return out, model + + + def TalosScan(self,modelObj): + + try: + #dataPath = pd.read_csv(self.dataLocation) + #X = dataPath.drop(self.targetData, axis=1) + loss_matrix='binary_crossentropy' + optimizer='Nadam' + X = self.featuresData + x = X.values + Y = self.targetData + scoredetails = '' + #Y= dataPath[self.targetData] + y = Y.values + y = kutils.to_categorical(y) + XSNN = X.values + X1 = np.expand_dims(X, axis=2) + + + kf = KFold(n_splits = self.cvSplit) + + for train_index, test_index in kf.split(X): + X_train, X_test = x[train_index], x[test_index] + y_train, y_test = y[train_index], y[test_index] + + data = self.modelParams + models = data.keys() + start = time.time() + scoreSNN = [] + scoreRNN = [] + scoreCNN = [] + scoreRNNGRU = [] + scoreRNNLSTM = [] + best_paramsSNN = {} + best_paramsRNN = {} + best_paramsRNNGRU = {} + best_paramsRNNLSTM = {} + best_paramsCNN = {} + if ""Neural Network""in self.modelList: + self.log.info(""-------> Model Name: Neural Network"") + start = time.time() + data = self.modelParams[""Neural Network""] + + p = {""activation"":data[""activation""].split("",""), + ""last_activation"":data[""last_activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")] + } + param_combinations = int(np.prod([len(x.split(',')) for x in p])) + round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations) + scan_object = talos.Scan(x=X_train, + y=y_train, + x_val = X_test, + y_val = y_test, + model = modelObj.SNNClassification, + experiment_name='SNN', + params=p, + round_limit=round_limit, 
+ random_method=self.randomMethod + ) + matrix_type = 'val_acc' + if self.scoreParam.lower() == 'accuracy': + matrix_type = 'val_acc' + elif(self.scoreParam.lower() == 'roc_auc'): + matrix_type = 'val_auc' + elif(self.scoreParam.lower() == 'recall'): + matrix_type = 'val_recall_m' + elif(self.scoreParam.lower() == 'precision'): + matrix_type = 'val_precision_m' + elif(self.scoreParam.lower() == 'f1_score'): + matrix_type = 'val_f1_m' + + analyze_objectSNN = talos.Analyze(scan_object) + highValAccSNN = analyze_objectSNN.high(matrix_type) + dfSNN = analyze_objectSNN.data + #pd.set_option('display.max_columns',20) + #print(dfSNN) + #pd.reset_option('display.max_columns') + newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN] + if(len(newdfSNN) > 1): + lowLoss = analyze_objectSNN.low('val_loss') + newdfSNN = newdfSNN.loc[newdfSNN['val_loss'] == lowLoss] + best_paramsSNN[""activation""] = list(newdfSNN[""activation""])[0] + best_paramsSNN[""optimizer""] = list(newdfSNN[""optimizer""])[0] + best_paramsSNN[""losses""] = list(newdfSNN[""losses""])[0] + best_paramsSNN[""first_layer""] = list(newdfSNN[""first_neuron""])[0] + best_paramsSNN[""shapes""] = list(newdfSNN[""shapes""])[0] + best_paramsSNN[""hidden_layers""] = list(newdfSNN[""hidden_layers""])[0] + best_paramsSNN[""dropout""] = list(newdfSNN[""dropout""])[0] + best_paramsSNN[""batch_size""] = list(newdfSNN[""batch_size""])[0] + best_paramsSNN[""epochs""] = list(newdfSNN[""epochs""])[0] + best_paramsSNN[""lr""] = list(newdfSNN[""lr""])[0] + best_paramsSNN[""last_activation""] = list(newdfSNN[""last_activation""])[0] + + best_modelSNN = scan_object.best_model(metric=matrix_type) + try: + if(len(best_paramsSNN[""losses""]) == 0): + loss_matrix = 'binary_crossentropy' + else: + loss_matrix = best_paramsSNN[""losses""] + if(len(best_paramsSNN[""optimizer""]) == 0): + optimizer = 'Nadam' + else: + optimizer = best_paramsSNN[""optimizer""] + if best_paramsSNN[""batch_size""] == 0: + batchsize = 32 + else: + batchsize = best_paramsSNN[""batch_size""] + except: + loss_matrix = 'binary_crossentropy' + optimizer = 'Nadam' + batchsize = 32 + + if self.scoreParam == 'accuracy': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy']) + elif self.scoreParam == 'roc_auc': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif self.scoreParam == 'recall': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif self.scoreParam == 'precision': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif self.scoreParam == 'f1_score': + best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + + scoreSNN = best_modelSNN.evaluate(XSNN,y, batch_size=batchsize) + self.log.info(""----------> Score Matrix: ""+str(best_modelSNN.metrics_names)) + self.log.info(""----------> Score: ""+str(scoreSNN)) + self.log.info(""----------> Model Params: ""+str(best_paramsSNN)) + executionTime=time.time() - start + XSNN = self.testX.values + #predict_x=best_modelSNN.predict(XSNN) + predictedData=np.argmax(best_modelSNN.predict(XSNN),axis=1) + #predictedData = best_modelSNN.predict_classes(XSNN) + #print(predictedData) + #predictedData = best_modelSNN.predict(self.testX) + if 'accuracy' in str(self.scoreParam): + score = accuracy_score(self.testY,predictedData) + elif 'recall' in str(self.scoreParam): + score = recall_score(self.testY,predictedData, average='macro') + elif 'precision' in str(self.scoreParam): + score 
= precision_score(self.testY,predictedData,average='macro') + elif 'f1_score' in str(self.scoreParam): + score = f1_score(self.testY,predictedData, average='macro') + elif 'roc_auc' in str(self.scoreParam): + score = roc_auc_score(self.testY,predictedData,average=""macro"") + score = round((score*100),2) + self.log.info(""----------> Testing Score: ""+str(score)) + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + scoreSNN[1] = score + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Neural Network"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreSNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Neural Network') + self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network""] + p = {""RNNType"":[""SimpleRNN""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""last_activation"":data[""last_activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + param_combinations = int(np.prod([len(x.split(',')) for x in p])) + round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations) + + scan_object = talos.Scan(x=X_train, + y=y_train, + x_val = X_test, + y_val = y_test, + model = modelObj.RNNClassification, + experiment_name='RNN', + params=p, + round_limit=round_limit, + random_method=self.randomMethod + ) + + matrix_type = 'val_acc' + if self.scoreParam.lower() == 'accuracy': + matrix_type = 'val_acc' + elif(self.scoreParam.lower() == 'roc_auc'): + matrix_type = 'val_auc' + elif(self.scoreParam.lower() == 'recall'): + matrix_type = 'val_recall_m' + elif(self.score" +"Param.lower() == 'precision'): + matrix_type = 'val_precision_m' + elif(self.scoreParam.lower() == 'f1_score'): + matrix_type = 'val_f1_m' + + analyze_objectRNN = talos.Analyze(scan_object) + highValAccRNN = analyze_objectRNN.high(matrix_type) + dfRNN = analyze_objectRNN.data + newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN] + if(len(newdfRNN) > 1): + lowLoss = analyze_objectRNN.low('val_loss') + newdfRNN = newdfRNN.loc[newdfRNN['val_loss'] == lowLoss] + + best_paramsRNN[""RNNType""] = list(newdfRNN[""RNNType""])[0] + best_paramsRNN[""numRNNLayers""] = list(newdfRNN[""numRNNLayers""])[0] + best_paramsRNN[""activation""] = list(newdfRNN[""activation""])[0] + best_paramsRNN[""optimizer""] = list(newdfRNN[""optimizer""])[0] + best_paramsRNN[""losses""] = list(newdfRNN[""losses""])[0] + best_paramsRNN[""first_layer""] = list(newdfRNN[""first_neuron""])[0] + best_paramsRNN[""shapes""] = list(newdfRNN[""shapes""])[0] + best_paramsRNN[""hidden_layers""] = list(newdfRNN[""hidden_layers""])[0] + best_paramsRNN[""dropout""] = list(newdfRNN[""dropout""])[0] + 
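+ # Copy the remaining best-trial settings (batch size, epochs, learning rate,
+ # output activation) before reloading the best RNN model and recompiling it
+ # with the metric that matches self.scoreParam.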
best_paramsRNN[""batch_size""] = list(newdfRNN[""batch_size""])[0] + best_paramsRNN[""epochs""] = list(newdfRNN[""epochs""])[0] + best_paramsRNN[""lr""] = list(newdfRNN[""lr""])[0] + best_paramsRNN[""last_activation""] = list(newdfRNN[""last_activation""])[0] + + best_modelRNN = scan_object.best_model(metric=matrix_type, asc=False) + + try: + if(len(best_paramsRNN[""losses""]) == 0): + loss_matrix = 'binary_crossentropy' + else: + loss_matrix = best_paramsRNN[""losses""][0] + + if(len(best_paramsRNN[""optimizer""]) == 0): + optimizer = 'Nadam' + else: + optimizer = best_paramsRNN[""optimizer""][0] + + if(best_paramsRNN[""batch_size""] == 0): + batchsize = 32 + else: + batchsize = best_paramsRNN[""batch_size""][0] + except: + loss_matrix = 'binary_crossentropy' + optimizer = 'Nadam' + batchsize = 32 + + if self.scoreParam == 'accuracy': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy']) + elif self.scoreParam == 'recall': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif self.scoreParam == 'roc_auc': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif self.scoreParam == 'precision': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif self.scoreParam == 'f1_score': + best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNN.metrics_names)) + scoreRNN = best_modelRNN.evaluate(X1,y, batch_size=batchsize) + self.log.info(""----------> Score: ""+str(scoreRNN)) + self.log.info(""----------> Model Params: ""+str(best_paramsRNN)) + executionTime=time.time() - start + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + + XSNN = np.expand_dims(self.testX, axis=2) + #predictedData = best_modelRNN.predict_classes(XSNN) + predictedData=np.argmax(best_modelRNN.predict(XSNN),axis=1) + #predictedData = best_modelSNN.predict(self.testX) + if 'accuracy' in str(self.scoreParam): + score = accuracy_score(self.testY,predictedData) + elif 'recall' in str(self.scoreParam): + score = recall_score(self.testY,predictedData, average='macro') + elif 'precision' in str(self.scoreParam): + score = precision_score(self.testY,predictedData,average='macro') + elif 'f1_score' in str(self.scoreParam): + score = f1_score(self.testY,predictedData, average='macro') + elif 'roc_auc' in str(self.scoreParam): + score = roc_auc_score(self.testY,predictedData,average=""macro"") + score = round((score*100),2) + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNN[1] = score + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network (GRU)""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network (GRU)"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network (GRU)""] + print(data) + p = {""RNNType"":[""GRU""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""last_activation"":data[""last_activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + param_combinations = int(np.prod([len(x.split(',')) for x in p])) + round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations) + + scan_object = talos.Scan(x=X_train, + y=y_train, + x_val = X_test, + y_val = y_test, + model = modelObj.RNNClassification, + experiment_name='RNN', + params=p, + round_limit=round_limit, + random_method=self.randomMethod + ) + + matrix_type = 'val_acc' + if self.scoreParam.lower() == 'accuracy': + matrix_type = 'val_acc' + elif(self.scoreParam.lower() == 'roc_auc'): + matrix_type = 'val_auc' + elif(self.scoreParam.lower() == 'recall'): + matrix_type = 'val_recall_m' + elif(self.scoreParam.lower() == 'precision'): + matrix_type = 'val_precision_m' + elif(self.scoreParam.lower() == 'f1_score'): + matrix_type = 'val_f1_m' + + analyze_objectRNNGRU = talos.Analyze(scan_object) + highValAccRNNGRU = analyze_objectRNNGRU.high(matrix_type) + dfRNNGRU = analyze_objectRNNGRU.data + newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU] + if(len(newdfRNNGRU) > 1): + lowLoss = analyze_objectRNNGRU.low('val_loss') + newdfRNNGRU = newdfRNNGRU.loc[newdfRNNGRU['val_loss'] == lowLoss] + + best_paramsRNNGRU[""RNNType""] = ""GRU"" + best_paramsRNNGRU[""numRNNLayers""] = list(newdfRNNGRU[""numRNNLayers""])[0] + best_paramsRNNGRU[""activation""] = list(newdfRNNGRU[""activation""])[0] + best_paramsRNNGRU[""optimizer""] = list(newdfRNNGRU[""optimizer""])[0] + best_paramsRNNGRU[""losses""] = list(newdfRNNGRU[""losses""])[0] + best_paramsRNNGRU[""first_layer""] = list(newdfRNNGRU[""first_neuron""])[0] + best_paramsRNNGRU[""shapes""] = list(newdfRNNGRU[""shapes""])[0] + best_paramsRNNGRU[""hidden_layers""] = list(newdfRNNGRU[""hidden_layers""])[0] + best_paramsRNNGRU[""dropout""] = list(newdfRNNGRU[""dropout""])[0] + best_paramsRNNGRU[""batch_size""] = list(newdfRNNGRU[""batch_size""])[0] + best_paramsRNNGRU[""epochs""] = list(newdfRNNGRU[""epochs""])[0] + best_paramsRNNGRU[""lr""] = list(newdfRNNGRU[""lr""])[0] + best_paramsRNNGRU[""last_activation""] = list(newdfRNNGRU[""last_activation""])[0] + + best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=False) + try: + if(len(best_paramsRNNGRU[""losses""]) == 0): + loss_matrix = 'binary_crossentropy' + else: + loss_matrix = best_paramsRNNGRU[""losses""][0] + + if(len(best_paramsRNNGRU[""optimizer""]) == 0): + optimizer = 'Nadam' + else: + optimizer = best_paramsRNNGRU[""optimizer""][0] + + if(best_paramsRNNGRU[""batch_size""]== 
0): + batchsize = 32 + else: + batchsize = best_paramsRNNGRU[""batch_size""][0] + except: + loss_matrix = 'binary_crossentropy' + optimizer = 'Nadam' + batchsize = 32 + + if self.scoreParam == 'accuracy': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy']) + elif self.scoreParam == 'recall': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif self.scoreParam == 'roc_auc': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif self.scoreParam == 'precision': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif self.scoreParam == 'f1_score': + best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNNGRU.metrics_names)) + scoreRNNGRU = best_modelRNNGRU.evaluate(X1,y, batch_size=batchsize) + self.log.info(""----------> Score: ""+str(scoreRNNGRU)) + self.log.info(""----------> Model Params: ""+str(best_paramsRNNGRU)) + executionTime=time.time() - start + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + + XSNN =" +"np.expand_dims(self.testX, axis=2) + #predictedData = best_modelRNNGRU.predict_classes(XSNN) + predictedData=np.argmax(best_modelRNNGRU.predict(XSNN),axis=1) + #predictedData = best_modelSNN.predict(self.testX) + if 'accuracy' in str(self.scoreParam): + score = accuracy_score(self.testY,predictedData) + elif 'recall' in str(self.scoreParam): + score = recall_score(self.testY,predictedData, average='macro') + elif 'precision' in str(self.scoreParam): + score = precision_score(self.testY,predictedData,average='macro') + elif 'f1_score' in str(self.scoreParam): + score = f1_score(self.testY,predictedData, average='macro') + elif 'roc_auc' in str(self.scoreParam): + score = roc_auc_score(self.testY,predictedData,average=""macro"") + score = round((score*100),2) + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNNGRU[1] = score + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network (GRU)"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNNGRU[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Recurrent Neural Network (LSTM)""in self.modelList: + self.log.info(""-------> Model Name: Recurrent Neural Network (LSTM)"") + start = time.time() + data = self.modelParams[""Recurrent Neural Network (LSTM)""] + p = {""RNNType"":[""LSTM""], + ""numRNNLayers"":[int(n) for n in data[""numRNNLayers""].split("","")], + ""activation"":data[""activation""].split("",""), + ""last_activation"":data[""last_activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + param_combinations = int(np.prod([len(x.split(',')) for x in p])) + round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations) + + scan_object = talos.Scan(x=X_train, + y=y_train, + x_val = X_test, + y_val = y_test, + model = modelObj.RNNClassification, + experiment_name='RNN', + params=p, + round_limit=round_limit, + random_method=self.randomMethod + ) + + matrix_type = 'val_acc' + if self.scoreParam.lower() == 'accuracy': + matrix_type = 'val_acc' + elif(self.scoreParam.lower() == 'roc_auc'): + matrix_type = 'val_auc' + elif(self.scoreParam.lower() == 'recall'): + matrix_type = 'val_recall_m' + elif(self.scoreParam.lower() == 'precision'): + matrix_type = 'val_precision_m' + elif(self.scoreParam.lower() == 'f1_score'): + matrix_type = 'val_f1_m' + + analyze_objectRNNLSTM = talos.Analyze(scan_object) + highValAccRNNLSTM = analyze_objectRNNLSTM.high(matrix_type) + dfRNNLSTM = analyze_objectRNNLSTM.data + newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM] + if(len(newdfRNNLSTM) > 1): + lowLoss = analyze_objectRNNLSTM.low('val_loss') + newdfRNNLSTM = newdfRNNLSTM.loc[newdfRNNLSTM['val_loss'] == lowLoss] + + best_paramsRNNLSTM[""RNNType""] = ""LSTM"" + best_paramsRNNLSTM[""numRNNLayers""] = list(newdfRNNLSTM[""numRNNLayers""])[0] + best_paramsRNNLSTM[""activation""] = list(newdfRNNLSTM[""activation""])[0] + best_paramsRNNLSTM[""optimizer""] = list(newdfRNNLSTM[""optimizer""])[0] + best_paramsRNNLSTM[""losses""] = list(newdfRNNLSTM[""losses""])[0] + best_paramsRNNLSTM[""first_layer""] = list(newdfRNNLSTM[""first_neuron""])[0] + best_paramsRNNLSTM[""shapes""] = list(newdfRNNLSTM[""shapes""])[0] + best_paramsRNNLSTM[""hidden_layers""] = list(newdfRNNLSTM[""hidden_layers""])[0] + best_paramsRNNLSTM[""dropout""] = list(newdfRNNLSTM[""dropout""])[0] + best_paramsRNNLSTM[""batch_size""] = list(newdfRNNLSTM[""batch_size""])[0] + best_paramsRNNLSTM[""epochs""] = list(newdfRNNLSTM[""epochs""])[0] + best_paramsRNNLSTM[""lr""] = list(newdfRNNLSTM[""lr""])[0] + best_paramsRNNLSTM[""last_activation""] = list(newdfRNNLSTM[""last_activation""])[0] + + best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=False) + try: + if(len(best_paramsRNNLSTM[""losses""]) == 0): + loss_matrix = 'binary_crossentropy' + else: + loss_matrix = best_paramsRNNLSTM[""losses""][0] + + if(len(best_paramsRNNLSTM[""optimizer""]) == 0): + optimizer = 'Nadam' + else: + optimizer = best_paramsRNNLSTM[""optimizer""][0] + + 
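+                    # Default the batch size to 32 when the tuned value is missing or zero;
+                    # the loss and optimizer were defaulted the same way just above.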
if(best_paramsRNNLSTM[""batch_size""] == 0): + batchsize = 32 + else: + batchsize = best_paramsRNNLSTM[""batch_size""][0] + except: + loss_matrix = 'binary_crossentropy' + optimizer = 'Nadam' + batchsize = 32 + + if self.scoreParam == 'accuracy': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy']) + elif self.scoreParam == 'recall': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif self.scoreParam == 'roc_auc': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif self.scoreParam == 'precision': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif self.scoreParam == 'f1_score': + best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + self.log.info(""----------> Score Matrix: ""+str(best_modelRNNLSTM.metrics_names)) + scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,y, batch_size=batchsize) + self.log.info(""----------> Score: ""+str(scoreRNNLSTM)) + self.log.info(""----------> Model Params: ""+str(best_paramsRNNLSTM)) + executionTime=time.time() - start + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + + XSNN = np.expand_dims(self.testX, axis=2) + #predictedData = best_modelRNNLSTM.predict_classes(XSNN) + predictedData=np.argmax(best_modelRNNLSTM.predict(XSNN),axis=1) + #predictedData = best_modelSNN.predict(self.testX) + if 'accuracy' in str(self.scoreParam): + score = accuracy_score(self.testY,predictedData) + elif 'recall' in str(self.scoreParam): + score = recall_score(self.testY,predictedData, average='macro') + elif 'precision' in str(self.scoreParam): + score = precision_score(self.testY,predictedData,average='macro') + elif 'f1_score' in str(self.scoreParam): + score = f1_score(self.testY,predictedData, average='macro') + elif 'roc_auc' in str(self.scoreParam): + score = roc_auc_score(self.testY,predictedData,average=""macro"") + score = round((score*100),2) + self.log.info(""----------> Testing Score: ""+str(score)) + scoreRNNLSTM[1] = score + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Recurrent Neural Network (LSTM)"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreRNNLSTM[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + + if ""Convolutional Neural Network (1D)""in self.modelList: + self.log.info(""-------> Model Name: CNN"") + start = time.time() + data = self.modelParams[""Convolutional Neural Network (1D)""] + p = {""activation"":data[""activation""].split("",""), + ""last_activation"":data[""last_activation""].split("",""), + ""numConvLayers"":[int(n) for n in data[""numConvLayers""].split("","")], + ""MaxPool"":data[""activation""].split("",""), + ""optimizer"":data[""optimizer""].split("",""), + ""losses"":data[""losses""].split("",""), + ""first_neuron"":[int(n) for n in data[""first_layer""].split("","")], + ""shapes"": data[""shapes""].split("",""), + ""hidden_layers"":[int(n) for n in data[""hidden_layers""].split("","")], + ""dropout"": [float(n) for n in data[""dropout""].split("","")], + ""lr"": [float(n) for n in data[""learning_rate""].split("","")], + ""batch_size"": [int(n) for n in data[""batch_size""].split("","")], + ""epochs"": [int(n) for n in data[""epochs""].split("","")]} + param_combinations = int(np.prod([len(x.split(',')) for x in p])) + round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations) + scan_object = talos.Scan(x=X_train, + y=y_train, + x_val = X_test, + y_val = y_test, + model = modelObj.CNNClassification, + experiment_name='CNN', + params=p, + round_limit=round_limit, + random_method=self.randomMethod + ) + matrix_type = 'val_acc' + if self.scoreParam.lower() == 'accuracy': + matrix_type = 'val_acc' + elif(self.scoreParam.lower() == 'roc_auc'): + matrix_type = 'val_auc' + elif(self.scoreParam.lower() == 'recall'): + matrix_type = 'val_recall_m' + elif(self.scoreParam.lower() == 'precision'): + matrix_type = 'val_precision_m' + elif(self" +".scoreParam.lower() == 'f1_score'): + matrix_type = 'val_f1_m' + analyze_objectCNN = talos.Analyze(scan_object) + highValAccCNN = analyze_objectCNN.high(matrix_type) + dfCNN = analyze_objectCNN.data + + newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN] + if(len(newdfCNN) > 1): + lowLoss = analyze_objectCNN.low('val_loss') + newdfCNN = newdfCNN.loc[newdfCNN['val_loss'] == lowLoss] + + best_paramsCNN[""numConvLayers""] = list(newdfCNN[""numConvLayers""]) + best_paramsCNN[""MaxPool""] = list(newdfCNN[""MaxPool""]) + best_paramsCNN[""activation""] = list(newdfCNN[""activation""]) + best_paramsCNN[""optimizer""] = list(newdfCNN[""optimizer""]) + best_paramsCNN[""losses""] = list(newdfCNN[""losses""]) + best_paramsCNN[""first_layer""] = list(newdfCNN[""first_neuron""]) + best_paramsCNN[""shapes""] = list(newdfCNN[""shapes""]) + best_paramsCNN[""hidden_layers""] = list(newdfCNN[""hidden_layers""]) + best_paramsCNN[""dropout""] = list(newdfCNN[""dropout""]) + best_paramsCNN[""batch_size""] = list(newdfCNN[""batch_size""]) + best_paramsCNN[""epochs""] = list(newdfCNN[""epochs""]) + best_paramsCNN[""lr""] = list(newdfCNN[""lr""]) + best_paramsCNN[""last_activation""] = list(newdfCNN[""last_activation""])[0] + + best_modelCNN = scan_object.best_model(metric='val_acc', asc=True) + try: + if(len(best_paramsCNN[""losses""]) == 0): + loss_matrix = 'binary_crossentropy' + else: + loss_matrix = best_paramsCNN[""losses""][0] + if(len(best_paramsCNN[""optimizer""]) == 0): + optimizer = 'Nadam' + else: + optimizer = best_paramsCNN[""optimizer""][0] + + if(best_paramsCNN[""batch_size""] == 0): + batchsize = 32 + else: + batchsize = best_paramsCNN[""batch_size""][0] + except: + loss_matrix = 'binary_crossentropy' + optimizer = 'Nadam' + batchsize 
= 32 + + + if self.scoreParam == 'accuracy': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy']) + elif self.scoreParam == 'recall': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m]) + elif self.scoreParam == 'precision': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m]) + elif self.scoreParam == 'roc_auc': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()]) + elif self.scoreParam == 'f1_score': + best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m]) + self.log.info(""----------> Score Matrix: ""+str(best_modelCNN.metrics_names)) + scoreCNN = best_modelCNN.evaluate(X1,y, batch_size=batchsize) + self.log.info(""----------> Score: ""+str(scoreCNN)) + self.log.info(""----------> Model Params: ""+str(best_paramsCNN)) + executionTime=time.time() - start + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + + XSNN = np.expand_dims(self.testX, axis=2) + #predictedData = best_modelCNN.predict_classes(XSNN) + predictedData=np.argmax(best_modelCNN.predict(XSNN),axis=1) + #predictedData = best_modelSNN.predict(self.testX) + if 'accuracy' in str(self.scoreParam): + score = accuracy_score(self.testY,predictedData) + elif 'recall' in str(self.scoreParam): + score = recall_score(self.testY,predictedData, average='macro') + elif 'precision' in str(self.scoreParam): + score = precision_score(self.testY,predictedData,average='macro') + elif 'f1_score' in str(self.scoreParam): + score = f1_score(self.testY,predictedData, average='macro') + elif 'roc_auc' in str(self.scoreParam): + score = roc_auc_score(self.testY,predictedData,average=""macro"") + score = round((score*100),2) + self.log.info(""----------> Testing Score: ""+str(score)) + scoreCNN[1] = score + + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""Convolutional Neural Network (1D)"",""FeatureEngineering"":""'+str(self.best_feature_model)+'"",""Score"":'+str(scoreCNN[1])+'}' + self.log.info('Status:- |... DL Algorithm applied: Convolutional Neural Network (1D)') + self.log.info('Status:- |... 
Score after hyperparameter tuning: '+str(round(score,2))) + modelScore = [] + if len(scoreSNN) != 0: + modelScore.append(scoreSNN[1]) + if len(scoreRNN) != 0: + modelScore.append(scoreRNN[1]) + if len(scoreRNNGRU) != 0: + modelScore.append(scoreRNNGRU[1]) + if len(scoreRNNLSTM) != 0: + modelScore.append(scoreRNNLSTM[1]) + if len(scoreCNN) != 0: + modelScore.append(scoreCNN[1]) + + selectedModel = """" + best_params="""" + if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]: + selectedModel = ""Neural Network"" + best_model = best_modelSNN + best_params = best_paramsSNN + + elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]: + selectedModel = ""Recurrent Neural Network"" + best_model = best_modelRNN + best_params = best_paramsRNN + + elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]: + selectedModel = ""Recurrent Neural Network (GRU)"" + best_model = best_modelRNNGRU + best_params = best_paramsRNNGRU + + elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]: + selectedModel = ""Recurrent Neural Network (LSTM)"" + best_model = best_modelRNNLSTM + best_params = best_paramsRNNLSTM + + elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]: + selectedModel = ""Convolutional Neural Network (1D)"" + best_model = best_modelCNN + best_params = best_paramsCNN + + + modelScore = max(modelScore) + executionTime=time.time() - start + self.log.info(""-------> ExecutionTime(sec) :""+str(executionTime)+'\\n') + self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2))) + self.log.info('-------> Best Params: '+str(best_params)) + return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer + + except Exception as inst: + self.log.info( '\\n-----> classificationModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import numpy as np +from mlxtend.frequent_patterns import apriori, association_rules +from mlxtend.preprocessing import TransactionEncoder +import matplotlib.pyplot as plt + +import json +import logging +import os,sys + +def hot_encode(x): + if(int(x)<= 0): + return 0 + if(int(x)>= 1): + return 1 + +class associationrules: + def __init__(self,dataframe,association_rule_conf,modelparam,invoiceNoFeature,itemFeature): + self.minSupport = modelparam['minSupport'] + self.metric = modelparam['metric'] + self.minThreshold = modelparam['minThreshold'] + self.data = dataframe + self.invoiceNoFeature = invoiceNoFeature + self.itemFeature = itemFeature + self.log = logging.getLogger('eion') + + + def apply_associationRules(self,outputLocation): + self.data= self.data[[self.itemFeature,self.invoiceNoFeature]] + self.data[self.itemFeature] = self.data[self.itemFeature].str.strip() + self.data.dropna(axis = 0, subset =[self.invoiceNoFeature], inplace = True) + self.data[self.invoiceNoFeature] = self.data[self.invoiceNoFeature].astype('str') + self.data = self.data.groupby([self.invoiceNoFeature,self.itemFeature]).size() + + self.data=self.data.unstack().reset_index().fillna('0').set_index(self.invoiceNoFeature) + self.data = self.data.applymap(hot_encode) + ohe_df = self.data + ''' + print(self.data) + sys.exit() + items = [] + for col in list(self.data): + ucols = self.data[col].dropna().unique() + #print('ucols :',ucols) + if len(ucols) > 0: + items = items + list(set(ucols) - set(items)) + #items = self.data.apply(lambda col: col.unique()) + #print(items) + #items = (self.data[self.masterColumn].unique()) + #print(items) + self.log.info(""-------> Total Unique Items: ""+str(len(items))) + encoded_vals = [] + for index, row in self.data.iterrows(): + labels = {} + uncommons = list(set(items) - set(row)) + commons = list(set(items).intersection(row)) + for uc in uncommons: + labels[uc] = 0 + for com in commons: + labels[com] = 1 + encoded_vals.append(labels) + ohe_df = pd.DataFrame(encoded_vals) + #print(ohe_df) + ''' + freq_items = apriori(ohe_df, min_support=self.minSupport, use_colnames=True) + self.log.info('Status:- |... 
AssociationRule Algorithm applied: Apriori') + if not freq_items.empty: + self.log.info(""\\n------------ Frequent Item Set --------------- "") + self.log.info(freq_items) + save_freq_items = pd.DataFrame() + save_freq_items[""itemsets""] = freq_items[""itemsets""].apply(lambda x: ', '.join(list(x))).astype(""unicode"") + outputfile = os.path.join(outputLocation,'frequentItems.csv') + save_freq_items.to_csv(outputfile) + self.log.info('-------> FreqentItems File Name:'+outputfile) + rules = association_rules(freq_items, metric=self.metric, min_threshold=self.minThreshold) + if not rules.empty: + #rules = rules.sort_values(['confidence', 'lift'], ascending =[False, False]) + self.log.info(""\\n------------ Rules --------------- "") + for index, row in rules.iterrows(): + self.log.info(""------->Rule: ""+ str(row['antecedents']) + """ +"-> "" + str(row['consequents'])) + self.log.info(""---------->Support: ""+ str(row['support'])) + self.log.info(""---------->Confidence: ""+ str(row['confidence'])) + self.log.info(""---------->Lift: ""+ str(row['lift'])) + #rules['antecedents'] = lis" +" try: + start_time = time.time() + objConvUtility=model_converter(model_path,output_path,input_format,output_format,input_shape) + objConvUtility.convert() + end_time = time.time() + log.info(f""Time required for conversion: {end_time - start_time} sec"") + log.info(f'\\nConverting {input_format} to {output_format} Successful') + output['Convert'] = ""Success"" + except Exception as e: + output['Convert'] = ""Error"" + log.info('Error: ' + str(e)) + log.error(e, exc_info=True) + if 'not supported' in str(e): + output['sub error'] = ""Not supported"" + output = json.dumps(output) + log.info(f'Output: {output}') + return output + +def convert(config_file): + with open(config_file, 'r') as f: + config = json.load(f) + model_path = config['advance']['aionConversionUtility']['modelpath'] + output_path = config['advance']['aionConversionUtility']['deployedlocation'] + input_format = get_true_option(config['advance']['aionConversionUtility']['inputModelType'],'').lower() + output_format = get_true_option(config['advance']['aionConversionUtility']['outputModelType'],'').lower() + if input_format==""keras"": + input_shape = int(config['advance']['aionConversionUtility']['inputShape']) + if input_format!=""keras"": + input_shape = config['advance']['aionConversionUtility']['numberoffeatures'] + input_shape = int(input_shape) if input_shape else 0 + #input_shape = int(config['advance']['aionConversionUtility']['numberoffeatures']) + output = run(model_path, output_path, input_format, output_format, input_shape) + print(output) + +class aionRunTimeUtility: + # def __init__(self): + # print(""AI.ON ConversionUtility function init..."") + def executeOnRuntime(self,inputModelName,inputDataSet): + # print(""AI.ON ConversionUtility function starts..."") + RuntimeType = inputModelName.rsplit('.', 1)[1] + inputDataType = inputDataSet.rsplit('.', 1)[1] + if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType.lower()=='json')): + # print(""Inference through ONNX Runtime started [ML]"") + import pandas + import json + with open(inputDataSet) as datafile: + data = json.load(datafile) + dataframe = pandas.DataFrame(data,index=[0]) + + import numpy + import onnxruntime as rt + sess = rt.InferenceSession(inputModelName) + input_name = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + inputsize=sess.get_inputs()[0].shape + first_n_column = dataframe.iloc[: , :inputsize[1]] + dataset = 
first_n_column.values + if(inputsize[1]!=len(dataframe.columns)): + print(""Error : Input Data size does not match"") + return 0 + + pred_onx = sess.run([label_name], {input_name: dataset.astype(numpy.float32)[0:1]})[0] + # for i in range(0, 1): + + #print(""ONNX Runtime Prediction [csv]: "",pred_onx) + output = numpy.squeeze(pred_onx) + predictions = numpy.squeeze(output) + prediction = numpy.argmax(predictions) + return(prediction) + + # print(""Inference through ONNX modelcompleted "") + if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType!='json')): + + import numpy as np + import onnxruntime as rt + from tensorflow.keras.preprocessing import image + sess = rt.InferenceSession(inputModelName) + input_name = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + inputsize=sess.get_inputs()[0].shape + img = image.load_img(inputDataSet, target_size=(inputsize[1], inputsize[2])) + x = image.img_to_array(img) + x = np.expand_dims(x, axis=0) + pred_onx = sess.run([label_name], {input_name: x.astype(np.float32)[0:1]})[0] + output = np.squeeze(pred_onx) + predictions = np.squeeze(output) + return(pred_onx) + + + + if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite')and (inputDataType=='json')): + + import numpy as np + import tensorflow as tf + import pandas + from numpy import asarray + interpreter = tf.lite.Interpreter(model_path=inputModelName) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + input_shape = input_details[0]['shape'] + import pandas + import json + with open(inputDataSet) as datafile: + data = json.load(datafile) + dataframe = pandas.DataFrame(data,index=[0]) + dataset = dataframe.values + + XYZ = dataset[:,0:input_shape[1]].astype(float) + + input_data = asarray(XYZ[0]).reshape((1, input_shape[1])) + for i in range(0, 1): + input_data = asarray(XYZ[i]).reshape((1,input_shape[1])) + interpreter.set_tensor(input_details[0]['index'], input_data.astype(np.float32)[0:1]) + interpreter.invoke() + output_data = interpreter.get_tensor(output_details[0]['index']) + predictions = np.squeeze(output_data) + prediction = np.argmax(predictions) + + return(prediction) + if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite') and (inputDataType!='json')): + + + import numpy as np + from tensorflow.keras.preprocessing import image + import os + import tensorflow as tf + import pandas + from numpy import asarray + interpreter = tf.lite.Interpreter(model_path=inputModelName) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + input_shape = input_details[0]['shape'] + + + img = image.load_img(inputDataSet, target_size=(input_shape[1], input_shape[2])) + x = image.img_to_array(img) + x = np.expand_dims(x, axis=0) + interpreter.set_tensor(input_details[0]['index'], x.astype(np.float32)[0:1]) + interpreter.invoke() + output_data = interpreter.get_tensor(output_details[0]['index']) + predictions = np.squeeze(output_data) + prediction = np.argmax(predictions) + return(prediction) + +def runTimeTesting(inputModelName,inputDataSet): + objRunTimeUtility=aionRunTimeUtility() + return(objRunTimeUtility.executeOnRuntime(inputModelName,inputDataSet)) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 
2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' import pandas +import numpy +import sys +import onnxruntime as rt + +def onnx_runtime_validation(modelfile,datafile): + dataframe = pandas.read_csv(datafile) + df = dataframe.head(8) + dataset = df.values + sess = rt.InferenceSession(modelfile) + input_name = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + inputsize=sess.get_inputs()[0].shape + XYZ = dataset[:,0:inputsize[1]].astype(float) + pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0] + print(""Prediction of AION generated/converted model on ONNX runtime for 8 sets of data"") + for i in range(0, 8): + output = numpy.squeeze(pred_onx[i]) + predictions = numpy.squeeze(output) + prediction = numpy.argmax(predictions) + df['predictions'] = predictions + result = df.to_json(orient=""records"") + return(result) +if __name__ == ""__main__"": + output = onnx_runtime_validation(sys.argv[1],sys.argv[2]) + print(""predictions:"",output) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import numpy as np +import pandas as pd +from nltk.tokenize import word_tokenize + +# Private function +def unitvec(vec): + return vec / np.linalg.norm(vec) + +def __word_average(vectors, sent, vector_size,key_to_index): + """""" + Compute average word vector for a single doc/sentence. + """""" + try: + mean = [] + for word in sent: + index = key_to_index.get( word, None) + if index != None: + mean.append( vectors[index] ) + if len(mean): + return unitvec(np.array(mean).mean(axis=0)) + return np.zeros(vector_size) + except: + raise + +# Private function +def __word_average_list(vectors, docs, embed_size,key_to_index): + """""" + Compute average word vector for multiple docs, where docs had been tokenized. 
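+    Returns a 2-D array of shape (number of docs, embed_size); a document whose
+    tokens are all out of vocabulary contributes a zero vector (see __word_average).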
+ """""" + try: + return np.vstack([__word_average(vectors, sent, embed_size,key_to_index) for sent in docs]) + except: + raise + +def load_pretrained(path): + df = pd.read_csv(path, index_col=0,sep=' ',quotechar = ' ' , header=None, skiprows=1,encoding_errors= 'replace') + return len(df.columns), df + +def get_model( df:pd.DataFrame): + index_to_key = {k:v for k,v in enumerate(df.index)} + key_to_index = {v:k for k,v in enumerate(df.index)} + df = df.to_numpy() + return df, index_to_key, key_to_index + +def extractFeatureUsingPreTrainedModel(inputCorpus, pretrainedModelPath=None, loaded_model=False,key_to_index={}, embed_size=300): + """""" + Extract feature vector from input Corpus using pretrained Vector model(word2vec,fasttext, glove(converted to word2vec format) + """""" + try: + if inputCorpus is None: + return None + else: + if not pretrainedModelPath and ((isinstance(loaded_model, pd.DataFrame) and loaded_model.empty) or (not isinstance(loaded_model, pd.DataFrame) and not loaded_model)): + inputCorpusWordVectors = None + else: + if (isinstance(loaded_model, pd.DataFrame) and not loaded_model.empty) or loaded_model: + pretrainedModel = loaded_model + else: + embed_size, pretrainedModel = load_pretrained(pretrainedModelPath) + pretrainedModel, index_to_key,key_to_index = get_model( pretrainedModel) + if len(pretrainedModel): + input_docs_tokens_list = [word_tokenize(inputDoc) for inputDoc in inputCorpus] + inputCorpusWordVectors = __word_average_list(pretrainedModel, input_docs_tokens_list,embed_size,key_to_index) + else: + inputCorpusWordVectors = None + return inputCorpusWordVectors + except: + raise + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__)))) +#from .eda import ExploreTextData ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import logging +import numpy as np +import sys +from pathlib import Path +import nltk +from nltk.tokenize import sent_tokenize +from nltk import pos_tag +from nltk import ngrams +from nltk.corpus import wordnet +from nltk import RegexpParser +from textblob import TextBlob +import spacy +from sklearn.feature_extraction.text import CountVectorizer +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.base import BaseEstimator, TransformerMixin +" +"import urllib.request +import zipfile +import os +from os.path import expanduser +import platform + +from text import TextCleaning as text_cleaner +from text.Embedding import extractFeatureUsingPreTrainedModel + +logEnabled = False +spacy_nlp = None + +def ExtractFeatureCountVectors(ngram_range=(1, 1), + max_df=1.0, + min_df=1, + max_features=None, + binary=False): + vectorizer = CountVectorizer(ngram_range = ngram_range, max_df = max_df, \\ + min_df = min_df, max_features = max_features, binary = binary) + return vectorizer + +def ExtractFeatureTfIdfVectors(ngram_range=(1, 1), + max_df=1.0, + min_df=1, + max_features=None, + binary=False, + norm='l2', + use_idf=True, + smooth_idf=True, + sublinear_tf=False): + vectorizer = TfidfVectorizer(ngram_range = ngram_range, max_df = max_df, \\ + min_df = min_df, max_features = max_features, \\ + binary = binary, norm = norm, use_idf = use_idf, \\ + smooth_idf = smooth_idf, sublinear_tf = sublinear_tf) + return vectorizer + +def GetPOSTags( inputText, getPOSTags_Lib='nltk'): + global spacy_nlp + tokens_postag_list = [] + + if (inputText == """"): + __Log(""debug"", ""{} function: Input text is not provided"".format(sys._getframe().f_code.co_name)) + else: + if getPOSTags_Lib == 'spacy': + if spacy_nlp == None: + spacy_nlp = spacy.load('en_core_web_sm') + doc = spacy_nlp(inputText) + for token in doc: + tokens_postag_list.append((token.text, token.tag_)) + elif getPOSTags_Lib == 'textblob': + doc = TextBlob(inputText) + tokens_postag_list = doc.tags + else: + tokensList = WordTokenize(inputText) + tokens_postag_list = pos_tag(tokensList) + + return tokens_postag_list + +def GetNGrams( inputText, ngramRange=(1,1)): + ngramslist = [] + for n in range(ngramRange[0],ngramRange[1]+1): + nwordgrams = ngrams(inputText.split(), n) + ngramslist.extend([' '.join(grams) for grams in nwordgrams]) + return ngramslist + +def NamedEntityRecognition( inputText): + global spacy_nlp + neResultList = [] + if (inputText == """"): + __Log(""debug"", ""{} function: Input text is not provided"".format(sys._getframe().f_code.co_name)) + else: + if spacy_nlp == None: + spacy_nlp = spacy.load('en_core_web_sm') + doc = spacy_nlp(inputText) + neResultList = [(X.text, X.label_) for X in doc.ents] + + return neResultList + +def KeywordsExtraction( inputText, ratio=0.2, words = None, scores=False, pos_filter=('NN', 'JJ'), lemmatize=False): + keywordsList = [] + if (inputText == """"): + __Log(""debug"", ""{} function: Input text is not provided"".format(sys._getframe().f_code.co_name)) + else: + keywordsList = keywords(inputText, ratio = ratio, words = words, split=True, scores=scores, + pos_filter=pos_filter, lemmatize=lemmatize) + return keywordsList + +def __get_nodes(parent): + nounList = [] + verbList = [] + for node in parent: + if type(node) is nltk.Tree: + if node.label() == ""NP"": + subList = [] + for item in node.leaves(): + subList.append(item[0]) + nounList.append(("" "".join(subList))) + elif node.label() == ""VP"": + subList = [] + for item in node.leaves(): + 
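+                    # each leaf of the chunk tree is a (token, POS-tag) pair; keep only the token text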
subList.append(item[0]) + verbList.append(("" "".join(subList))) + #verbList.append(node.leaves()[0][0]) + __get_nodes(node) + result = {'NP': nounList, 'VP': verbList} + return result + +def ShallowParsing( inputText, lib='spacy'): + tags = GetPOSTags(inputText, getPOSTags_Lib=lib) + + chunk_regex = r"""""" + NBAR: + {
?*+} # Nouns and Adjectives, terminated with Nouns + VBAR: + {**?*+?} # Verbs and Verb Phrases + + NP: + {} + {} # Above, connected with in/of/etc... + VP: + {} + {} # Above, connected with in/of/etc... + """""" + rp = RegexpParser(chunk_regex) + t = rp.parse(tags) + return __get_nodes(t) + +def SyntacticAndEntityParsing(inputCorpus, + featuresList=['POSTags','NGrams','NamedEntityRecognition','KeywordsExtraction','ShallowParsing'], + posTagsLib='nltk', + ngramRange=(1,1), + ke_ratio=0.2, + ke_words = None, + ke_scores=False, + ke_pos_filter=('NN', 'JJ'), + ke_lemmatize=False): + columnsList = ['Input'] + columnsList.extend(featuresList) + df = pd.DataFrame(columns=columnsList) + df['Input'] = inputCorpus + for feature in featuresList: + if feature == 'POSTags': + df[feature] = inputCorpus.apply(lambda x: GetPOSTags(x, posTagsLib)) + if feature == 'NGrams': + df[feature] = inputCorpus.apply(lambda x: GetNGrams(x, ngramRange)) + if feature == 'NamedEntityRecognition': + df[feature] = inputCorpus.apply(lambda x: NamedEntityRecognition(x)) + if feature == 'KeywordsExtraction': + df[feature] = inputCorpus.apply(lambda x: KeywordsExtraction(x, + ratio=ke_ratio, words=ke_words, + scores=ke_scores, pos_filter=ke_pos_filter, + lemmatize=ke_lemmatize)) + if feature == 'ShallowParsing': + df[feature] = inputCorpus.apply(lambda x: ShallowParsing(x, lib=posTagsLib)) + return df + +def __Log( logType=""info"", text=None): + if logType.lower() == ""exception"": + logging.exception( text) + elif logEnabled: + if logType.lower() == ""info"": + logging.info( text) + elif logType.lower() == ""debug"": + logging.debug( text) + +def SentenceTokenize( inputText): + return text_cleaner.WordTokenize(inputText) + +def WordTokenize( inputText, tokenizationLib = 'nltk'): + return text_cleaner.WordTokenize(inputText, tokenizationLib) + +def Lemmatize( inputTokensList, lemmatizationLib = 'nltk'): + return text_cleaner.Lemmatize(inputTokensList, lemmatizationLib) + +def Stemmize( inputTokensList): + return text_cleaner.Stemmize(inputTokensList) + +def ToLowercase( inputText): + resultText = """" + if inputText is not None and inputText != """": + resultText = inputText.lower() + return resultText + +def ToUppercase( inputText): + resultText = """" + if inputText is not None and inputText != '': + resultText = inputText.upper() + return resultText + +def RemoveNoise( + inputText, + removeNoise_fHtmlDecode = True, + removeNoise_fRemoveHyperLinks = True, + removeNoise_fRemoveMentions = True, + removeNoise_fRemoveHashtags = True, + removeNoise_RemoveOrReplaceEmoji = 'remove', + removeNoise_fUnicodeToAscii = True, + removeNoise_fRemoveNonAscii = True): + return text_cleaner.RemoveNoise(inputText, removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions, + removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii) + +def RemoveStopwords( inputTokensList, stopwordsRemovalLib='nltk', stopwordsList = None, extend_or_replace='extend'): + return text_cleaner.RemoveStopwords(inputTokensList, stopwordsRemovalLib, stopwordsList, extend_or_replace) + +def RemoveNumericTokens( inputText, removeNumeric_fIncludeSpecialCharacters=True): + return text_cleaner.RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters) + +def RemovePunctuation( inputText, fRemovePuncWithinTokens=False): + return text_cleaner.RemovePunctuation(inputText, fRemovePuncWithinTokens) + +def CorrectSpelling( inputTokensList): + return 
text_cleaner.CorrectSpelling(inputTokensList) + +def ReplaceAcronym( inputTokensList, acrDict=None): + return text_cleaner.ReplaceAcronym(inputTokensList, acrDict) + +def ExpandContractions( inputText, expandContractions_googleNewsWordVectorPath=None): + return text_cleaner.ExpandContractions(inputText, expandContractions_googleNewsWordVectorPath) + +def get_pretrained_model_path(): + try: + from appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath + +def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300): + + models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}} + supported_models = [x for y in models.values() for x in y.values()] + embedding_sizes = {x:y.keys() for x,y in models.items()} + preTrainedModel = preTrainedModel.lower() + if preTrainedModel not in models.keys(): + raise ValueError(f'model not supported: {preTrainedModel}') + if embedding_size not in embedding_sizes[preTrainedModel]: + raise ValueError(f""Embedding size '{embedding_size}' not supported for {preTrainedModel}"") + selected_model = models[preTrainedModel][embedding_size] + modelsPath = get_pretrained_model_path() + p = modelsPath.glob('**/*') + modelsDownloaded = [x.name for x in p if x.name in supported_models] + if selected_model not in modelsDownloaded: + if preTrainedModel == ""glove"": + try: + local_file_path = modelsPath/f""glove.6B.{embedding_size}d.w2vformat.txt"" + file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path) + except Exception as e: + raise ValueError(""Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. "".format(modelsPath)+str(e)) + + elif preTrainedModel == ""fasttext"": + try: + local_file_path = modelsPath/""wiki-news-300d-1M.vec.zip"" + url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip' + file_test, header_test = urllib.request.urlretrieve(url, local_file_path) + with zipfile.ZipFile(local_file_path) as zip_ref: + zip_ref.extractall(modelsPath) + Path(local_file_path).unlink() + except Exception as e: + raise ValueError(""Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. 
"".format(location)+str(e)) + return modelsPath/selected_model + +def load_pretrained(path): + embeddings = {} + word = '' + with open(path, 'r', encoding=""utf8"") as f: + header = f.readline() + header = header.split(' ') + vocab_size = int(header[0]) + embed_size = int(header[1]) + for i in range(vocab_size): + data = f.readline().strip().split(' ') + word = data[0] + embeddings[word] = [float(x) for x in data[1:]] + return embeddings + +class TextProcessing(BaseEstimator, TransformerMixin): + + def __init__(self, + functionSequence = ['RemoveNoise','ExpandContractions','Normalize','ReplaceAcronym', + 'CorrectSpelling','RemoveStopwords','RemovePunctuation','RemoveNumericTokens'], + fRemoveNoise = True, + fExpandContractions = False, + fNormalize = True, + fReplaceAcronym = False, + fCorrectSpelling = False, + fRemoveStopwords = True, + fRemovePunctuation = True, + fRemoveNumericTokens = True, + removeNoise_fHtmlDecode = True, + removeNoise_fRemoveHyperLinks = True, + removeNoise_fRemoveMentions = True, + removeNoise_fRemoveHashtags = True, + removeNoise_RemoveOrReplaceEmoji = 'remove', + removeNoise_fUnicodeToAscii = True, + removeNoise_fRemoveNonAscii = True, + tokenizationLib='nltk', + normalizationMethod = 'Lemmatization', + lemmatizationLib = 'nltk', + acronymDict = None, + stopwordsRemovalLib = 'nltk', + stopwordsList = None, + extend_or_replace_" +"stopwordslist = 'extend', + removeNumeric_fIncludeSpecialCharacters = True, + fRemovePuncWithinTokens = False, + data_path = None +): + global logEnabled + #logEnabled = EnableLogging + self.functionSequence = functionSequence + self.fRemoveNoise = fRemoveNoise + self.fExpandContractions = fExpandContractions + self.fNormalize = fNormalize + self.fReplaceAcronym = fReplaceAcronym + self.fCorrectSpelling = fCorrectSpelling + self.fRemoveStopwords = fRemoveStopwords + self.fRemovePunctuation = fRemovePunctuation + self.fRemoveNumericTokens = fRemoveNumericTokens + self.removeNoise_fHtmlDecode = removeNoise_fHtmlDecode + self.removeNoise_fRemoveHyperLinks = removeNoise_fRemoveHyperLinks + self.removeNoise_fRemoveMentions = removeNoise_fRemoveMentions + self.removeNoise_fRemoveHashtags = removeNoise_fRemoveHashtags + self.removeNoise_RemoveOrReplaceEmoji = removeNoise_RemoveOrReplaceEmoji + self.removeNoise_fUnicodeToAscii = removeNoise_fUnicodeToAscii + self.removeNoise_fRemoveNonAscii = removeNoise_fRemoveNonAscii + self.tokenizationLib = tokenizationLib + self.normalizationMethod = normalizationMethod + self.lemmatizationLib = lemmatizationLib + self.acronymDict = acronymDict + self.stopwordsRemovalLib = stopwordsRemovalLib + self.stopwordsList = stopwordsList + self.extend_or_replace_stopwordslist = extend_or_replace_stopwordslist + self.removeNumeric_fIncludeSpecialCharacters = removeNumeric_fIncludeSpecialCharacters + self.fRemovePuncWithinTokens = fRemovePuncWithinTokens + self.data_path = data_path + self.fit_and_transformed_ = False + + def fit(self, x, y=None): + return self + + def transform(self, x): + x = map(lambda inputText: text_cleaner.cleanText(inputText, functionSequence = self.functionSequence, fRemoveNoise = self.fRemoveNoise, fExpandContractions = self.fExpandContractions, fNormalize = self.fNormalize, fReplaceAcronym = self.fReplaceAcronym, fCorrectSpelling = self.fCorrectSpelling, fRemoveStopwords = self.fRemoveStopwords, fRemovePunctuation = self.fRemovePunctuation, fRemoveNumericTokens = self.fRemoveNumericTokens, removeNoise_fHtmlDecode = self.removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks = 
self.removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions = self.removeNoise_fRemoveMentions , removeNoise_fRemoveHashtags = self.removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji = self.removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii = self.removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii = self.removeNoise_fRemoveNonAscii, tokenizationLib = self.tokenizationLib, normalizationMethod = self.normalizationMethod, lemmatizationLib = self.lemmatizationLib, acronymDict = self.acronymDict, stopwordsRemovalLib = self.stopwordsRemovalLib, stopwordsList = self.stopwordsList, extend_or_replace_stopwordslist = self.extend_or_replace_stopwordslist, removeNumeric_fIncludeSpecialCharacters = self.removeNumeric_fIncludeSpecialCharacters, fRemovePuncWithinTokens = self.fRemovePuncWithinTokens), x) + x = pd.Series(list(x)) + if hasattr(self, 'fit_and_transformed_') and not self.fit_and_transformed_: + self.fit_and_transformed_ = True + if self.data_path and Path(self.data_path).exists(): + x.to_csv(Path(self.data_path)/'text_cleaned.csv', index=False) + return x + + def get_feature_names_out(self): + return ['tokenize'] + +class wordEmbedding(BaseEstimator, TransformerMixin): + + def __init__(self, preTrainedModel, embeddingSize=300,external_model=None,external_model_type='binary'): + self.number_of_features = 0 + self.embeddingSize = embeddingSize + self.preTrainedModel = preTrainedModel.lower() + self.external_model=external_model + self.external_model_type = external_model_type + if self.preTrainedModel == ""glove"": + self.preTrainedModelpath = f'glove.6B.{self.embeddingSize}d.w2vformat.txt' + self.binary = False + elif self.preTrainedModel == ""fasttext"": + self.preTrainedModelpath = 'wiki-news-300d-1M.vec' + self.binary = False + else: + raise ValueError(f'Model ({self.preTrainedModel}) not supported') + + def fit(self, x, y=None): + return self + + def transform(self, x): + if ((isinstance(self.external_model, pd.DataFrame) and not self.external_model.empty) or (not isinstance(self.external_model, pd.DataFrame) and self.external_model)): + if self.preTrainedModel == ""fasttext"" and self.external_model_type == 'binary': + print('Transforming using external binary') + extracted = np.vstack([self.external_model.get_sentence_vector( sentense) for sentense in x]) + else: + print('Transforming using external vector') + extracted = extractFeatureUsingPreTrainedModel(x, pretrainedModelPath=None, loaded_model=self.external_model, embed_size=300) + else: + print('Transforming using Vector') + models_path = checkAndDownloadPretrainedModel(self.preTrainedModel, self.embeddingSize) + extracted = extractFeatureUsingPreTrainedModel(x, models_path) + + self.number_of_features = extracted.shape[1] + return extracted + + def get_feature_names_out(self): + return [str(x) for x in range(self.number_of_features)] + + def get_feature_names(self): + return self.get_feature_names_out() + +def getProcessedPOSTaggedData(pos_tagged_data): + def get_wordnet_post(tag): + if tag.startswith('V'): + return wordnet.VERB + elif tag.startswith('J'): + return wordnet.ADJ + elif tag.startswith('R'): + return wordnet.ADV + else: + return wordnet.NOUN + + def process_pos_tagged_data(text): + processed_text = [f""{t[0]}_{get_wordnet_post(t[1])}"" for t in text] + processed_text = "" "".join(processed_text) + return processed_text + + processed_pos_tagged_data = pos_tagged_data.apply(process_pos_tagged_data) + return processed_pos_tagged_data + + +class PosTagging(BaseEstimator, TransformerMixin): 
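+    # sklearn-compatible transformer: POS-tags each document and rewrites it as
+    # space-separated token_pos strings (wordnet POS codes), writing the first
+    # transformed output to data_path/pos_tagged.csv when that folder exists.
+    # Minimal usage sketch (assuming corpus is a pandas Series of raw text and
+    # output_dir is an existing folder; both names are placeholders):
+    #     tagger = PosTagging(posTagsLib='nltk', data_path='output_dir')
+    #     tagged = tagger.fit(corpus).transform(corpus)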
+ + def __init__(self, posTagsLib, data_path): + self.posTagsLib = posTagsLib + self.fit_and_transformed_ = False + self.data_path = data_path + + def fit(self, x, y=None): + return self + + def transform(self, x): + parsing_output = SyntacticAndEntityParsing(x, featuresList=['POSTags'], posTagsLib=self.posTagsLib) + output = getProcessedPOSTaggedData(parsing_output['POSTags']) + if not self.fit_and_transformed_: + self.fit_and_transformed_ = True + if self.data_path and Path(self.data_path).exists(): + output.to_csv(Path(self.data_path)/'pos_tagged.csv', index=False) + return output + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import re +import string +import sys +import demoji +#demoji.download_codes() +import nltk +import spacy +from nltk.corpus import stopwords +from bs4 import BeautifulSoup +from text_unidecode import unidecode +from textblob import TextBlob +from spellchecker import SpellChecker +from nltk import pos_tag +from nltk.tokenize import word_tokenize +from nltk.corpus import wordnet +from nltk.stem.wordnet import WordNetLemmatizer +from nltk.stem.porter import PorterStemmer +from spacy.lang.en import English +from collections import defaultdict +import contractions + + +spacy_nlp = None + +def WordTokenize( inputText, tokenizationLib = 'nltk'): + tokenList = [] + if inputText is not None and inputText != """": + tokenizationLib = tokenizationLib.lower() + if tokenizationLib == 'nltk': + tokenList = word_tokenize(inputText) + elif tokenizationLib == 'textblob': + tbObj = TextBlob(inputText) + tokenList = tbObj.words + elif tokenizationLib == 'spacy': + nlp = English() + nlpDoc = nlp(inputText) + for token in nlpDoc: + tokenList.append(token.text) + elif tokenizationLib == 'keras': + from tensorflow.keras.preprocessing.text import text_to_word_sequence + tokenList = text_to_word_sequence(inputText) + else: + tokenList = word_tokenize(inputText) + + return tokenList + +def SentenceTokenize( inputText): + sentenceList = [] + if inputText is not None and inputText != """": + sentenceList = sent_tokenize(inputText) + return sentenceList + +def Lemmatize(inputTokensList, lemmatizationLib = 'nltk'): + global spacy_nlp + lemmatized_list= [] + lemmatizationLib = lemmatizationLib.lower() + if (inputTokensList is not None) and (len(inputTokensList)!=0): + if (lemmatizationLib == 'textblob'): + inputText = "" "".join(inputTokensList) + sent = TextBlob(inputText) + tag_dict = {""J"": 'a', + ""N"": 'n', + ""V"": 'v', + ""R"": 'r'} + words_and_tags = [(w, tag_dict.get(pos[0], 'n')) for w, pos in sent.tags] + lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags] + if (lemmatizationLib == 'spacy'): + inputText = "" "".join(inputTokensList) + if spacy_nlp == None: + spacy_nlp = spacy.load('en_core_web_sm') + doc = spacy_nlp(inputText) + + for token in doc: + if token.text != token.lemma_: + if token.lemma_ != ""-PRON-"": + lemmatized_list.append(token.lemma_) + else: + lemmatized_list.append(token.text) + else: + lemmatized_list.append(token.text) + else: + tag_map = 
defaultdict(lambda : wordnet.NOUN) + tag_map['J'] = wordnet.ADJ + tag_map['V'] = wordnet.VERB + tag_map['R'] = wordnet.ADV + + wnLemmatizer = WordNetLemmatizer() + token_tags = pos_tag(inputTokensList) + lemmatized_list = [wnLemmatizer.lemmatize(token, tag_map[tag[0]]) for token, tag in token_tags] + + return lemmatized_list + +def Stemmize(inputTokensList): + stemmedTokensList= [] + + if (inputTokensList is not None) and (len(inputTokensList)!=0): + porterStemmer = PorterStemmer() + stemmedTokensList = [porterStemmer.stem(token) for token in inputTokensList] + + return stemmedTokensList + +def ToLowercase(inputText): + resultText = """" + if inputText is not None and inputText != """": + resultText = inputText.lower() + + return resultText + +def ToUppercase(inputText): + resultText = """" + if inputText is not None and inputText != '': + resultText = inputText.upper() + + return resultText + +def RemoveNoise(inputText, + removeNoise_fHtmlDecode = True, + removeNoise_fRemoveHyperLinks = True, + removeNoise_fRemoveMentions = True, + removeNoise_fRemoveHashtags = True, + removeNoise_RemoveOrReplaceEmoji = 'remove', + removeNoise_fUnicodeToAscii = True, + removeNoise_fRemoveNonAscii = True): + if inputText is not None and inputText != """": + if removeNoise_fHtmlDecode == True: + inputText = BeautifulSoup(inputText, ""html.parser"").text + if removeNoise_fRemoveHyperLinks == True: + inputText = re.sub(r'https?:\\/\\/\\S*', '', inputText, flags=re.MULTILINE) + if removeNoise_fRemoveMentions == True: + inputText = re.sub('[@]+\\S+','', inputText) + if removeNoise_fRemoveHashtags == True: + inputText = re.sub('[#]+\\S+','', inputText) + if removeNoise_RemoveOrReplaceEmoji == 'remove': + inputText = demoji.replace(inputText, """") + elif removeNoise_RemoveOrReplaceEmoji == 'replace': + inputText = demoji.replace_with_desc(inputText, "" "") + if removeNoise_fUnicodeToAscii == True: + inputText = unidecode(inputText) + if removeNoise_fRemoveNonAscii == True: + inputText= re.sub(r'[^\\x00-\\x7F]+',' ', inputText) + + inputText = re.sub(r'\\s+', ' ', inputText) + inputText = inputText.strip() + + return inputText + +def RemoveStopwords(inputTokensList, stopwordsRemovalLib='nltk', stopwordsList = None, extend_or_replace='extend'): + resultTokensList = [] + if (inputTokensList is not None) and (len(inputTokensList)!=0): + stopwordsRemovalLib= stopwordsRemovalLib.lower() + + if stopwordsRemovalLib == 'spacy': + nlp = English() + stopwordRemovalList = nlp.Defaults.stop_words + else: + stopwordRemovalList = set(stopwords.words('english')) + + if extend_or_replace == 'replace': + if stopwordsList is not None: + stopwordRemovalList = set(stopwordsList)" +" + else: + if stopwordsList: + stopwordRemovalList = stopwordRemovalList.union(set(stopwordsList)) + + resultTokensList = [word for word in inputTokensList if word not in stopwordRemovalList] + + return resultTokensList + +def RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters=True): + resultText = """" + if inputText is not None and inputText != """": + if removeNumeric_fIncludeSpecialCharacters == True: + #Remove tokens having numbers and punctuations + resultText = re.sub(r'\\b\\d+[^a-zA-Z]*\\d*\\b',' ', inputText) + else: + #Remove only numeric tokens + resultText = re.sub(r'\\b\\d+\\b','', inputText) + + # convert consecutive whitespaces to single space in the results + resultText = re.sub(r'\\s+', ' ', resultText) + + return resultText + +def RemovePunctuation(inputText, fRemovePuncWithinTokens=False): + resultText = """" + if 
inputText is not None and len(inputText) != 0: + if fRemovePuncWithinTokens == True: + resultText = inputText.translate(str.maketrans("""","""", string.punctuation)) + else: + punctuationList = list(string.punctuation) + tokensList = WordTokenize(inputText) + resultTokensList = [word for word in tokensList if word not in punctuationList] + resultText = "" "".join(resultTokensList) + + resultText = re.sub(r'\\s+', ' ', resultText) + return resultText + + +def CorrectSpelling(inputTokensList): + correctedTokensList = [] + + if (inputTokensList is not None) and (len(inputTokensList)!=0): + spell = SpellChecker() + for word in inputTokensList: + word = word.lower() + if word not in spell: + word = spell.correction(word) + if word: + correctedTokensList.append(word) + return correctedTokensList + +def ReplaceAcronym(inputTokensList, acrDict=None): + resultTokensList = [] + + if (inputTokensList is not None) and (len(inputTokensList)!=0): + if ((acrDict is not None) and (len(acrDict) != 0)): + acrDictLowercase = dict((key.lower(), value.lower()) for key, value in acrDict.items()) + resultTokensList = [acrDictLowercase.get(token.lower(), token.lower()) for token in inputTokensList] + else: + resultTokensList = inputTokensList + + return resultTokensList + +def ExpandContractions(inputText): + resultText = """" + if inputText != '': + resultText = contractions.fix(inputText) + return resultText + +def cleanText( inputText, + functionSequence = ['RemoveNoise','ExpandContractions','Normalize','ReplaceAcronym', + 'CorrectSpelling','RemoveStopwords','RemovePunctuation','RemoveNumericTokens'], + fRemoveNoise = True, + fExpandContractions = False, + fNormalize = True, + fReplaceAcronym = False, + fCorrectSpelling = False, + fRemoveStopwords = True, + fRemovePunctuation = True, + fRemoveNumericTokens = True, + removeNoise_fHtmlDecode = True, + removeNoise_fRemoveHyperLinks = True, + removeNoise_fRemoveMentions = True, + removeNoise_fRemoveHashtags = True, + removeNoise_RemoveOrReplaceEmoji = 'remove', + removeNoise_fUnicodeToAscii = True, + removeNoise_fRemoveNonAscii = True, + tokenizationLib='nltk', + normalizationMethod = 'Lemmatization', + lemmatizationLib = 'nltk', + acronymDict = None, + stopwordsRemovalLib = 'nltk', + stopwordsList = None, + extend_or_replace_stopwordslist = 'extend', + removeNumeric_fIncludeSpecialCharacters = True, + fRemovePuncWithinTokens = False + ): + if inputText is not None and inputText != """": + for function in functionSequence: + if function == 'RemoveNoise': + if (fRemoveNoise == True): + inputText = RemoveNoise(inputText, + removeNoise_fHtmlDecode, + removeNoise_fRemoveHyperLinks, + removeNoise_fRemoveMentions, + removeNoise_fRemoveHashtags, + removeNoise_RemoveOrReplaceEmoji, + removeNoise_fUnicodeToAscii, + removeNoise_fRemoveNonAscii) + if function == 'ExpandContractions': + if (fExpandContractions == True): + inputText = ExpandContractions(inputText) + if function == 'Normalize': + if (fNormalize == True): + inputTokens = WordTokenize(inputText, tokenizationLib) + if (normalizationMethod == 'Stemming'): + inputTokens = Stemmize(inputTokens) + else: + inputTokens = Lemmatize(inputTokens, lemmatizationLib) + inputText = "" "".join(inputTokens) + if function == 'ReplaceAcronym': + if fReplaceAcronym == True and (acronymDict is not None) and acronymDict != 'None': + inputText = ToLowercase(inputText) + inputTokens = WordTokenize(inputText, tokenizationLib) + inputTokens= ReplaceAcronym(inputTokens, acronymDict) + inputText = "" "".join(inputTokens) + if function == 
'CorrectSpelling': + if (fCorrectSpelling == True): + try: + inputTokens = WordTokenize(inputText, tokenizationLib) + inputTokens = CorrectSpelling(inputTokens) + inputText = "" "".join(inputTokens) + except Exception as e: + print(e) + pass + if function == 'RemoveStopwords': + if (fRemoveStopwords == True): + inputText = ToLowercase(inputText) + inputTokens = WordTokenize(inputText, tokenizationLib) + inputTokens = RemoveStopwords(inputTokens, stopwordsRemovalLib, stopwordsList, extend_or_replace_stopwordslist) + inputText = "" "".join(inputTokens) + if function == 'RemovePunctuation': + if (fRemovePunctuation == True): + inputText = RemovePunctuation(inputText, fRemovePuncWithinTokens) + if function == 'RemoveNumericTokens': + if (fRemoveNumericTokens == True): + inputText = RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters) + inputText = ToLowercase(inputText) + + return inputText + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import openai +import tiktoken +import numpy as np +import pandas as pd +from pathlib import Path +from openai.embeddings_utils import get_embedding +from sklearn.base import BaseEstimator, TransformerMixin + +class embedding(BaseEstimator, TransformerMixin): + + def __init__(self, embedding_engine='Text-Embedding', embedding_ctx_size=8191, encoding_method='cl100k_base'): + self.embedding_engine = embedding_engine + self.embedding_ctx_size = embedding_ctx_size + self.encoding_method = encoding_method + self.number_of_features = 1536 + + def fit(self,X,y=None): + return self + + def transform(self, X): + setup_openai() + + X = map(lambda text: self.len_safe_get_embedding( text), X) + return list(X) + + def split_large_text(self, large_text): + encoding = tiktoken.get_encoding( self.encoding_method) + tokenized_text = encoding.encode(large_text) + + chunks = [] + current_chunk = [] + current_length = 0 + + for token in tokenized_text: + current_chunk.append(token) + current_length += 1 + + if current_length >= self.embedding_ctx_size: + chunks.append(encoding.decode(current_chunk).rstrip(' .,;')) + current_chunk = [] + current_length = 0 + + if current_chunk: + chunks.append(encoding.decode(current_chunk).rstrip(' .,;')) + + return chunks + + def len_safe_get_embedding(self, text): + chunk_embeddings = [] + chunk_lens = [] + for chunk in self.split_large_text(text): + chunk_embeddings.append( get_embedding(chunk, engine=self.embedding_engine)) + chunk_lens.append(len(chunk)) + + chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=None) + chunk_embeddings = chunk_embeddings / np.linalg.norm(chunk_embeddings) # normalizes length to 1 + chunk_embeddings = chunk_embeddings.tolist() + return chunk_embeddings + + def get_feature_names_out(self): + return [str(x) for x in range(self.number_of_features)] + + def get_feature_names(self): + return self.get_feature_names_out() + +"""""" +Open AI initialization has to be done separately as follows: + 1. During training read the parameters from user + a. from config + b. SQLite database + c. 
From Json file +"""""" +class setup_openai(): + + def __init__( self, config=None): + param_keys = ['api_type','api_key','api_base','api_version'] + if isinstance(config, dict): + valid_params = {x:y for x,y in config.items() if x in param_keys} + self._update_params(valid_params) + elif self._is_sqlite(): + self._update_params( self._get_cred_from_sqlite()) + elif ((Path(__file__).parent.parent/'etc')/'openai.json').exists(): + with open(((Path(__file__).parent.parent/'etc')/'openai.json'), 'r') as f: + import json + params = json.load(f) + valid_params = {x:y for x,y in params.items() if x in param_keys} + self._update_params(valid_params) + else: + raise ValueError('Open AI credentials are not provided.') + + def _is_sqlite(self): + try: + from AION.appbe.sqliteUtility import sqlite_db + from AION.appbe.dataPath import DATA_DIR + db_dir = Path(DATA_DIR)/'sqlite' + db_file = 'config.db' + if (db_dir/db_file).exists(): + sqlite_obj = sqlite_db(db_dir,db_file) + if sqlite_obj.table_exists('openai'): + return True + return False + except: + return False + + def _get_cred_from_sqlite(self): + from AION.appbe.sqliteUtility import sqlite_db + from AION.appbe.dataPath import DATA_DIR + db_dir = Path(DATA_DIR)/'sqlite' + db_file = 'config.db' + sqlite_obj = sqlite_db(db_dir,db_file) + data = sqlite_obj.read_data('openai')[0] + param_keys = ['api_type','api_key','api_base','api_version'] + return dict((x,y) for x,y in zip(param_keys,data)) + + def _update_params(self, valid_params): + for key, value in valid_params.items(): + if key == 'api_type': + openai.api_type = value + elif key == 'api_key': + openai.api_key = value + elif key == 'api_base': + openai.api_base = value + elif key == 'api_version': + openai.api_version = value + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import logging +from distutils.util import strtobool +import numpy as np +import pandas as pd +from text import TextProcessing +from sklearn.preprocessing import FunctionTransformer +from sklearn.base import BaseEstimator, TransformerMixin +from pathlib import Path + +external_model = None +external_model_type = None + +def get_one_true_option(d, default_value): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + +class textProfiler(): + + def __init__(self): + self.log = logging.getLogger('eion') + self.embedder = None + self.bert_embedder_size = 0 + + def textCleaning(self, textCorpus): + textProcessor = TextProcessing.TextProcessing() + textCorpus = textProcessor.transform(textCorpus) + return(textCorpus) + + def sentense_encode(self, item): + return self.model.encode(item,show_progress_bar=False) + + def get_embedding_size(self, model, config): + if model in config.keys(): + config = config[model] + else: + config = {} + model = model.lower() + if model == 'glove': + size_map = {'default': 100, '50d': 50, '100d':100, '200d': 200, '300d':300} + size_enabled = get_one_true_option(config, 'default') + return size_map[size_enabled] + elif model == 'fasttext': + size_map = {'default': 300} + size_enabled = get_one_true_option(config, 'default') + return size_map[size_enabled] + elif model == 'latentsemanticanalysis': + size_map = {'default': 100, '50d': 50" +", '100d':100, '200d': 200, '300d':300,'500d':500,'700d':700,'1000d':1000} + size_enabled = get_one_true_option(config, 'default') + return size_map[size_enabled] + elif model in ['tf_idf', 'countvectors']: + return int(config.get('maxFeatures', 2000)) + else: # for word2vec + return 300 + + + def cleaner(self, conf_json, pipeList, data_path=None): + cleaning_kwargs = {} + textCleaning = conf_json.get('textCleaning') + self.log.info(""Text Preprocessing config: "",textCleaning) + cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True')) + cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True')) + cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False')) + cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False')) + cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True')) + cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True')) + cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True')) + cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'), + 'lemmatization').capitalize() + + removeNoiseConfig = textCleaning.get('removeNoiseConfig') + if type(removeNoiseConfig) is dict: + cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True')) + cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True')) + cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True')) + cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True')) + cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace' + cleaning_kwargs['removeNoise_fUnicodeToAscii'] = 
strtobool(removeNoiseConfig.get('unicodeToAscii', 'True')) + cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True')) + + acronymConfig = textCleaning.get('acronymConfig') + if type(acronymConfig) is dict: + cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None) + + stopWordsConfig = textCleaning.get('stopWordsConfig') + if type(stopWordsConfig) is dict: + cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', '[]') + if isinstance(cleaning_kwargs['stopwordsList'], str): + if cleaning_kwargs['stopwordsList'] != '[]': + cleaning_kwargs['stopwordsList'] = cleaning_kwargs['stopwordsList'][1:-1].split(',') + else: + cleaning_kwargs['stopwordsList'] = [] + cleaning_kwargs['extend_or_replace_stopwordslist'] = 'replace' if strtobool(stopWordsConfig.get('replace', 'True')) else 'extend' + removeNumericConfig = textCleaning.get('removeNumericConfig') + if type(removeNumericConfig) is dict: + cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True')) + + removePunctuationConfig = textCleaning.get('removePunctuationConfig') + if type(removePunctuationConfig) is dict: + cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False')) + + cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False')) + + libConfig = textCleaning.get('libConfig') + if type(libConfig) is dict: + cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk') + cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk') + cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk') + if data_path: + cleaning_kwargs['data_path'] = data_path + textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs) + pipeList.append((""TextProcessing"",textProcessor)) + + textFeatureExtraction = conf_json.get('textFeatureExtraction') + if strtobool(textFeatureExtraction.get('pos_tags', 'False')): + pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk') + posTagger = TextProcessing.PosTagging( pos_tags_lib, data_path) + pipeList.append((""posTagger"",posTagger)) + return pipeList + + def embedding(self, conf_json, pipeList): + ngram_min = 1 + ngram_max = 1 + textFeatureExtraction = conf_json.get('textFeatureExtraction') + if strtobool(textFeatureExtraction.get('n_grams', 'False')): + n_grams_config = textFeatureExtraction.get(""n_grams_config"") + ngram_min = int(n_grams_config.get('min_n', 1)) + ngram_max = int(n_grams_config.get('max_n', 1)) + if (ngram_min < 1) or ngram_min > ngram_max: + ngram_min = 1 + ngram_max = 1 + invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max) + self.log.info(invalidNgramWarning) + ngram_range_tuple = (ngram_min, ngram_max) + textConversionMethod = conf_json.get('textConversionMethod') + conversion_method = get_one_true_option(textConversionMethod, None) + embedding_size_config = conf_json.get('embeddingSize', {}) + embedding_size = self.get_embedding_size(conversion_method, embedding_size_config) + if conversion_method.lower() == ""countvectors"": + vectorizer = TextProcessing.ExtractFeatureCountVectors( ngram_range=ngram_range_tuple,max_features=embedding_size) + pipeList.append((""vectorizer"",vectorizer)) + 
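+                # Illustrative only: a config fragment that reaches this branch
+                # (keys follow the parsing above; the concrete values are assumptions):
+                #   'textConversionMethod': {'countvectors': 'True', 'glove': 'False'},
+                #   'embeddingSize': {'countvectors': {'maxFeatures': 2000}}
+                # get_one_true_option() returns the method flagged 'True', and
+                # get_embedding_size() caps the CountVectorizer at maxFeatures
+                # (default 2000), combined with the n-gram range computed above.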
self.log.info('----------> Conversion Method: CountVectors') + elif conversion_method.lower() in [""fasttext"",""glove""]: + embedding_method = conversion_method + wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method, embedding_size) + pipeList.append((""vectorizer"",wordEmbeddingVecotrizer)) + self.log.info('----------> Conversion Method: '+str(conversion_method)) + elif conversion_method.lower() == ""openai"": + from text.openai_embedding import embedding as openai_embedder + vectorizer = openai_embedder() + pipeList.append((""vectorizer"",vectorizer)) + self.log.info('----------> Conversion Method: '+str(conversion_method)) + elif conversion_method.lower() == ""sentencetransformer_distilroberta"": + from sentence_transformers import SentenceTransformer + embedding_pretrained = {'model':'sentence-transformers/msmarco-distilroberta-base-v2','size': 768} + self.bert_embedder_size = embedding_pretrained['size'] + self.model = SentenceTransformer(embedding_pretrained['model']) + self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output) + pipeList.append((""vectorizer"",self.embedder)) + self.log.info('----------> Conversion Method: SentenceTransformer using msmarco_distilroberta') + + elif conversion_method.lower() == ""sentencetransformer_minilm"": + from sentence_transformers import SentenceTransformer + embedding_pretrained = {'model':'sentence-transformers/all-MiniLM-L6-v2','size': 384} + self.bert_embedder_size = embedding_pretrained['size'] + self.model = SentenceTransformer(embedding_pretrained['model']) + self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output) + pipeList.append((""vectorizer"",self.embedder)) + self.log.info('----------> Conversion Method: SentenceTransformer using MiniLM-L6-v2') + + elif conversion_method.lower() == ""sentencetransformer_mpnet"": + from sentence_transformers import SentenceTransformer + embedding_pretrained = {'model':'sentence-transformers/all-mpnet-base-v2','size': 768} + self.bert_embedder_size = embedding_pretrained['size'] + self.model = SentenceTransformer(embedding_pretrained['model']) + self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output) + pipeList.append((""vectorizer"",self.embedder)) + self.log.info('----------> Conversion Method: SentenceTransformer using mpnet-base-v2') + + elif conversion_method.lower() == 'latentsemanticanalysis': + vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple) + pipeList.append((""vectorizer"",vectorizer)) + self.log.info('----------> Conversion Method: latentsemanticanalysis') + elif conversion_method.lower() == 'tf_idf': + vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple,max_features=embedding_size) + pipeList.append((""vectorizer"",vectorizer)) + self.log.info('----------> Conversion Method: TF_IDF') + else: + df1 = pd.DataFrame() + #df1['tokenize'] = textCorpus + self.log.info('----------> Conversion Method: '+str(conversion_method)) + return pipeList + + def sentence_transformer_output(self, transformer, names=None): + return [str(x) for x in range(self.bert_embedder_size)] + + +class textCombine(TransformerMixin): + def __init__(self): + pass + def fit(self, X, y=None): + return self + def transform(self, X): + if X.shape[1] > 1: + return np.array(["" "".join(i) for i in X]) + else: + if isinstance(X, np.ndarray): + return np.ndarray.flatten(X) + 
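+            # Single text column: flatten the (n, 1) array to shape (n,) so each
+            # document reaches the downstream vectorizer as one string.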
else: + return X + +def get_pretrained_model_path(): + try: + from appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath + +def set_pretrained_model(pipe): + from text.Embedding import load_pretrained + import importlib.util + global external_model + global external_model_type + params = pipe.get_params() + model_name = params.get('text_process__vectorizer__preTrainedModel', None) + if model_name and model_name.lower() in ['fasttext','glove'] and not external_model: + if model_name == 'fasttext' and importlib.util.find_spec('fasttext'): + import fasttext + import fasttext.util + cwd = os.getcwd() + os.chdir(get_pretrained_model_path()) + fasttext.util.download_model('en', if_exists='ignore') + external_model = fasttext.load_model('cc.en.300.bin') + os.chdir(cwd) + external_model_type = 'binary' + print('loaded fasttext binary') + else: + model_path = TextProcessing.checkAndDownloadPretrainedModel(model_name) + embed_size, external_model = load_pretrained(model_path) + external_model_type = 'vector' + print(f'loaded {model_name} vector') + pipe.set_params(text_process__vectorizer__external_model = external_model) + pipe.set_params(text_process__vectorizer__external_model_type = external_model_type) + +def reset_pretrained_model(pipe, clear_mem=True): + global external_model + global external_model_type + params = pipe.get_params() + is_external_model = params.get('text_process__vectorizer__external_model', None) + if (isinstance(is_external_model, pd.DataFrame) and not is_external_model.empty) or is_external_model: + pipe.set_params(text_process__vectorizer__external_model = None) + pipe.set_params(text_process__vectorizer__external_model_type = None) + if clear_mem: + external_model = None + +def release_pretrained_model(): + global external_model + global external_model_type + external_model = None + external_model_type = None + '''" +" +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import sys +import logging +from collections import Counter + +import spacy +import numpy as np +import pandas as pd +import nltk +from nltk.corpus import stopwords +from nltk import pos_tag +from nltk.tokenize import word_tokenize +from nltk.stem.wordnet import WordNetLemmatizer +from textblob import TextBlob +from sklearn.feature_extraction.text import CountVectorizer +''' +nltk.download(""punkt"") +nltk.download(""wordnet"") +''' +stopWords = stopwords.words(""english"") + +class ExploreTextData: + + + def __init__(self, logEnabled=False): + self.logEnabled = logEnabled + + def __Log(self, logType=""info"", text=None): + if logType.lower() == ""exception"": + logging.exception( text) + elif self.logEnabled: + if logType.lower() == ""info"": + logging.info( text) + elif logType.lower() == ""debug"": + logging.debug( text) + + def Describe(self, inputCorpus): + """""" Generate descriptive statistics for length of documents. + Parameters + ---------- + + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + dict + Summary statistics of the Series or Dataframe provided. + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + stat = {} + word_count = self.DocumentWordCount(inputCorpus) + stat['count'] = float(len(word_count)) + stat['mean'] = float(word_count.mean()) + stat['std'] = float(word_count.std()) + stat['max'] = float(word_count.max()) + stat['min'] = float(word_count.min()) + return pd.DataFrame.from_dict(stat, orient='index') + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def DocumentLength(self, inputCorpus): + """""" Calculate the length of each document in corpus + Parameters + ---------- + + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + pandas.Series of {int} + series of length of documents + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + return inputCorpus.str.len() + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def DocumentWordCount(self, inputCorpus): + """""" Calculate the number of words in each document in corpus + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + pandas.Series of {int} + series of number of words in documents + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + return inputCorpus.str.split().map(lambda x: len(x)) + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def AverageWordLength(self, inputCorpus): + """""" Calculate the average length of words in each document in corpus + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + pandas.Series of {double} + series of average length of words in documents + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + return inputCorpus.str.split()\\ + .apply(lambda x: [len(i) for i in x])\\ + .map(lambda x: np.mean(x)) + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def StopWordsCount(self, inputCorpus): + """""" Calculate the number of stopwords in each document in corpus + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + 
------- + pandas.Series of {int} + series of count of stopwords in documents + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + stopWordsCount = [] + inputCorpus = list(inputCorpus) + for doc in inputCorpus: + count = 0 + for word in doc.split(): + if word in stopWords: + count += 1 + stopWordsCount.append(count) + return pd.Series(stopWordsCount) + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def MostCommonWords(self, inputCorpus, num_of_words=40): + """""" get the most common words in corpus + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""most_common_words"" and ""freq"" + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + new = inputCorpus.str.split() + new = new.values.tolist() + corpus = [word for i in new for word in i if word not in stopWords] + counter = Counter(corpus) + most = counter.most_common() + + x, y = [], [] + for word, count in most[: num_of_words + 1]: + x.append(word) + y.append(count) + return pd.DataFrame([x, y],index=['most_common_words', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def NullCount(self, inputCorpus): + """""" Calculate the number of null entries in corpus + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + Returns + ------- + int + count of null entries in corpus + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + return pd.Series(inputCorpus.isnull().sum()) + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def TopNgram(self, inputCorpus, ngram, num_of_words=10): + """""" Get the top words from the ngrams + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + ngram: int + ngram required + num_of_words:int, optional + numbers of words to be returned + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""ngram_words"" and ""freq"" + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + words = [] + for doc in inputCorpus: + word = [w for w in word_tokenize(doc) if (w not in stopWords)] + words.append("" "".join(word)) + vec = CountVectorizer(ngram_range=(ngram, ngram)).fit(words) + bag_of_words = vec.transform(inputCorpus) + sum_words = bag_of_words.sum(axis=0) + words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] + words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)[:num_of_words] + words = [] + frequency = [] + for word, freq in words_freq: + words.append(word) + frequency.append(freq) + return pd.DataFrame([words, frequency],index=['ngram_words', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def Polarity(self, inputCorpus): + """""" Get the polarity of the text + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + Returns + ------- + pandas.Series {double} + series of calculated polarity of the documents + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + return inputCorpus.apply(lambda x: TextBlob(x).sentiment.polarity) + except: + 
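+            # TextBlob's sentiment.polarity is a float in [-1.0, 1.0] (negative to
+            # positive); on failure the full traceback is logged and re-raised below.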
self.__Log(""exception"", sys.exc_info()) + raise + + def ReadabilityScore(self, inputCorpus): + """""" Get the Readability Score of the text + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + Returns + ------- + pandas.Series {double} + series of calculated Readability Score of the documents + """""" + import textstat + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + + if isinstance(inputCorpus, pd.Series): + return pd.Series([textstat.flesch_reading_ease(text) for text in inputCorpus]) + else: + return [textstat.flesch_reading_ease(inputCorpus)] + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def TagEntityCount(self, inputCorpus): + """""" Calculate the frequency of each entity present in documents + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""entity"" and ""freq"" + """""" + def ner(text): + doc = nlp(text) + return [X.label_ for X in doc.ents] + + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + nlp = spacy.load(""en_core_web_sm"") + + ent = inputCorpus.apply(lambda x: ner(x)) + ent = [x for sub in ent for x in sub] + + counter = Counter(ent) + count = counter.most_common() + x, y = map(list, zip(*count)) + return pd.DataFrame([x, y],index=['entity', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def MostCommonTokenPerEntity(self, inputCorpus, entity=""GPE""): + """""" Get the frequency of most common words corresponding to the specified entity in documents + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + entity: string, optional + name of the entity corresponding to which words are counted + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""token"" and ""freq"" + """""" + + def ner(text, ent): + doc = nlp(text) + return [X.text for X in doc.ents if X.label_ == ent] + + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + nlp = spacy.load(""en_core_web_sm"") + gpe = inputCorpus.apply(lambda x: ner(x, entity.upper())) + gpe = [i for x in gpe for i in x] + counter = Counter(gpe) + + x, y = map(list, zip(*counter.most_common(10))) + return pd.DataFrame([x, y],index=['token', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def MostCommonPosTag(self, inputCorpus): + """""" Get the frequency of most common POS tag present in documents + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""postag"" and ""freq"" + """""" + def pos(text): + pos = pos_tag(word_tokenize(text)) + pos = list(map(list, zip(*pos)))[1] + return pos + + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + tags = inputCorpus.apply(lambda x: pos(x)) + tags = [x for l in tags for x in l] + counter = Counter(tags) + + x, y = list(map(list, zip(*counter.most_common(7)))) + return pd.DataFrame([x, y],index=['postag', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def MostCommonWordsInPOSTag(self, inputCorpus, tag=""NN""): + """""" Get the frequency of most 
common words related to specified POS tag present in documents + Parameters + ---------- + inputCorpus: sequence of input documents where each document consists of paragraphs or sentences + + tag: string, optional + POS tag corresponding to which words frequency will be calculated + Returns + ------- + Pandas.DataFrame{string, int} + Dataframe with columns ""words"" and ""freq"" + """""" + def get_POSTag(text, tag): + adj = [] + pos = pos_tag(word_tokenize(text)) + for word, tg in pos: + if tg == tag: + adj.append(word) + return adj + + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + words = inputCorpus.apply(lambda x: get_POSTag(x, tag.upper())) + words = [x for l in words for x in l] + counter = Counter(words) + x = [] + y = [] + if len(counter): + x, y = list(map(list, zip(*counter.most_common(7)))) + return pd.DataFrame([x, y],index=['words', 'freq']).T + except: + self.__Log(""exception"", sys.exc_info()) + raise + + def __preprocessData(self, inputCorpus): + """""" Prepare the data for topic modelling + """""" + try: + self.__Log(""info"", ""Start of {} function"".format(sys._getframe().f_code.co_name)) + corpus = [] + lem = WordNetLemmatizer() + for doc in inputCorpus: + words = [w for w in word_tokenize(doc) if (w not in stopWords)] + words = [lem.lemmatize(w) for w in words if len(w) > 2] + corpus.append(words) + return corpus + except: + self.__Log(""exception"", sys.exc_info()) + raise import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))" +" +from .cat_type_str import cat_to_str +__version__ = ""1.0"" ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +class cat_to_str: + def __init__(self): + pass + + def convert(self, x): + return pd.DataFrame(x).astype(str) + + import pandas as pd +def dataGarbageValue(dataFrame,datetimeFeature): + if datetimeFeature == '' or datetimeFeature.lower() == 'na': + return 'Success','' + try: + features = datetimeFeature.split(',') + for dtfeature in features: + dataFrame[dtfeature] = pd.to_datetime(dataFrame[dtfeature],errors='coerce') + if pd.isnull(dataFrame[dtfeature]).sum() > 0: + return 'Error',dtfeature+' feature have some garbage values' + except Exception as e: + print(e) + return 'Error', 'Datetime features validation error' + + return 'Success','' import os +from pathlib import Path +import pandas as pd +import numpy as np +import json + +def listToStringWithDelimiter(s, vectorDBFeatureDelimitInDoc): + #lenght + sLen = len(s) + + # initialize an empty string + str1 = """" + + # traverse in the string + for i in range(0, sLen-1): + str1 +=str(s[i])+vectorDBFeatureDelimitInDoc + str1 +=str(s[sLen-1]) + + # return string + return str1 + +def save_csv(df, fileLocation, encoding=None): + #import pdb;pdb.set_trace(); + try: + parent_dir = Path(fileLocation).parent + parent_dir.mkdir(parents=True, exist_ok=True) + if encoding: + df.to_csv(fileLocation, encoding=encoding, index=False,) + else: + df.to_csv(fileLocation, index=False) + return True, '' + except Exception as e: + print(e) + return False, str(e) + +def save_csv_compressed(df, fileLocation, encoding=None): + try: + parent_dir = Path(fileLocation).parent + parent_dir.mkdir(parents=True, exist_ok=True) + if encoding: + df.to_csv(fileLocation, encoding=encoding, index=False, compression='gzip') + else: + df.to_csv(fileLocation, index=False, compression='gzip') + return True, '' + except Exception as e: + print(e) + return False, str(e) +def read_df(fileLocation,encoding=None, nrows=None): + parent_dir = Path(fileLocation).parent + if parent_dir.exists(): + try: + if encoding and nrows: + df = pd.read_csv(fileLocation, encoding=encoding,nrows=nrows,encoding_errors= 'replace') + elif encoding: + df = pd.read_csv(fileLocation, encoding=encoding,encoding_errors= 'replace') + elif nrows: + df = pd.read_csv(fileLocation, nrows=nrows) + return True, df + + except Exception as e: + df = pd.read_csv(fileLocation, encoding=""utf-8"",encoding_errors= 'replace') + print(e) + return True,df + else: + print(""parent fails"") +def read_df_compressed(fileLocation, encoding=None, nrows=None): + parent_dir = Path(fileLocation).parent + if parent_dir.exists(): + try: + if encoding: + df = pd.read_csv(fileLocation, encoding=encoding, compression=""gzip"",encoding_errors= 'replace') + if nrows: + df = pd.read_csv(fileLocation, nrows=nrows, compression=""gzip"") + else: + df = pd.read_csv(fileLocation, encoding=""utf-8"", compression=""gzip"",encoding_errors= 'replace') + return True, df + + except Exception as e: + df = pd.read_csv(fileLocation, encoding=""utf-8"",encoding_errors= 'replace') + print(e) + return True,df + else: + print(""parent fails"") + +def save_chromadb(df, config_obj, fileLocation, modelFeatures): + import chromadb + #from chromadb.config import Settings + try: + parent_dir = Path(fileLocation).parent + parent_dir.mkdir(parents=True, exist_ok=True) + vectorDBFeatureDelimitInDoc = config_obj.getVectorDBFeatureDelimitInDoc() + persist_directory = os.path.dirname(os.path.abspath(fileLocation)) + + # client = chromadb.Client( + # Settings( + # persist_directory=persist_directory, + # chroma_db_impl=""duckdb+parquet"", + 
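+        # (The Settings-based client above is commented out; the PersistentClient
+        #  below stores the collection next to fileLocation. For example, a file
+        #  named 'embeddings.csv' would yield a collection 'embeddingsVecDB',
+        #  populated in batches of 500 rows.)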
# ) + # ) + client = chromadb.PersistentClient(path=persist_directory) + + + # Create a new chroma collection + collection_name = os.path.basename(fileLocation).split('/')[-1] + collection_name = collection_name.replace('.csv', '') + collection_name = collection_name + 'VecDB' + collection = client.create_collection( + name=collection_name, + metadata={""hnsw:space"": ""cosine""} + + ) + + features = modelFeatures.split("","") + dftxt = pd.concat([df.pop(x) for x in features], axis=1) + + stepSize = 500 + for i in range(0, len(df),stepSize): + start = i + end = i+ stepSize + dfembdary = df.iloc[start:end].to_numpy() + dftxtary = dftxt.iloc[start:end].to_numpy() + idxary = df.iloc[start:end].index.values + #convert to string + idxary = [str(x) for x in idxary] + dftxtary = [listToStringWithDelimiter(x.tolist(), vectorDBFeatureDelimitInDoc) for x in dftxtary] + collection.add( + embeddings=dfembdary.tolist(), + ids=idxary, + documents= dftxtary + ) + client.persist() + + return True, '' + except Exception as e: + return False, str(e) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' import joblib +import pandas as pd +import sys +import math +import time +import pandas as pd +import numpy as np +from sklearn.metrics import confusion_matrix +from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score +from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import classification_report, confusion_matrix +from sklearn.svm import SVC +from sklearn.linear_model import LinearRegression +import argparse +import json + +def mltesting(modelfile,datafile,features,target): + + + model = joblib.load(modelfile) + + ProblemName = model.__class__.__name__ + + + if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']: + Problemtype = 'Classification' + elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']: + Problemtype = 'Regression' + else: + Problemtype = 'Unknown' + if Problemtype == 'Classification': + Params = model.get_params() + try: + df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) + if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC': + features = model.feature_names_in_ + elif ProblemName == 'XGBClassifier': + features = model.get_booster().feature_names + elif ProblemName == 'LGBMClassifier': + features = model.feature_name_ + elif ProblemName == 'CatBoostClassifier': + features = 
model.feature_names_ + + modelfeatures = features + dfp = df[modelfeatures] + tar = target + target = df[tar] + predic = model.predict(dfp) + output = {} + matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) + matrixconfusion = matrixconfusion.to_json(orient='index') + classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() + classificationreport = round(classificationreport,2) + classificationreport = classificationreport.to_json(orient='index') + output[""Precision""] = ""%.2f"" % precision_score(target, predic,average='weighted') + output[""Recall""] = ""%.2f"" % recall_score(target, predic,average='weighted') + output[""Accuracy""] = ""%.2f"" % accuracy_score(target, predic) + output[""ProblemName""] = ProblemName + output[""Status""] = ""Success"" + output[""Params""] = Params + output[""Problemtype""] = Problemtype + output[""Confusionmatrix""] = matrixconfusion + output[""classificationreport""] = classificationreport + + # import statistics + # timearray = [] + # for i in range(0,5): + # start = time.time() + # predic1 = model.predict(dfp.head(1)) + # end = time.time() + # timetaken = (round((end - start) * 1000,2),'Seconds') + # timearray.append(timetaken) + # print(timearray) + + + start = time.time() + for i in range(0,5): + predic1 = model.predict(dfp.head(1)) + end = time.time() + timetaken = (round((end - start) * 1000,2),'Seconds') + + # print(timetaken) + + start1 = time.time() + for i in range(0,5): + predic2 = model.predict(dfp.head(10)) + end1 = time.time() + timetaken1 = (round((end1 - start1) * 1000,2) ,'Seconds') + # print(timetaken1) + + + start2 = time.time() + for i in range(0,5): + predic3 = model.predict(dfp.head(100)) + end2 = time.time() + timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') + # print(timetaken2) + + output[""onerecord""] = timetaken + output[""tenrecords""] = timetaken1 + output[""hundrecords""] = timetaken2 + print(json.dumps(output)) + except Exception as e: + output = {} + output['Problemtype']='Classification' + output['Status']= ""Fail"" + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Problem Type : Classification \\\\n Error : {}'.format(ProblemName, str(e).replace('""','//""').replace('\\n', '\\\\n')) + print(output[""Msg""]) + print(json.dumps(output)) + + elif Problemtype == 'Regression': + Params = model.get_params() + try: + df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) + if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor': + features = model.feature_names_in_ + elif ProblemName == 'XGBRegressor': + features = model.get_booster().feature_names + elif ProblemName == 'LGBMRegressor': + features = model.feature_name_ + elif ProblemName == 'CatBoostRegressor': + features = model.feature_names_ + modelfeatures = features + dfp = df[modelfeatures] + tar = target + target = df[tar] + predict = model.predict(dfp) + mse = mean_squared_error(target, predict) + mae = mean_absolute_error(target, predict) + rmse = math.sqrt(mse) + r2 = r2_score(target,predict,multioutput='variance_weighted') + output = {} + output[""MSE""] = ""%.2f"" % mean_squared_error(target, predict) + output[""MAE""] = ""%.2f"" % mean_absolute_error(target, predict) + output[""RMSE""] = ""%.2f"" % math.sqrt(mse) + 
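+            # These entries mirror the values computed above: RMSE is sqrt(MSE) and
+            # the R2 below uses variance-weighted averaging across outputs.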
output[""R2""] = ""%.2f"" %r2_score(target,predict,multioutput='variance_weighted') + output[""ProblemName""] = ProblemName + output[""Problemtype""] = Problemtype + output[""Params""] = Params + output['Status']='Success' + start = time.time() + predic1 = model.predict(dfp.head(1)) + end = time.time() + timetaken = (round((end - start) * 1000,2) ,'Seconds') + + # print(timetaken) + + start1 = time.time() + predic2 = model.predict(dfp.head(10)) + end1 = time.time() + timetaken1 = (round((end1 - start1) * 1000,2),'Seconds') + # print(timetaken1) + + + start2 = time.time() + predic3 = model.predict(dfp.head(100)) + end2 = time.time() + timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') + # print(timetaken2) + + output[""onerecord""] = timetaken + output[""tenrecords""] = timetaken1 + output[""hundrecords""] = timetaken2 + print(json.dumps(output)) + except Exception as e: + output = {} + output['Problemtype']='Regression" +"' + output['Status']='Fail' + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Problem Type : Regression \\\\n Error : {}'.format(ProblemName, str(e).replace('""','//""').replace('\\n', '\\\\n')) + print(json.dumps(output)) + + else: + output = {} + output['Problemtype']='Unknown' + output['Status']='Fail' + output['Params'] = '' + output[""ProblemName""] = ProblemName + output[""Msg""] = 'Detected Model : {} \\\\n Error : {}'.format(ProblemName, 'Model not supported') + print(json.dumps(output)) + return(json.dumps(output)) + +def baseline_testing(modelFile,csvFile,features,target): + features = [x.strip() for x in features.split(',')] + return mltesting(modelFile,csvFile,features,target) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from importlib.metadata import version +import sys +import os +def requirementfile(deploy_path,model,textFeatures,learner_type): + print('hola', model) + modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] + requires = '' + for mod in modules: + requires += f""{mod}=={version(mod)}\\n"" + if len(textFeatures) > 0: + tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] + for mod in tmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Extreme Gradient Boosting (XGBoost)': + mmodules = ['xgboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Light Gradient Boosting (LightGBM)': + mmodules = ['lightgbm'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Categorical Boosting (CatBoost)': + mmodules = ['catboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'arima': + mmodules = ['pmdarima'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'fbprophet': + mmodules = ['prophet'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': + mmodules = ['tensorflow'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 + mmodules = ['lifelines'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'sentencetransformer': #bug 12833 + mmodules = ['sentence_transformers'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + filename = os.path.join(deploy_path,'requirements.txt') + f = open(filename, ""wb"") + f.write(str(requires).encode('utf8')) + f.close() + + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import platform +import json +import shutil +import logging +import sys +from AionConfigManager import AionConfigManager +from sklearn.externals import joblib +class edgeformats: + def __init__(self,deploy_path): + self.deploy_path = deploy_path + self.edge_deploy_path = os.path.join(deploy_path,""edge"") + os.mkdir(self.edge_deploy_path) + + + def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file): + if edge_format == 'onnx': + from skl2onnx import convert_sklearn + from skl2onnx.common.data_types import FloatTensorType + xtrain = xtrain[features] + initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))] + filename = os.path.join(self.deploy_path,saved_model) + loaded_model = joblib.load(filename) + onx = convert_sklearn(loaded_model, initial_types=initial_type) + onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx') + with open(onnx_filename, ""wb"") as f: + f.write(onx.SerializeToString()) + self.createedgeruntimeFile(onnx_filename,profiled_data_file,features) + + def createedgeruntimeFile(self,onnx_filename,datafilepath,features): + runtimefilecontent = '' + runtimefilecontent += 'import pandas' + runtimefilecontent += '\\n' + runtimefilecontent += 'import numpy' + runtimefilecontent += '\\n' + runtimefilecontent += 'import sys' + runtimefilecontent += '\\n' + runtimefilecontent += 'import onnxruntime as rt' + runtimefilecontent += '\\n' + runtimefilecontent += 'def onnx_runtime_validation():' + runtimefilecontent += '\\n' + runtimefilecontent += ' modelfile = r""'+str(onnx_filename)+'""' + runtimefilecontent += '\\n' + runtimefilecontent += ' datafile = r""'+str(datafilepath)+'""' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataframe = pandas.read_csv(datafile)' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataframe = dataframe['+str(features)+']' + runtimefilecontent += '\\n' + runtimefilecontent += ' df = dataframe.head(8)' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataset = df.values' + runtimefilecontent += '\\n' + runtimefilecontent += ' sess = rt.InferenceSession(modelfile)' + runtimefilecontent += '\\n' + runtimefilecontent += ' input_name = sess.get_inputs()[0].name' + runtimefilecontent += '\\n' + runtimefilecontent += ' label_name = sess.get_outputs()[0].name' + runtimefilecontent += '\\n' + runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape' + runtimefilecontent += '\\n' + runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)' + runtimefilecontent += '\\n' + runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]' + runtimefilecontent += '\\n' + runtimefilecontent += ' df[\\'predictions\\'] = pred_onx' + runtimefilecontent += '\\n' + runtimefilecontent += ' result = df.to_json(orient=""records"")' + runtimefilecontent += '\\n' + runtimefilecontent += ' return(result)' + runtimefilecontent += '\\n' + runtimefilecontent += 'if __name__ == ""__main__"":' + runtimefilecontent += '\\n' + runtimefilecontent += ' output = onnx_runtime_validation()' + runtimefilecontent += '\\n' + runtimefilecontent += ' print(""predictions:"",output)' + filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py') + f = open(filename, ""w"") + f.write(str(runtimefilecontent)) + f.close() + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* 
============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import json +import shutil +import logging + +class outputFormatter: + def __init__(self): + self.log = logging.getLogger('eion') + self.log.info('========> Inside Output Formatter') + + def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature): + self.output_formatfile = 'import json' + self.output_formatfile += '\\n' + self.output_formatfile += 'import numpy as np' + self.output_formatfile += '\\n' + self.output_formatfile += 'import pandas as pd' + self.output_formatfile += '\\n' + self.output_formatfile += 'import os' + self.output_formatfile += '\\n' + self.output_formatfile += 'from pathlib import Path' + self.output_formatfile += '\\n' + if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()==""anomaly_detection""): + self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity' + self.output_formatfile += '\\n' + + self.output_formatfile += 'class output_format(object):' + self.output_formatfile += '\\n' + if(model == 'VAR'): + self.output_formatfile += ' def invertTransformation(self,predictions):' + self.output_formatfile += '\\n' + self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"".."",""data"",""trainingdata.csv""))' + self.output_formatfile += '\\n' + self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount) + self.output_formatfile += '\\n' + self.output_formatfile += ' targetFeature = ""'+str(targetFeature)+'""' + self.output_formatfile += '\\n' + self.output_formatfile += ' columns = targetFeature.split("","")' + self.output_formatfile += '\\n' + self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)' + self.output_formatfile += '\\n' + self.output_formatfile += ' for j in range(0,len(columns)):' + self.output_formatfile += '\\n' + self.output_formatfile += ' for i in range(0, len(predictions)):' + self.output_formatfile += '\\n' + self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction = pred' + self.output_formatfile += '\\n' + self.output_formatfile += ' for col in columns:' + self.output_formatfile += '\\n' + self.output_formatfile += ' if col in dictDiffCount:' + self.output_formatfile += '\\n' + self.output_formatfile += ' if dictDiffCount[col]==2:' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction = pred' + self.output_formatfile += '\\n' + self.output_formatfile += ' return(prediction)' + self.output_formatfile += '\\n' + self.log.info(""op:modelType: \\n""+str(modelType)) + if((model.lower() in ['autoencoder','dbscan']) and 
modelType.lower()==""anomaly_detection""): + # if modelType == 'anomaly_detection': + self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):' + self.output_formatfile += '\\n' + self.output_formatfile += ' try:' + self.output_formatfile += '\\n' + self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) ' + self.output_formatfile += '\\n' + self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) ' + self.output_formatfile += '\\n' + self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() ' + self.output_formatfile += '\\n' + self.output_formatfile += ' except Exception as e:' + self.output_formatfile += '\\n' + self.output_formatfile += ' print(f""find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} "")\\n' + self.output_formatfile += ' return anomaly_info_df' + self.output_formatfile += '\\n' + if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()==""anomaly_detection""): + if (datetimeFeature!='' and datetimeFeature!='NA'): + self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' + self.output_formatfile += '\\n' + + if modelType.lower() == 'classification': + self.output_formatfile += ' modeloutput =" +"round(modeloutput,2)' + self.output_formatfile += '\\n' + + + if(learner_type == 'ImageClassification'): + if(str(output_label) != '{}'): + inv_mapping_dict = {v: k for k, v in output_label.items()} + self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) + self.output_formatfile += '\\n' + self.output_formatfile += ' predictions = []' + self.output_formatfile += '\\n' + self.output_formatfile += ' for x in modeloutput:' + self.output_formatfile += '\\n' + self.output_formatfile += ' x = le_dict[x]' + self.output_formatfile += '\\n' + self.output_formatfile += ' predictions.append(x)' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' predictions=modeloutput' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = predictions' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\')' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)}' + self.output_formatfile += '\\n' + elif(learner_type == 'Text Similarity'): + self.output_formatfile += ' df[\\'prediction\\'] = np.where(modeloutput > '+str(threshold)+',1,0)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = modeloutput' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)}' + self.output_formatfile += '\\n' + elif(learner_type == 'TS'): + if(model == 'VAR'): + self.output_formatfile += ' modeloutput = self.invertTransformation(modeloutput)' + self.output_formatfile += '\\n' + self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\',double_precision=2)' + self.output_formatfile += '\\n' + 
self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(modeloutput)}' + elif(model.lower() == 'fbprophet'): + self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\')' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(modeloutput)}' + elif((model.lower() == 'lstm' or model.lower() == 'mlp') and len(features) >= 1): + self.output_formatfile += ' modeloutput = modeloutput.round(2)\\n' + self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\')\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(modeloutput)}\\n' + else: + self.output_formatfile += ' modeloutput = modeloutput.round(2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' modeloutput = json.dumps(modeloutput.tolist())' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":eval(modeloutput)}' + self.output_formatfile += '\\n' + elif(learner_type in ['RecommenderSystem','similarityIdentification','contextualSearch']): + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)}' + self.output_formatfile += '\\n' + else: + if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): + if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): + if(str(output_label) != '{}'): + inv_mapping_dict = {v: k for k, v in output_label.items()} + self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) + self.output_formatfile += '\\n' + ''' + if(model in ['SGDClassifier']): + self.output_formatfile += ' modeloutput = modeloutput.replace({""predict_class"": le_dict})' + else: + self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' + ''' + if modelType != 'anomaly_detection': + self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' + self.output_formatfile += '\\n' + if(threshold != -1): + ''' + if(model in ['SGDClassifier']): + self.output_formatfile += ' df[\\'prediction\\'] = np.where(modeloutput[\\'probability\\'] > '+str(threshold)+',1,0)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = modeloutput[\\'probability\\']' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = """"' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = np.where(df[\\'prediction\\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' + self.output_formatfile += '\\n' + ''' + self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' + self.output_formatfile += '\\n' + self.output_formatfile += ' 
df[\\'prediction\\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = np.where(df[\\'prediction\\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' + self.output_formatfile += '\\n' + else: + ''' + if(model in ['SGDClassifier']): + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[\\'predict_class\\']' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = """"' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = ""NA""' + self.output_formatfile += '\\n' + else: + + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' + self.output_formatfile += '\\n' + ''' + if modelType == 'anomaly_detection': + # if (model.lower()=='autoencoder'): + if model.lower() in ['autoencoder']: + if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): + self.output_formatfile += ' df[modeloutput.columns] = modeloutput\\n' + self.output_formatfile += ' anomaly_df=df[df[\\'anomaly\\'] == True]\\n' + self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\\n' + self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\\'data\\')\\n' + self.output_formatfile += ' anomaly_prediction_df.to_csv(f""{new_dir}/anomaly_data.csv"")\\n' + self.output_formatfile += ' try:\\n' + self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\\n' + self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\\n' + self.output_formatfile += ' anomaly_prediction_df.drop(""Time_diff"",axis=1,inplace=True)\\n' + self.output_formatfile += ' except:\\n' + self.output_formatfile += ' pass\\n' + self.output_formatfile += ' try:\\n' + self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\\'left\\')\\n' + self.output_formatfile += ' df_out[\\'anomaly\\'].replace([\\'None\\', \\'NaN\\', np.nan], ""Normal"", inplace=True)\\n' + self.output_formatfile += ' df_out[\\'anomalyType\\'].replace([\\'None\\', \\'NaN\\', np.nan], ""Normal"", inplace=True)\\n' + self.output_formatfile += ' df_out.to_csv(f""{new_dir}/overall_ad_output.csv"") \\n' + self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str) \\n' + self.output_formatfile += ' df_out.drop(""time_diff"",axis=1,inplace=True)\\n' + self.output_formatfile += ' except Exception as e:\\n' + self.output_formatfile += ' print(""anomaly data updated issue"",e)\\n' + self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n' + self.output_formatfile += ' df=df_out \\n' + else: + self.output_formatfile += ' df[modeloutput.columns] = modeloutput\\n' + elif (model.lower()=='dbscan'): + if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): + self.output_formatfile += ' df[\\'anomaly\\'] = modeloutput[\\'cluster\\']== -1\\n' + self.output_formatfile += 
' anomaly_df=df[df[\\'anomaly\\'] == True]\\n' + self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\\n' + self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\\'data\\')\\n' + self.output_formatfile += ' try:\\n' + self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\\n' + self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\\n' + self.output_formatfile += ' except:\\n' + self.output_formatfile += ' pass\\n' + self.output_formatfile += ' try:\\n' + self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\\'left\\')\\n' + self.output_formatfile += ' df_out[\\'anomaly\\'].replace([\\'None\\', \\'NaN\\', np.nan], ""Normal"", inplace=True)\\n' + self.output_formatfile += ' df_out.to_csv(f""{new_dir}/overall_ad_output.csv"") \\n' + self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n' + self.output_formatfile += ' except Exception as e:\\n' + self.output_formatfile += ' print(""anomaly data updated."")\\n' + self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n' + self.output_formatfile += ' df=df_out \\n' + else: + self.output_formatfile += ' df[\\'anomaly\\'] = modeloutput[\\'cluster\\']== -1\\n' + self.output_formatfile += ' df.sort_values(by=[\\'anomaly\\'], ascending=False, inplace=True)\\n' + else: + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)' + self.output_formatfile += '\\n' + if learner_type != 'DL': + self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1).round(2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' + self.output_formatfile += '\\n' + else:" +" + if model == 'COX': + self.output_formatfile += '\\n' + self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)' + self.output_formatfile += '\\n' + #self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\\'records\\',double_precision=2)' + #self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[0]' + if(learner_type == 'objectDetection'): + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\']' + else: + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\'].round(2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)}' + self.output_formatfile += '\\n' + self.output_formatfile += ' return(json.dumps(outputjson))' + filename = os.path.join(deploy_path,'script','output_format.py') + #print(deploy_path) + f = open(filename, ""wb"") + self.log.info('-------> Output Mapping File Location :'+filename) + f.write(str(self.output_formatfile).encode('utf8')) + f.close() #task 11190: Item based Recommender system---Usnish +import os +def generate_recommender_code(deployPath): + code = 
"""""" +import pandas as pd +import numpy as np +import os +ITEMID = 'itemId' +DATA_FOLDER = 'data' +USER_ITEM_MATRIX = 'user_item_matrix.csv' +ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv' +RATING = 'rating' +SIMILARITY_SCORE = 'similarity_score' + +class collaborative_filter(object): + def __init__(self): + self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0) + self.matrix.index.name = ITEMID + self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX)) + self.item_similarity_cosine.index.name = ITEMID + self.item_similarity_cosine.columns.name = ITEMID + def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5): + import operator + if not isinstance(picked_userid,str): + picked_userid = str(picked_userid) + if picked_userid not in self.matrix.columns: + raise KeyError(""UserID Does Not Exist"") + # Movies that the target user has not watched + try: + picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index() + picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist() + + # Movies that the target user has watched + picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \\ + .sort_values(ascending=False)) \\ + .reset_index() \\ + .rename(columns={picked_userid: 'rating'}) + + # Dictionary to save the unwatched movie and predicted rating pair + rating_prediction = {} + # Loop through unwatched movies + for picked_movie in picked_userid_unwatched: + if not isinstance(picked_movie,str): + picked_movie = str(picked_movie) + # Calculate the similarity score of the picked movie with other movies + try: + picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename( + columns={picked_movie: SIMILARITY_SCORE}) + # Rank the similarities between the picked user watched movie and the picked unwatched movie. 
+ picked_userid_watched_similarity = pd.merge(left=picked_userid_watched, + right=picked_movie_similarity_score, + on=ITEMID, + how='inner') \\ + .sort_values(SIMILARITY_SCORE, ascending=False)[ + :number_of_similar_items] + # Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user + try: + predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6) + except Exception as e: + predicted_rating = 0 + # Save the predicted rating in the dictionary + + rating_prediction[picked_movie] = predicted_rating + except Exception as e: + rating_prediction[picked_movie] = 0 + # Return the top recommended movies + + return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations] + except Exception as e: + print(e) + raise KeyError(str(e)) + def predict(self,X): + predictions = [] + for index,row in X.iterrows(): + score = self.item_based_rec(int(row[""uid""]),int(row[""numberOfRecommendation""])) + df = pd.DataFrame(score,columns=['ItemId','Ratings']) + predictions.append(df) + return predictions"""""" + filename = os.path.join(deployPath, 'script', 'item_recommendation.py') + # print(deploy_path) + f = open(filename, ""wb"") + + f.write(str(code).encode('utf8')) + f.close() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json +from pathlib import Path +from AION.prediction_package.imports import importModule +from AION.prediction_package import utility +from AION.prediction_package.utility import TAB_CHAR +from importlib.metadata import version + +"""""" + This file provide the functionality which is common for most of the + problem types deployment. 
+"""""" + +def main_code(): + return """""" +class predict(): + + def __init__(self): + self.profiler = inputprofiler() + self.selector = selector() + self.trainer = trainer() + self.formatter = output_format() + + def run(self, data): + try: + df = self._parse_data(data) + raw_df = df.copy() + df = self.profiler.run(df) + df = self.selector.run(df) + df = self.trainer.run(df) + output = self.formatter.run(raw_df, df) + print(""predictions:"",output) + return (output) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + + def _parse_data(self, data): + file_path = Path(data) + if file_path.suffix == "".tsv"": + df = pd.read_csv(data,encoding='utf-8',sep='\\\\t',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix in ["".csv"", "".dat""]: + df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix in ["".gz""] and file_path.stem.endswith('.csv'): + df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + df = pd.json_normalize(jsonData) + else: + jsonData = json.loads(data) + df = pd.json_normalize(jsonData) + return df + +import sys +if __name__ == ""__main__"": + output = predict().run(sys.argv[1]) + """""" + +def profiler_code(params, indent=0): + """""" + This will create the profiler file based on the config file. + separated file is created as profiler is required for input drift also. + """""" + imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): +"""""" + init_code = """""" + def __init__(self): +"""""" + if params.get('text_features'): + imported_modules.append({'module':'importlib.util'}) + init_code += """""" + # preprocessing + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if not preprocess_path.exists(): + raise ValueError(f'Preprocess model file not found: {preprocess_path}') + self.profiler = joblib.load(preprocess_path) + +"""""" + run_code = """""" + def run(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + if params.get('input_features_type'): + imported_modules.append({'module':'dtype','mod_from':'numpy'}) + run_code += f"""""" + df = df.astype({params.get('input_features_type')}) +"""""" + if params.get('word2num_features'): + imported_modules.append({'module':'w2n','mod_from':'word2number'}) + run_code += f"""""" + def s2n(value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan + df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))"""""" + if params.get('unpreprocessed_columns'): + run_code += f"""""" + unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}'] + df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if params.get('force_numeric_conv'): + run_code += f"""""" + df[{params['force_numeric_conv']}] = 
df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""""" + if params.get('conversion_method','').lower() == 'glove': + code_text, modules = __profiler_glove_code(params) + imported_modules.extend( modules) + init_code += code_text + elif params.get('conversion_method','').lower() == 'fasttext': + init_code += __profiler_fasttext_code(params) + run_code += __profiler_main_code(params) + if params.get('unpreprocessed_columns'): + run_code += f"""""" + df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data + """""" + utility.import_modules(importer, imported_modules) + import_code = importer.getCode() + return import_code + code + init_code + run_code + +def __profiler_glove_code(params, indent=2): + modules = [] + modules.append({'module':'load_pretrained','mod_from':'text.Embedding'}) + modules.append({'module':'TextProcessing','mod_from':'text'}) + code = """""" +model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') +embed_size, pretrained_model = load_pretrained(model_path) +self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)), modules + +def __profiler_fasttext_code(params, indent=2): + code = """""" +def get_pretrained_model_path(): + try: + from AION.appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath +if not importlib.util.find_spec('fasttext'): + raise ValueError('fastText not installed') +else: + import os + import fasttext + import fasttext.util + cwd = os.getcwd() + os.chdir(get_pretrained_model_path()) + fasttext.util.download_model('en', if_exists='ignore') + pretrained_model = fasttext.load_model('cc.en.300.bin') + os.chdir(cwd) +self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) +self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary') +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + +def __profiler_main_code(params, indent=2): + code = f"""""" +df = self.profiler.transform(df) +columns = {" +"params['output_features']} +if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) +else: + df = pd.DataFrame(df, columns=columns) +return df +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + +def feature_selector_code( params, indent=0): + modules = [ + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} + ] + code = """""" +class selector(): + # this class + def __init__(self): + pass + + def run(self, df):"""""" + code +=f"""""" + return df[{params['output_features']}] +"""""" + return code, modules + +def feature_reducer_code( params, indent=0): + modules = [ + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + code = f"""""" +class selector(): + def __init__(self): + reducer_file = (Path(__file__).parent/""model"")/""{params['reducer_file']}"" + if not reducer_file.exists(): + raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}') + self.model = joblib.load(reducer_file) + + def run(self, df): + reducer_input = {params['input_features']} + reducer_output = {params['output_features']} + df = self.model.transform(df[reducer_input]) + 
return pd.DataFrame(df,columns=reducer_output) + """""" + if indent: + code = code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + return code, modules + +def create_feature_list(config=None, target_feature=None, deploy_path=None): + featurelist = [] + if 'profiler' in config: + if 'input_features_type' in config['profiler']: + input_features = config['profiler']['input_features_type'] + for x in input_features: + featurelt={} + featurelt['feature'] = x + if x == target_feature: + featurelt['Type'] = 'Target' + else: + if input_features[x] in ['int','int64','float','float64']: + featurelt['Type'] = 'Numeric' + elif input_features[x] == 'object': + featurelt['Type'] = 'Text' + elif input_features[x] == 'category': + featurelt['Type'] = 'Category' + else: + featurelt['Type'] = 'Unknown' + featurelist.append(featurelt) + + featurefile = f"""""" +import json +def getfeatures(): + try: + features = {featurelist} + outputjson = {{""status"":""SUCCESS"",""features"":features}} + output = json.dumps(outputjson) + print(""Features:"",output) + return(output) + except Exception as e: + output = {{""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}} + print(""Features:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = getfeatures() +"""""" + with open( deploy_path/'featureslist.py', 'wb') as f: + f.write( str(featurefile).encode('utf8')) + +def requirement_file(deploy_path,model,textFeatures,learner_type='ML'): + modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] + requires = '' + for mod in modules: + requires += f""{mod}=={version(mod)}\\n"" + if len(textFeatures) > 0: + tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] + for mod in tmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Extreme Gradient Boosting (XGBoost)': + mmodules = ['xgboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Light Gradient Boosting (LightGBM)': + mmodules = ['lightgbm'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Categorical Boosting (CatBoost)': + mmodules = ['catboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'arima': + mmodules = ['pmdarima'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'fbprophet': + mmodules = ['prophet'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': + mmodules = ['tensorflow'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 + mmodules = ['lifelines'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'sentencetransformer': #bug 12833 + mmodules = ['sentence_transformers'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + with open( deploy_path/'requirements.txt', 'wb') as f: + f.write(str(requires).encode('utf8')) + +def create_readme_file(deploy_path,modelfile,features): + data = json.dumps([{x:x+'_value'} for x in features]) + backslash_data = data.replace('""', '\\\\""') + content = f"""""" +========== Files Structures ========== +{modelfile} ------ Trained Model +aion_prediction.py --> Python package entry point +script/inputprofiler.py --> 
Profiling like FillNA and Category to Numeric + +========== How to call the model ========== +============== From Windows Terminal ========== +python aion_prediction.py ""{backslash_data}"" +============== From Linux Terminal ========== +python aion_prediction.py ""{data}"" + +============== Output ========== +{{""status"":""SUCCESS"",""data"":[{{""Data1"":""Value"",""prediction"":""Value""}}]}} ## for single Row/Record +{{""status"":""SUCCESS"",""data"":[{{""Data1"":""Value"",""prediction"":""Value""}},{{""Data1"":""Value"",""prediction"":""Value""}}]}} ## For Multiple Row/Record +{{""status"":""ERROR"",""message"":""description""}} ## In Case Exception or Error + """""" + filename = deploy_path/'readme.txt' + with open(filename, 'w') as f: + f.write(content) + +def create_util_folder(deploy_path): + import tarfile + ext_path = Path(__file__).parent.parent/'utilities' + for x in ext_path.iterdir(): + if x.suffix == '.tar': + if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: + my_tar = tarfile.open(x) + my_tar.extractall(deploy_path) + my_tar.close() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import json +import shutil +import logging + +class aionPrediction: + + def __init__(self): + self.log = logging.getLogger('eion') + + def create_optimus_prediction_file (self,classname,deploy_path,learner_type): + self.predictionFile = 'import warnings' + self.predictionFile += '\\n' + self.predictionFile += 'warnings.filterwarnings(""ignore"")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from pandas import json_normalize' + self.predictionFile += '\\n' + self.predictionFile += 'from importlib import import_module' + self.predictionFile += '\\n' + self.predictionFile += 'import importlib.util' + self.predictionFile += '\\n' + self.predictionFile += 'class prediction:' + self.predictionFile += '\\n' + self.predictionFile += ' def predict_from_json(self,json_data):' + self.predictionFile += '\\n' + self.predictionFile += ' data = json.loads(json_data)' + self.predictionFile += '\\n' + self.predictionFile += ' output=self.predict(data)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile += '\\n' + self.predictionFile += ' def predict_from_file(self,filename):' + self.predictionFile += '\\n' + self.predictionFile += ' with open(filename,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' data = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' output=self.predict(data)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile 
+= '\\n' + self.predictionFile += ' def predict(self,json_data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + #self.predictionFile += ' jsonData = json.loads(json_data)' + self.predictionFile += ' jsonData=json_data' + self.predictionFile += '\\n' + self.predictionFile += ' model_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/trained_model.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' model_obj.loader.exec_module(model)' + self.predictionFile += '\\n' + #if(learner_type != 'TextML'): + self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/inputprofiler.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)' + self.predictionFile += '\\n' + + self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/selector.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' selector_obj.loader.exec_module(selector)' + self.predictionFile += '\\n' + + self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/output_format.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' output_format_obj.loader.exec_module(output_format)' + self.predictionFile += '\\n' + + self.predictionFile += ' df = json_normalize(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' df0 = df.copy()' + self.predictionFile += '\\n' + #if(learner_type != 'TextML'): + self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()' + self.predictionFile += '\\n' + self.predictionFile += ' df = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + self.predictionFile += ' selectobj = selector.selector()' + self.predictionFile += '\\n' + self.predictionFile += ' df = selectobj.apply_selector(df)' + self.predictionFile += '\\n' + self.predictionFile += ' output = model.trained_model().predict(df,"""")' + self.predictionFile += '\\n' + self.predictionFile += ' outputobj = output_format.output_format()' + self.predictionFile += '\\n' + self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' + #self.predictionFile += '\\n' + #self.predictionFile += ' print(output)' + self.predictionFile += '\\n' + self.predictionFile += ' return output' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' return json.dumps(output)' + self.predictionFile += '\\n' + self.predictionFile += ' except Exception as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + 
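+# A minimal, illustrative sketch of the dynamic-import pattern emitted above
+# (spec_from_file_location -> module_from_spec -> exec_module). The file name
+# 'trained_model.py' and the class name 'trained_model' mirror the generated script;
+# everything else here is a placeholder, not part of the generator itself.
+import importlib.util
+import os
+
+module_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "trained_model.py")
+spec = importlib.util.spec_from_file_location("trained_model", module_path)
+trained_model_module = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(trained_model_module)   # raises FileNotFoundError if the file is absent
+model = trained_model_module.trained_model()    # class name as used by the generated prediction.py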
self.predictionFile += ' return json.dumps(output)' + self.predictionFile += '\\n' + self.prediction" +"File += '\\n' + self.predictionFile += 'if __name__ == ""__main__"":' + self.predictionFile += '\\n' + self.predictionFile += ' predictobj = prediction()' + self.predictionFile += '\\n' + self.predictionFile += ' predictobj.predict_from_file(sys.argv[1])' + self.predictionFile += '\\n' + + + filename = os.path.join(deploy_path,'prediction.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_text_drift_file(self,deploy_path,features,target,model_type): #task-14549 + self.predictionFile = 'import warnings' + self.predictionFile += '\\n' + self.predictionFile += 'warnings.filterwarnings(""ignore"")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from monitoring import check_drift' + self.predictionFile += '\\n' + self.predictionFile += 'def drift(data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + self.predictionFile += ' if os.path.splitext(data)[1] == "".json"":' + self.predictionFile += '\\n' + self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.loads(data)' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData[\\'features\\'] = \\''+"","".join([feature for feature in features])+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData[\\'target\\'] = \\''+target+'\\'' + self.predictionFile += '\\n' + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' + self.predictionFile += '\\n' + else: + self.predictionFile += ' htmlfilepath=\\'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.dumps(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' output = check_drift(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' output = json.loads(output)' + self.predictionFile += '\\n' + self.predictionFile += ' output[\\'htmlPath\\'] = str(htmlfilepath)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"", json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return(output)' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' except Exception as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + 
self.predictionFile += '\\n' + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.predictionFile += 'def evidently_details(deployJson):' + self.predictionFile += '\\n' + self.predictionFile += ' features = deployJson[\\'features\\'].split(\\',\\')' + self.predictionFile += '\\n' + self.predictionFile += ' target = deployJson[\\'target\\']' + self.predictionFile += '\\n' + self.predictionFile += """"""\\ + try: + from evidently.report import Report + from evidently.metrics import TextDescriptorsDriftMetric, ColumnDriftMetric + from evidently.pipeline.column_mapping import ColumnMapping + from sklearn.preprocessing import LabelEncoder + historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) + currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) + historicaldataFrame.columns = historicaldataFrame.columns.str.strip() + currentdataFrame.columns = currentdataFrame.columns.str.strip() + hdf = historicaldataFrame.dropna(subset=features) + cdf = currentdataFrame.dropna(subset=features) + hdf['Text_Features'] = hdf[features].apply(""-"".join, axis=1) + cdf['Text_Features'] = cdf[features].apply(""-"".join, axis=1) + hdf['target'] = historicaldataFrame[target] + cdf['target'] = currentdataFrame[target] + le = LabelEncoder() + le.fit(hdf['target']) + hdf['target'] = le.transform(hdf['target']) + le.fit(cdf['target']) + cdf['target'] = le.transform(cdf['target']) + hd = hdf[['Text_Features', 'target']] + cd = cdf[['Text_Features', 'target']] + column_mapping = ColumnMapping() + column_mapping.target = 'target' + column_mapping.prediction = 'target' + column_mapping.text_features = ['Text_Features'] + column_mapping.numerical_features = [] + column_mapping.categorical_features = [] + performance_report = Report(metrics=[ColumnDriftMetric('target'),TextDescriptorsDriftMetric(column_name='Text_Features')]) + performance_report.run(reference_data=hd, current_data=cd,column_mapping=column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),""log"",""My_report.html"") + performance_report.save_html(report) + return(report) + except Exception as e: + print('Error: ', e) + return('NA')"""""" + + self.predictionFile += '\\n' + self.predictionFile += 'if __name__ == ""__main__"":' + self.predictionFile += '\\n' + self.predictionFile += ' output = drift(sys.argv[1])' + filename = os.path.join(deploy_path,'aion_ipdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_drift_file(self,deploy_path,features,target,model_type): + self.predictionFile = 'import warnings' + self.predictionFile += '\\n' + self.predictionFile += 'warnings.filterwarnings(""ignore"")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from monitoring import check_drift' + self.predictionFile += '\\n' + self.predictionFile += 'from pandas import json_normalize' + self.predictionFile += '\\n' + self.predictionFile += 'from script.inputprofiler import inputprofiler' + self.predictionFile += '\\n' + self.predictionFile += 'def drift(data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + 
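+# Illustrative only: the drift entry point generated here (aion_ipdrift.py) is invoked with a
+# single JSON argument carrying the two keys read by evidently_details/check_drift above.
+# The file paths below are placeholders, not real locations.
+import json
+
+drift_request = {
+    "trainingDataLocation": "/path/to/training_data.csv",   # reference data captured at training time
+    "currentDataLocation": "/path/to/production_data.csv",  # newly observed production data
+}
+print(json.dumps(drift_request))  # e.g. python aion_ipdrift.py "<this JSON string>"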
self.predictionFile += ' if os.path.splitext(data)[1] == "".json"":' + self.predictionFile += '\\n' + self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.loads(data)' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData[\\'features\\'] = \\''+"","".join([feature for feature in features])+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData[\\'target\\'] = \\''+target+'\\'' + self.predictionFile += '\\n' + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' + self.predictionFile += '\\n' + else: + self.predictionFile += ' htmlfilepath=\\'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.dumps(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' output = check_drift(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' output = json.loads(output)' + self.predictionFile += '\\n' + self.predictionFile += ' output[\\'htmlPath\\'] = str(htmlfilepath)' + self.predictionFile += '\\n' + self.predictionFile += ' output = json.dumps(output)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"",output)' + self.predictionFile += '\\n' + self.predictionFile += ' return(output)' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' except Exception as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""drift:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.predictionFile += 'def evidently_details(deployJson):' + self.predictionFile += '\\n' + self.predictionFile += ' features = deployJson[\\'features\\'].split(\\',\\')' + self.predictionFile += '\\n' + self.predictionFile += ' target = deployJson[\\'target\\']' + self.predictionFile += '\\n' + self.predictionFile += """"""\\ + try: + from evidently.report import Report + from evidently.metric_preset import DataDriftPreset + historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) + currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) + historicaldataFrame.columns = historicaldataFrame.columns.str.strip() + currentdataFrame.columns = currentdataFrame.columns.str.strip() + profilerobj = inputprofiler() + historicaldataFramep = profilerobj.run(historicaldataFrame) + currentdataFramep = profilerobj.run(currentdataFrame) + hdf = historicaldataFramep[features] + cdf = currentdataFramep[features] + hdf['target'] = historicaldataFrame[target] + cdf['target'] = currentdataFrame[target] + data_drift_report = Report(metrics = 
[DataDriftPreset()]) + data_drift_report.run(reference_data=hdf,current_data=cdf,column_mapping = None) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','my_report.html') + data_drift_report.save_html(report) + return(report) + except Exception as e: + print('Error') + return('NA')"""""" + + self.predictionFile += '\\n' + self.predictionFile += 'if __name__ == ""__main__"":' + self.predictionFile += '\\n' + self.predictionFile += ' output = drift(sys.argv[1])' + filename = os.path.join(deploy_path,'aion_ipdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature): + self.predictionFile = 'import warnings' + self.predictionFile += '\\n' + self.predictionFile += 'warnings.filterwarnings(""ignore"")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from pandas import json_normalize' + self.predictionFile += '\\n' + + if(learner_type.lower() != 'recommendersystem'): #task 11190" +" + self.predictionFile += 'from script.selector import selector' + self.predictionFile += '\\n' + self.predictionFile += 'from script.inputprofiler import inputprofiler' + self.predictionFile += '\\n' + #self.predictionFile += 'from '+classname+' import '+classname + self.predictionFile += 'from script.trained_model import trained_model' + self.predictionFile += '\\n' + else: + self.predictionFile += 'from script.item_recommendation import collaborative_filter' + self.predictionFile += '\\n' + + self.predictionFile += 'from script.output_format import output_format' + self.predictionFile += '\\n' + if (learner_type != 'RecommenderSystem'): #task 11190 + self.predictionFile += 'profilerobj = inputprofiler()' + self.predictionFile += '\\n' + self.predictionFile += 'selectobj = selector()' + self.predictionFile += '\\n' + self.predictionFile += 'modelobj = trained_model()' + self.predictionFile += '\\n' + else: + self.predictionFile += 'colabobj = collaborative_filter()' + self.predictionFile += '\\n' + + self.predictionFile += 'outputobj = output_format()' + self.predictionFile += '\\n' + self.predictionFile += 'def predict(data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + self.predictionFile += ' if os.path.splitext(data)[1] == "".tsv"":' + self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',sep=\\'\\\\t\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' elif os.path.splitext(data)[1] == "".csv"":' + self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' elif os.path.splitext(data)[1] == "".dat"":' + self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' if os.path.splitext(data)[1] == "".json"":' + 
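+# A minimal standalone sketch of the Evidently data-drift report that the generated
+# aion_ipdrift.py builds above (Report + DataDriftPreset + save_html). The two frames
+# below are toy stand-ins; the real script passes the profiled training and production data.
+import pandas as pd
+from evidently.report import Report
+from evidently.metric_preset import DataDriftPreset
+
+reference = pd.DataFrame({"f1": range(20), "f2": [x * 0.10 for x in range(20)]})
+current = pd.DataFrame({"f1": range(5, 25), "f2": [x * 0.15 for x in range(20)]})
+data_drift_report = Report(metrics=[DataDriftPreset()])
+data_drift_report.run(reference_data=reference, current_data=current, column_mapping=None)
+data_drift_report.save_html("my_report.html")  # same call the generated script uses to persist the report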
self.predictionFile += '\\n' + self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.loads(data)' + self.predictionFile += '\\n' + self.predictionFile += ' df = json_normalize(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)' + self.predictionFile += '\\n' + if str(rowfilterexpression) != '': + self.predictionFile += ' filterexpression = ""'+rowfilterexpression+'""' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.query(filterexpression)' + self.predictionFile += '\\n' + #print(grouperbyjson) + if str(grouperbyjson) != '': + datetime = grouperbyjson['datetime'] + unit = grouperbyjson['unit'] + if unit == '': + self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'])' + self.predictionFile += '\\n' + else: + self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'],unit=\\''+unit+'\\')' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.reset_index()' + self.predictionFile += '\\n' + self.predictionFile += ' df.set_index(\\'date\\',inplace=True)' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.'+grouperbyjson['groupbystring'] + self.predictionFile += '\\n' + self.predictionFile += ' df.columns = df.columns.droplevel(0)' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.reset_index()' + self.predictionFile += '\\n' + + self.predictionFile += ' df0 = df.copy()' + self.predictionFile += '\\n' + + if(learner_type != 'RecommenderSystem'): #task 11190 + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + else: + self.predictionFile += ' df = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + self.predictionFile += ' df = selectobj.apply_selector(df)' + self.predictionFile += '\\n' + #self.predictionFile += ' modelobj = '+classname+'()' + self.predictionFile += ' output = modelobj.predict(df,"""")' + self.predictionFile += '\\n' + else: + self.predictionFile += ' output = colabobj.predict(df)' + self.predictionFile += '\\n' + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)' + self.predictionFile += '\\n' + else: + self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile += ' return(output)' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' except Exception as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + 
self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += 'if __name__ == ""__main__"":' + self.predictionFile += '\\n' + self.predictionFile += ' output = predict(sys.argv[1])' + + filename = os.path.join(deploy_path,'aion_predict.py') + f = open(filename, ""w"") + f.write(str(self.predictionFile)) + f.close() + def create_classification_text_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +# from evidently.dashboard import Dashboard +# from evidently.tabs import ClassificationPerformanceTab +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +from evidently.report import Report +from evidently.pipeline.column_mapping import ColumnMapping +from evidently.metric_preset import ClassificationPreset +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.text_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()]) + iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + def create_classification_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + 
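+# Hedged usage sketch for the aion_predict.py entry point written above: it accepts one
+# argument (a .csv/.tsv/.dat/.json path or an inline JSON record) and prints
+# 'predictions:' followed by {"status":"SUCCESS","data":[...]}. The feature names and
+# deployment directory below are hypothetical.
+import json
+import subprocess
+import sys
+
+record = json.dumps([{"Feature1": "value", "Feature2": 10}])  # placeholder input features
+result = subprocess.run([sys.executable, "aion_predict.py", record],
+                        capture_output=True, text=True, cwd="/path/to/deployment")
+print(result.stdout)  # expected to contain: predictions: {"status":"SUCCESS","data":[...]}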
self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from evidently.report import Report +from evidently.metric_preset import ClassificationPreset +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + model_performance_dashboard = Report(metrics = [ClassificationPreset()]) + model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + model_performance_dashboard.save_html(report) + metrics_output = model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + def create_model_service(self,deploy_path,serviceName,problemType): + + filedata = """""" +from flask import Flask, jsonify, request +from flask_restful import Resource, Api +from aion_predict import predict"""""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +from aion_xai import local_analysis +from aion_ipdrift" +"import drift +from aion_opdrift import odrift"""""" + filedata += """""" +import json +import os +import pandas as pd +import io +import argparse +from pathlib import Path +from flask_cors import CORS, cross_origin +app = Flask(__name__) +#cross origin resource from system arguments +parser = argparse.ArgumentParser() +parser.add_argument('-ip', '--ipaddress', help='IP Address') +parser.add_argument('-p', '--port', help='Port Number') 
+parser.add_argument(""-cors"", type=str, required=False) +d = vars(parser.parse_args()) + +modelPath = Path(__file__).parent +try: + with open( (modelPath/'etc')/'display.json', 'r') as f: + disp_data = json.load(f) + is_explainable = not disp_data.get('textFeatures') +except: + disp_data = {} + is_explainable = True + +if ""cors"" in d.keys(): + if d[""cors""] != '' and d[""cors""] != None: + d[""cors""] = [s.strip() for s in d[""cors""].split("","")] + #cors = CORS(app, resources={r""/AION/*"": {""origins"": [""http://localhost"", ""http://localhost:5000""]}}) + cors = CORS(app, resources={r""/AION/*"": {""origins"": d[""cors""]}}) +api = Api(app) +class predictapi(Resource): + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + output = predict().run(json.dumps(data)) + return jsonify(json.loads(output)) + +class predictfileapi(Resource): + def post(self): + if 'file' in request.files: + file = request.files['file'] + urlData = file.read() + rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) + data = rawData.to_json(orient='records') + output = predict().run(data) + return jsonify(json.loads(output)) + else: + displaymsg='File is mising' + return jsonify(displaymsg) + + def get(self): + msg=\\"""""" +RequestType: POST +Body:send file content in body\\"""""" + return jsonify(msg) + """""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +class explainapi(Resource): + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + if is_explainable: + output = local_analysis(json.dumps(data)) + else: + output = json.dumps({""status"":""FAIL"",""data"":""explain api is not supported when text features are used for training""}) + return jsonify(json.loads(output)) + +class monitoringapi(Resource): + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + def post(self): + data = request.get_json() + output = drift(json.dumps(data)) + return jsonify(json.loads(output)) + +class performanceapi(Resource): + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + def post(self): + data = request.get_json() + output = odrift(json.dumps(data)) + return jsonify(json.loads(output)) + """""" + filedata += """""" +api.add_resource(predictapi, '/AION/{serviceName}/predict')"""""".format(serviceName=serviceName) + filedata += """""" +api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')"""""".format(serviceName=serviceName) + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +api.add_resource(explainapi, '/AION/{serviceName}/explain') +api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') +api.add_resource(performanceapi, '/AION/{serviceName}/performance')"""""".format(serviceName=serviceName) + filedata += """""" +if __name__ == '__main__': + args = 
parser.parse_args() + app.run(args.ipaddress,port = args.port,debug = True)"""""" + filename = os.path.join(deploy_path,'aion_service.py') + f = open(filename, ""wb"") + f.write(str(filedata).encode('utf8')) + f.close() + + def create_regression_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from evidently.report import Report +from evidently.metric_preset import RegressionPreset +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) + iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_regression_text_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from aion_predict import predict +from evidently.report import Report +from evidently.pipeline.column_mapping import ColumnMapping 
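+# RegressionPreset computes the regression performance metrics used for the output drift report below.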
+from evidently.metric_preset import RegressionPreset +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) + iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_publish_service(self,datalocation,usecaseid,version,problemType): + filename = os.path.join(datalocation,'aion_publish_service.py') + if not os.path.exists(filename): + filedata = """""" +import sys +import json +import time +import sqlite3 +import argparse +import pandas as pd +import io +from pathlib import Path +from datetime import datetime + +filename = Path(__file__).parent/'config.json' +with open (filename, ""r"") as f: + data = json.loads(f.read()) +modelVersion = str(data['version']) +modelPath = Path(__file__).parent/modelVersion +sys.path.append(str(modelPath)) + +try: + with open( (modelPath/'etc')/'display.json', 'r') as f: + disp_data = json.load(f) + is_explainable = not disp_data.get('textFeatures') +except: + disp_data = {} + is_explainable = True + +from flask import Flask, jsonify, request +from flask_restful import Resource, Api +from flask_cors import CORS, cross_origin +from flask import Response +from aion_predict import predict +"""""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +from aion_ipdrift import drift 
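+# drift (input drift) and odrift (output/performance drift) are bundled only for classification
+# and regression deployments; aion_xai local explanations load only when no text features were used.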
+from aion_opdrift import odrift +if is_explainable: + from aion_xai import local_analysis +"""""" + filedata += """""" +dataPath = Path(__file__).parent/'data' +dataPath.mkdir(parents=True, exist_ok=True) +app = Flask(__name__) +#cross origin resource from system arguments +parser = argparse.ArgumentParser() +parser.add_argument('-ip', '--ipaddress', help='IP Address') +parser.add_argument('-p', '--port', help='Port Number') +parser.add_argument(""-cors"", type=str, required=False) +d = vars(parser.parse_args()) + +if ""cors"" in d.keys(): + if d[""cors""] != '' and d[""cors""] != None: + d[""cors""] = [s.strip() for s in d[""cors""].split("","")] + #cors = CORS(app, resources={r""/AION/*"": {""origins"": [""http://localhost"", ""http://localhost:5000""]}}) + cors = CORS(app, resources={r""/AION/*"": {""origins"": d[""cors""]}}) +api = Api(app) + +class sqlite_db(): + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + '.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self," +"table_name,condition=''): + if condition == '': + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + else: + return pd.read_sql_query(f""SELECT * FROM {table_name} WHERE {condition}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def update(self,table_name,updates,condition): + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + return True + def write(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + def close(self): + self.conn.close()"""""" + filedata += """""" +app = Flask(__name__) +api = Api(app) + +class predictapi(Resource): + + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('metrices'): + data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',""noOfActualCalls"":'0',""mid"":'0'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) + data = request.get_json() + output = predict().run(json.dumps(data)) + outputobj = 
json.loads(output) + if outputobj['status'] == 'SUCCESS': + try: + df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') + if not sqlite_dbObj.table_exists('prodData'): + sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) + sqlite_dbObj.write(df2,'prodData') + except: + pass + try: + data = sqlite_dbObj.read('metrices') + #print(data) + if len(data) == 0: + data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',""noOfActualCalls"":'0'}] + data = pd.read_json(json.dumps(data), orient ='records') + sqlite_dbObj.write(data,'metrices') + else: + noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 + sqlite_dbObj.update('metrices',""noOfPredictCalls = '""+str(noofPredictCalls)+""'"",""mid = 0"") + except Exception as e: + print(e) + pass + return jsonify(json.loads(output)) +class predictfileapi(Resource): + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('metrices'): + data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',""noOfActualCalls"":'0',""mid"":'0'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) + if 'file' in request.files: + file = request.files['file'] + urlData = file.read() + rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) + data = rawData.to_json(orient='records') + output = predict().run(data) + outputobj = json.loads(output) + if outputobj['status'] == 'SUCCESS': + try: + df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') + if not sqlite_dbObj.table_exists('prodData'): + sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) + sqlite_dbObj.write(df2,'prodData') + except: + pass + try: + data = sqlite_dbObj.read('metrices') + #print(data) + if len(data) == 0: + data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',""noOfActualCalls"":'0'}] + data = pd.read_json(json.dumps(data), orient ='records') + sqlite_dbObj.write(data,'metrices') + else: + noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 + sqlite_dbObj.update('metrices',""noOfPredictCalls = '""+str(noofPredictCalls)+""'"",""mid = 0"") + except Exception as e: + print(e) + pass + return jsonify(json.loads(output)) + else: + output = {'status':'error','msg':'File is missing'} + return jsonify(output) + """""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +class explainapi(Resource): + + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + if is_explainable: + output = local_analysis(json.dumps(data)) + else: + output = json.dumps({""status"":""FAIL"",""data"":""explain api is not supported when text features are used for training""}) + return jsonify(json.loads(output)) + +class monitoringapi(Resource): + + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('monitoring'): + data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) + trainingDataPath = 
(modelPath/'data')/'preprocesseddata.csv.gz' + if not sqlite_dbObj.table_exists('prodData'): + return jsonify({'status':'Error','msg':'Prod data not available'}) + data = sqlite_dbObj.read('prodData') + filetimestamp = str(int(time.time())) + dataFile = dataPath/('AION_' + filetimestamp+'.csv') + data.to_csv(dataFile, index=False) + data = request.get_json() + data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} + output = drift(json.dumps(data)) + outputData = json.loads(output) + status = outputData['status'] + + if status == 'SUCCESS': + Msg = str(outputData['data']) + else: + Msg = 'Error during drift analysis' + now = datetime.now() # current date and time + date_time = now.strftime(""%m/%d/%Y, %H:%M:%S"") + data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.write(data,'monitoring') + return jsonify(json.loads(output))"""""" + filedata += """""" + +class matricesapi(Resource): + + def get(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if sqlite_dbObj.table_exists('metrices'): + df1 = sqlite_dbObj.read('metrices') + else: + df1 = pd.DataFrame() + #print(df1) + if sqlite_dbObj.table_exists('monitoring'): + df2 = sqlite_dbObj.read('monitoring') + else: + df2 = pd.DataFrame() + + msg = {'Deployed Version':str(modelVersion)} + + if df1.shape[0] > 0: + msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])}) + else: + msg.update({'noOfPredictCalls':'0'}) + driftDetails = [] + for idx in reversed(df2.index): + driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])} + driftDetails.append(driftd) + msg.update({'driftDetails':driftDetails}) + return jsonify(msg) + +class performanceapi(Resource): + + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('monitoring'): + data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) + trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' + if not sqlite_dbObj.table_exists('prodData'): + return jsonify({'status':'Error','msg':'Prod data not available'}) + data = sqlite_dbObj.read('prodData') + filetimestamp = str(int(time.time())) + dataFile = dataPath/('AION_' + filetimestamp+'.csv') + data.to_csv(dataFile, index=False) + data = request.get_json() + data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} + output = odrift(json.dumps(data)) + return jsonify(json.loads(output)) + """""" + filedata += """""" +api.add_resource(predictapi, '/AION/{serviceName}/predict') +api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file') +api.add_resource(matricesapi, '/AION/{serviceName}/metrices')"""""".format(serviceName=usecaseid) + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +api.add_resource(explainapi, '/AION/{serviceName}/explain') +api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') +api.add_resource(performanceapi, '/AION/{serviceName}/performance') +"""""".format(serviceName=usecaseid) + filedata += """""" +if __name__ == '__main__': + args = parser.parse_args() + app.run(args.ipaddress,port = args.port,debug = True)"""""" 
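+        # The assembled Flask publish service is written once (only when the file does not already
+        # exist) together with a config.json holding the deployed model version.
+        # Illustrative launch of the generated service (argument values are assumptions):
+        #   python aion_publish_service.py -ip 0.0.0.0 -p 8080 -cors http://localhost:8080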
+ f = open(filename, ""wb"") + f.write(str(filedata).encode('utf8')) + f.close() + data = {'version':version} + filename = os.path.join(datalocation,'config.json') + with open(filename, ""w"") as outfile: + json.dump(data, outfile) + outfile.close() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os,sys +import platform +import json +import shutil +import logging +from pathlib import Path + +def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): + self.selectorfile += 'import pandas as pd' + self.selectorfile += '\\n' + self.selectorfile += 'import joblib' + self.selectorfile += '\\n' + self.selectorfile += 'import os' + self.selectorfile += '\\n' + self.selectorfile += 'import numpy as np' + self.selectorfile += '\\n' + self.selectorfile += 'class selector(object):' + self.selectorfile += '\\n' + self.selectorfile += ' def apply_selector(self,df):' + self.selectorfile += '\\n' + if pcaModel_pickle_file != '': + self.selectorfile += "" pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+pcaModel_pickle_file+""'))"" + self.selectorfile += '\\n' + self.selectorfile += ' bpca_features = '+str(bpca_features) + self.selectorfile += '\\n' + self.selectorfile += ' apca_features = '+str(apca_features) + self.selectorfile += '\\n' + self.selectorfile += ' df = pcaModel.transform(df[bpca_features])' + self.selectorfile += '\\n' + self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)' + self.selectorfile += '\\n' + if(len(features) != 0) and model_type != 'BM25': + if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder': + self.selectorfile += ' df = df['+str(features)+']' + self.selectorfile += '\\n' + self.selectorfile += ' return(df)' + filename = os.path.join(deploy_path,'script','selector.py') + f = open(filename, ""wb"") + self.log.info('-------> Feature Selector File Location :'+filename) + f.write(str(self.selector" +"file).encode('utf8')) + f.close() + featurefile = 'import json' + featurefile +='\\n' + featurefile += 'def getfeatures():' + featurefile +='\\n' + featurefile +=' try:' + featurefile +='\\n' + featurelist = [] + if 'profiler' in config: + if 'input_features_type' in config['profiler']: + inputfeatures = config['profiler']['input_features_type'] + for x in inputfeatures: + featurelt={} + featurelt['feature'] = x + print(x,inputfeatures[x]) + if x == targetFeature: + featurelt['Type'] = 'Target' + else: + if inputfeatures[x] in ['int','int64','float','float64']: + featurelt['Type'] = 'Numeric' + elif inputfeatures[x] == 'object': + featurelt['Type'] = 'Text' + elif inputfeatures[x] == 'category': + featurelt['Type'] = 'Category' + else: + featurelt['Type'] = 'Unknown' + featurelist.append(featurelt) + + featurefile +=' features = '+str(featurelist) + featurefile +='\\n' + featurefile +=' outputjson = {""status"":""SUCCESS"",""features"":features}' + featurefile +='\\n' + 
featurefile +=' output = json.dumps(outputjson)' + featurefile +='\\n' + featurefile +=' print(""Features:"",output)' + featurefile +='\\n' + featurefile +=' return(output)' + featurefile +='\\n' + featurefile +=' except Exception as e:' + featurefile +='\\n' + featurefile +=' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + featurefile +='\\n' + featurefile +=' print(""Features:"",json.dumps(output))' + featurefile +='\\n' + featurefile +=' return (json.dumps(output))' + featurefile +='\\n' + featurefile +='if __name__ == ""__main__"":' + featurefile +='\\n' + featurefile +=' output = getfeatures()' + filename = os.path.join(deploy_path,'featureslist.py') + f = open(filename, ""wb"") + f.write(str(featurefile).encode('utf8')) + f.close() + +def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): + self.modelfile += ' def __init__(self):' + self.modelfile += '\\n' + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()==""autoencoder""): + modelfile=modelfile.replace('.sav','') + self.modelfile+="" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'TextDL' or learner_type == 'DL'): + if modelName.lower() == 'googlemodelsearch': + self.modelfile += ' import autokeras as ak' + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))"" + self.modelfile += '\\n' + else: + if scoreParam == 'recall': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'recall': recall_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[recall_m])' + self.modelfile += '\\n' + elif scoreParam == 'precision': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'precision': precision_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[precision_m])' + self.modelfile += '\\n' + elif scoreParam == 'roc_auc': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[tf.keras.metrics.AUC()])' + self.modelfile += '\\n' + elif scoreParam == 'f1_score': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'f1_score': f1_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[f1_m])' + self.modelfile += '\\n' + elif scoreParam == 'r2': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'r2': r_square},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[r_square])' + self.modelfile += '\\n' + elif scoreParam == 
'rmse': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'rmse': rmse_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[rmse_m])' + self.modelfile += '\\n' + elif scoreParam == 'mse': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif scoreParam == 'mae': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + + elif scoreParam == 'accuracy': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + else: + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'Text Similarity'): + self.modelfile += "" self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+preprocessing_pipe+""'))"" + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})"" + self.modelfile += '\\n' + elif(learner_type in ['similarityIdentification','contextualSearch']): + if scoreParam == 'VectorDB Cosine': + vectorfiledbname = 'trainingdataVecDB' + self.modelfile += f""\\ + \\n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\\ + \\n client = chromadb.PersistentClient(path=persist_directory)\\ + \\n self.collection_name = '{vectorfiledbname}'\\ + \\n self.collection = client.get_collection(self.collection_name)\\n"" + + else: + self.modelfile += "" self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\\n\\n"" + elif(learner_type == 'ImageClassification'): + self.modelfile += ' self.config='+str(imageconfig) + self.modelfile += '\\n' + if(modelName.lower() == 'densenet'): + self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))' + else: + self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))' + self.modelfile += '\\n' + self.modelfile += ' headModel = baseModel.output' + self.modelfile += '\\n' + self.modelfile += ' headModel = Flatten(name=""flatten"")(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dense(1024, activation=\\'relu\\')(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dropout(0.5)(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dense(2, activation=\\'sigmoid\\')(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)' + self.modelfile += '\\n' + self.modelfile += ' opt = Adam(lr=self.config[\\'lr\\'])' + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=""binary_crossentropy"", optimizer=opt, metrics=[""accuracy""])' + 
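+            # For image classification the transfer-learning head (Flatten/Dense/Dropout) is rebuilt
+            # at load time and only the trained weights are restored via load_weights below.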
self.modelfile += '\\n' + self.modelfile += "" self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'objectDetection'): + self.modelfile += "" self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\\n"" + self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+""/export/pipeline.config""\\n' + self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+""/export/checkpoint/""\\n' + self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+""/export/label_map.pbtxt""\\n' + self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\\n' + self.modelfile += ' self.detection_model = model_builder.build(model_config=configs[""model""], is_training=False)\\n' + self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\\n' + self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, ""ckpt-0"")).expect_partial()\\n' + self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\\ + use_display_name=True)\\n' + elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'): + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif modelName.lower() == 'neural architecture search': + self.modelfile += ' import autokeras as ak' + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects=ak.CUSTOM_OBJECTS)"" + self.modelfile += '\\n' + else: + self.modelfile += "" self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + +def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): + self.modelfile += ' def predict(self,X,features_names):' + self.modelfile += '\\n' + + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()==""autoencoder""): + + self.modelfile += f"" X=X[{feature}]\\n"" + self.modelfile += f"" X = np.asarray(X).astype('float32')\\n"" + self.modelfile += f"" reconstructed = self.model.predict(X)\\n"" + self.modelfile += f"" predict_loss = tf.keras.losses.mae(reconstructed,X)\\n"" + self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\\n' + self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\\n' + self.modelfile += ' prediction_df = pd.DataFrame()\\n' + self.modelfile += ' prediction_df[""loss""] = predict_loss\\n' + self.modelfile += ' prediction_df[""max_threshold""] = max_threshold\\n" +"' + self.modelfile += ' prediction_df[""min_threshold""] = min_threshold\\n' + self.modelfile += ' prediction_df[""anomaly""] = np.where((prediction_df[""loss""] > prediction_df[""max_threshold""]) | (prediction_df[""loss""] <= prediction_df[""min_threshold""]), True, False)\\n' + self.modelfile += ' return prediction_df\\n' + + + elif(learner_type == 'RecommenderSystem'): + self.modelfile += ' predictions = []' + self.modelfile += '\\n' + self.modelfile += ' for index,row in X.iterrows():' + self.modelfile += '\\n' + self.modelfile += ' score = self.model.predict(int(row[""uid""]),int(row[""iid""]))' + 
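+            # Recommender predictions iterate over (uid, iid) pairs and collect the estimated
+            # rating (score.est), matching the Surprise-style predict API assumed here.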
self.modelfile += '\\n' + self.modelfile += ' predictions.append(score.est)' + self.modelfile += '\\n' + self.modelfile += ' return predictions' + elif(learner_type in ['similarityIdentification','contextualSearch']): + tfeatures = list(modelFeatures.split("","")) + if indexFeature != '' and indexFeature != 'NA': + ifeatures = indexFeature.split("","") + for ifes in ifeatures: + if ifes not in tfeatures: + tfeatures.append(ifes) + if model_type == 'BM25': + self.modelfile += f""\\n\\ + tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\\n\\ + bm25 = BM25Okapi(tokenized_corpus)\\n\\ + tokenized_query = [doc.split(' ') for doc in X.tokenize]\\n\\ + logcnt = 5\\n\\ + output = []\\n\\ + for query in tokenized_query:\\n\\ + doc_scores = bm25.get_scores(query)\\n\\ + related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\\n\\ + x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\n\\ + x['Score'] = doc_scores[related_docs_indices]\\n\\ + x['Score'] = round(x['Score'],2).astype(str)+'%'\\n\\ + output.append(x)\\n\\ + return output\\n"" + elif scoreParam == 'VectorDB Cosine': + featuresVecDB = modelFeatures.split("","") + self.modelfile += ' logcnt = 5\\n' + self.modelfile += f"" columns = {featuresVecDB}\\n"" + self.modelfile += f""\\ + \\n output = []\\ + \\n for rowindex, row in X.iterrows():\\ + \\n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\\ + \\n results = self.collection.query(\\ + \\n query_embeddings=queryembedding.tolist(),\\ + \\n n_results=logcnt\\ + \\n )\\ + \\n x = pd.DataFrame(columns=columns)\\ + \\n for i in range(0, len(results['ids'][0])):\\ + \\n documentAry = results['documents'][0][i]\\ + \\n documentAry = documentAry.split(' ~&~ ')\\ + \\n for j in range(0, len(documentAry)):\\ + \\n x.at[i,columns[j]] = documentAry[j]\\ + \\n x.at[i,'Score'] = results['distances'][0][i]\\ + \\n output.append(x)\\ + \\n return output"" + else: + self.modelfile += ' columns = self.train_input.columns.tolist()\\n' + self.modelfile += ' logcnt = 5\\n' + self.modelfile += f"" train_input = self.train_input[{tfeatures}]\\n"" + for tf in tfeatures: + self.modelfile += f"" columns.remove('{tf}')\\n"" + self.modelfile += f""\\ + \\n results = cosine_similarity(self.train_input[columns],X)\\ + \\n output = []\\ + \\n for i in range(results.shape[1]):\\ + \\n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\\ + \\n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\ + \\n scores = []\\ + \\n for j in range(0,logcnt):\\ + \\n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\\ + \\n x['Score'] = scores\\ + \\n output.append(x)\\ + \\n return output"" + elif(learner_type == 'Text Similarity'): + self.modelfile += ' X[""'+firstDocFeature+'""] = X[""'+firstDocFeature+'""].astype(str)' + self.modelfile += '\\n' + self.modelfile += ' X[""'+secondDocFeature+'""] = X[""'+secondDocFeature+'""].astype(str)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X[""'+firstDocFeature+'""].values)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X[""'+secondDocFeature+'""].values)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\\'post\\')' + self.modelfile += '\\n' + self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', 
padding=\\'post\\')' + self.modelfile += '\\n' + self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])' + self.modelfile += '\\n' + self.modelfile += ' return(prediction)' + self.modelfile += '\\n' + elif(learner_type == 'ImageClassification'): + self.modelfile += ' predictions = []' + self.modelfile += '\\n' + self.modelfile += ' for index, row in X.iterrows(): ' + self.modelfile += '\\n' + self.modelfile += ' img = cv2.imread(row[\\'imagepath\\'])' + self.modelfile += '\\n' + self.modelfile += ' img = cv2.resize(img, (self.config[\\'img_width\\'],self.config[\\'img_height\\']))' + self.modelfile += '\\n' + self.modelfile += ' img = image.img_to_array(img)' + self.modelfile += '\\n' + self.modelfile += ' img = np.expand_dims(img, axis=0)' + self.modelfile += '\\n' + self.modelfile += ' img = img/255' + self.modelfile += '\\n' + self.modelfile += ' prediction = self.model.predict(img)' + self.modelfile += '\\n' + self.modelfile += ' prediction = np.argmax(prediction,axis=1)' + self.modelfile += '\\n' + self.modelfile += ' predictions.append(prediction[0])' + self.modelfile += '\\n' + self.modelfile += ' return(predictions)' + self.modelfile += '\\n' + elif(learner_type == 'objectDetection'): + self.modelfile += ' @tf.function\\n' + self.modelfile += ' def detect_fn(image):\\n' + self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\\n' + self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\\n' + self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\\n' + self.modelfile += ' return detections\\n' + self.modelfile += ' def load_image_into_numpy_array(path):\\n' + self.modelfile += ' return np.array(Image.open(path))\\n' + self.modelfile += ' imageLocation = []\\n' + self.modelfile += ' for i, row in X.iterrows():\\n' + self.modelfile += ' if (""confidance"" in row) and row[""confidance""] <= 1.0:\\n' + self.modelfile += ' confidance = row[""confidance""]\\n' + self.modelfile += ' else:\\n' + self.modelfile += ' confidance = 0.8\\n' + self.modelfile += ' imageName = str(Path(row[""imagepath""]).stem)+""_output""+str(Path(row[""imagepath""]).suffix)\\n' + self.modelfile += ' image_np = load_image_into_numpy_array(row[""imagepath""])\\n' + self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\n' + self.modelfile += ' detections = detect_fn(input_tensor)\\n' + self.modelfile += ' num_detections = int(detections.pop(""num_detections""))\\n' + self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\\n\\ + for key, value in detections.items()}\\n' + self.modelfile += ' detections[""num_detections""] = num_detections\\n' + self.modelfile += ' detections[""detection_classes""] = detections[""detection_classes""].astype(np.int64)\\n' + self.modelfile += ' label_id_offset = 1\\n' + self.modelfile += ' image_np_with_detections = image_np.copy()\\n' + self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\\n\\ + image_np_with_detections,\\n\\ + detections[""detection_boxes""],\\n\\ + detections[""detection_classes""]+label_id_offset,\\n\\ + detections[""detection_scores""],\\n\\ + self.category_index,\\n\\ + use_normalized_coordinates=True,\\n\\ + max_boxes_to_draw=200,\\n\\ + min_score_thresh=confidance,\\n\\ + agnostic_mode=False)\\n' + + self.modelfile += ' plt.figure()\\n' + self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\\n' + self.modelfile += 
' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\\n' + self.modelfile += ' plt.show()\\n' + self.modelfile += ' return imageLocation\\n' + else: + if(learner_type == 'DL' and model != 'Neural Network'): + self.modelfile += ' X = np.expand_dims(X, axis=2)' + self.modelfile += '\\n' + if(learner_type == 'TextDL'): + self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' + self.modelfile += '\\n' + elif(learner_type == 'TextML'): + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)' + self.modelfile += '\\n' + elif(learner_type == 'DL' and model_type == 'Classification'): + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' + self.modelfile += '\\n' + else: + if(model_type == 'Classification' or model_type == 'TLClassification'): + if model == 'Neural Architecture Search': + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict(X))' + self.modelfile += '\\n' + else: + if optimizationmethod == 'genetic': + self.modelfile += '\\n' + self.modelfile += ' try:' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))' + self.modelfile += '\\n' + self.modelfile += ' except:' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict(X))' + else: + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': + self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(q.numpy())' + else: + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)' + self.modelfile += '\\n' + elif model_type == 'Regression' and model == 'NAS': + self.modelfile += \\ +"""""" + X = X.astype(np.float32) + return self.model.predict(X) +"""""" + elif(learner_type == 'TS'): + if model.lower() == 'fbprophet': + self.modelfile += ' sessonal_freq=""'+str(sessonal_freq)+'""' + self.modelfile += '\\n' + self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=int(X[""noofforecasts""][0]),freq=sessonal_freq,include_history = False)' + self.modelfile += '\\n' + if (additional_regressors): + self.modelfile += '\\n' + self.modelfile += ' additional_regressors='" +"+str(additional_regressors) + self.modelfile += '\\n' + self.modelfile += ' ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]' + self.modelfile += '\\n' + " +"cing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +from importlib.metadata import version +import sys + + +class importModule(): + + def __init__(self): + self.importModule = {} + self.stdlibModule = [] + self.localModule = {} + + def addLocalModule(self,module, mod_from=None, mod_as=None): + if module == '*': + if module not in self.localModule.keys(): + self.localModule[module]= [mod_from] + else: + self.localModule[module].append(mod_from) + elif module not in self.localModule.keys(): + self.localModule[module] = {'from':mod_from, 'as':mod_as} + + def addModule(self, module, mod_from=None, mod_as=None): + if module not in self.importModule.keys(): + self.importModule[module] = {'from':mod_from, 'as':mod_as} + if module in sys.stdlib_module_names: + self.stdlibModule.append(module) + elif isinstance(self.importModule[module], list): + if mod_as not in [x['as'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as not in [x['from'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as != self.importModule[module]['as']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + elif mod_from != self.importModule[module]['from']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + + def getModules(self): + return (self.importModule, self.stdlibModule) + + def getBaseModule(self, extra_importers=[]): + modules_alias = { 'sklearn':'scikit-learn', + 'genetic_selection':'sklearn-genetic', + 'google': 'google-cloud-storage', + 'azure':'azure-storage-file-datalake'} + local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} + modules = [] + require = """" + if extra_importers: + extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] + importers_module = [self.importModule] + extra_importers + for importer_module in importers_module: + for k,v in importer_module.items(): + if v['from']: + mod = v['from'].split('.')[0] + else: + mod = k + if mod in modules_alias.keys(): + mod = modules_alias[mod] + modules.append(mod) + modules = list(set(modules)) + for mod in modules: + try: + if mod in local_modules.keys(): + require += f""{local_modules[mod]}\\n"" + else: + require += f""{mod}=={version(mod)}\\n"" + except : + if mod not in sys.stdlib_module_names: + raise + return require + + def getCode(self): + def to_string(k, v): + mod = '' + if v['from']: + mod += 'from {} '.format(v['from']) + mod += 'import {}'.format(k) + if v['as']: + mod += ' as {} '.format(v['as']) + return mod + + modules = """" + local_modules = """" + std_lib_modules = """" + third_party_modules = """" + for k,v in self.importModule.items(): + if k in self.stdlibModule: + std_lib_modules = std_lib_modules + '\\n' + to_string(k, v) + elif isinstance(v, dict): + third_party_modules = third_party_modules + '\\n' + to_string(k, v) + elif isinstance(v, list): + for alias in v: + third_party_modules = third_party_modules + '\\n' + to_string(k, alias) + for k,v in self.localModule.items(): + if k != '*': + local_modules = local_modules + '\\n' + to_string(k, v) + else: + for mod_from in v: + local_modules = local_modules + '\\n' + f'from {mod_from} import {k}' + if std_lib_modules: + modules = modules + ""\\n#Standard Library modules"" + std_lib_modules + if third_party_modules: + modules = modules + ""\\n\\n#Third Party modules"" + 
third_party_modules + if local_modules: + modules = modules + ""\\n\\n#local modules"" + local_modules + '\\n' + return modules + + def copyCode(self, importer): + self.importModule, self.stdlibModule = importer.getModules() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os,sys +import platform +import json +import shutil +import logging +from pathlib import Path +from prediction_package import production +from prediction_package import prediction_transformation as cs + +class DeploymentManager: + def __init__(self): + self.requirementfile='' + self.modelfile='' + self.s2i_environmentfile='' + self.selectorfile='' + self.profilerfile='' + self.readmepackagename='' + self.pythonpackage='' + self.log = logging.getLogger('eion') + + def include_import_file(self,learner_type,method,scoreParam,model_type,model): + if((learner_type == 'DL') or (learner_type == 'TextDL')): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'): + self.modelfile += 'import joblib' + self.modelfile += '\\n' + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'import pandas as pd' + self.modelfile += '\\n' + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.modelfile += 'from pathlib import Path' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'from keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'import warnings' + self.modelfile += '\\n' + self.modelfile += 'from sklearn.preprocessing import StandardScaler' + self.modelfile += '\\n' + self.modelfile += 'warnings.filterwarnings(""ignore"")' + self.modelfile += '\\n' + if(learner_type == 'ImageClassification'): + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.models import Sequential' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.preprocessing import image' + self.modelfile += '\\n' + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.layers import Input' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.models import Model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.optimizers import Adam' + self.modelfile += '\\n' + self.modelfile += 'import cv2' + self.modelfile += '\\n' + if(learner_type == 'objectDetection'): + self.modelfile += 'import os\\n' + self.modelfile += 'from object_detection.utils import label_map_util\\n' + self.modelfile += 'from 
object_detection.utils import config_util\\n' + self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\\n' + self.modelfile += 'from object_detection.builders import model_builder\\n' + self.modelfile += 'import tensorflow as tf\\n' + self.modelfile += 'import numpy as np\\n' + self.modelfile += 'from PIL import Image\\n' + self.modelfile += 'import matplotlib.pyplot as plt\\n' + self.modelfile += 'import pandas as pd\\n' + self.modelfile += 'from pathlib import Path\\n' + if(learner_type == 'Text Similarity'): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + if(model == 'Neural Architecture Search'): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'import joblib' + self.modelfile += '\\n' + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'import pandas as pd' + self.modelfile += '\\n' + self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\\n' + self.modelfile += 'import numpy as np\\n' + self.modelfile += 'from pathlib import Path\\n' + if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': + self.modelfile += 'from tensorflow import constant' + self.modelfile += '\\n' + self.modelfile += 'from tf_agents.trajectories import time_step' + self.modelfile += '\\n' + self.requirementfile += 'tensorflow==2.5.0' + if model.lower() == 'lstm' or model.lower() == 'mlp': + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.requirementfile += 'tensorflow==2.5.0' + if(learner_type == 'Text Similarity'): + self.modelfile += 'def cosine_distance(vests):' + self.modelfile += '\\n'; + self.modelfile += ' x, y = vests' + self.modelfile += '\\n'; + self.modelfile += ' x = K.l2_normalize(x, axis=-1)' + self.modelfile += '\\n'; + self.modelfile += ' y = K.l2_normalize(y, axis=-1)' + self.modelfile += '\\n'; + self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)' + self.modelfile += '\\n'; + self.modelfile += 'def cos_dist_output_shape(shapes):' + self.modelfile += '\\n'; + self.modelfile += ' shape1, shape2 = shapes' + self.modelfile += '\\n'; + self.modelfile += ' return (shape1[0],1)' + self.modelfile += '\\n'; + + if(learner_type == 'TextDL' or learner_type == 'DL'): + if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'): + self.modelfile += 'def recall_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())' + self.modelfile += '\\n'; + self.modelfile += ' return recall' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'): + 
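+            # precision_m mirrors recall_m above; these Keras backend helpers are embedded so the
+            # generated predictor can recompile models that were trained with custom metrics.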
self.modelfile += 'def precision_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' precision = true_positives / (predicted_positives + K.epsilon())' + self.modelfile += '\\n'; + self.modelfile += ' return precision' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'f1_score'): + self.modelfile += 'def f1_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' precision = precision_m(y_true, y_pred)' + self.modelfile += '\\n'; + self.modelfile += ' recall = recall_m(y_true, y_pred)' + self.modelfile += '\\n';" +" + self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'rmse'): + self.modelfile += 'def rmse_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))' + self.modelfile += '\\n'; + if(scoreParam.lower() =='r2'): + self.modelfile += 'def r_square(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))' + self.modelfile += '\\n'; + self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))' + self.modelfile += '\\n'; + self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))' + self.modelfile += '\\n'; + if(learner_type.lower() in ['similarityidentification','contextualsearch']): + self.modelfile += 'from pathlib import Path\\n' + if model_type == 'BM25': + self.modelfile += 'from rank_bm25 import BM25Okapi\\n' + elif scoreParam == 'VectorDB Cosine': + self.modelfile += 'import chromadb\\n' + else: + self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\\n' + + self.pythonpackage += '========== Python Packags Requires =========' + self.pythonpackage += '\\n' + self.pythonpackage += 'scikit-learn' + self.pythonpackage += '\\n' + self.pythonpackage += 'scipy' + self.pythonpackage += '\\n' + self.pythonpackage += 'numpy' + self.pythonpackage += '\\n' + if((learner_type == 'DL') or (learner_type =='TextDL')): + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.requirementfile += 'scikit-learn==0.21.3' + self.requirementfile += '\\n' + self.requirementfile += 'scipy==1.3.3' + self.requirementfile += '\\n' + self.requirementfile += 'numpy==1.17.4' + self.requirementfile += '\\n' + + if(learner_type == 'TextML'): + self.requirementfile += 'spacy==2.2.3' + self.requirementfile += '\\n' + self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz' + self.requirementfile += '\\n' + + if(learner_type == 'DL' or learner_type == 'TextDL'): + self.requirementfile += 'keras==2.3.1' + self.requirementfile += '\\n' + self.requirementfile += 'tensorflow==2.0.0b1' + self.requirementfile += '\\n' + + if(learner_type == 'RecommenderSystem'): + self.requirementfile += 'surprise' + self.requirementfile += '\\n' + if(method == 'package'): + self.modelfile += 'import surprise' + self.modelfile += '\\n' + self.modelfile += 'import statsmodels' + self.modelfile += '\\n' + self.requirementfile += 'statsmodels==0.10.2' + self.requirementfile += '\\n' + + def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False): + self.readme='========== Files Structures ==========' + self.readme+='\\n' + 
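+        # readme.txt documents the deployed package layout and shows sample prediction calls for
+        # Windows and Linux shells, plus the expected JSON output formats.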
self.readme+=modelfile+' ------ Trained Model' + self.readme+='\\n' + self.readme+='aion_prediction.py --> Python package entry point' + self.readme+='\\n' + if not single_file: + self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric' + self.readme+='\\n' + self.readme+='script/selector.py --> Feature Selection' + self.readme+='\\n' + self.readme+='script/trained_model.py --> Read the model file and call the prediction' + self.readme+='\\n' + self.readme+='script/output_format.py --> Output formatter file' + self.readme+='\\n' + self.readme+= self.pythonpackage + self.readme+= '========== How to call the model ==========' + self.readme+='\\n' + self.readme+= '============== From Windows Terminal ==========' + self.readme+='\\n' + if method == 'optimus_package': + self.readme += 'python aion_prediction.py filename.json' + self.readme +='\\n' + self.readme += '========== Embedded Methods ==========' + self.readme +='\\n' + self.readme += 'Function Name: predict_from_json - When input is Json Data' + self.readme +='\\n' + self.readme += 'Function Name: predict_from_file - When input is Json File' + self.readme +='\\n' + else: + callpython = 'python aion_prediction.py ""[{' + for x in features: + if(callpython != 'python prediction.py ""[{'): + callpython += ',' + callpython += '\\\\\\""'+str(x)+'\\\\\\""'+':'+'\\\\\\""'+str(x)+'_value'+'\\\\\\""' + callpython += '}]""' + self.readme += callpython + self.readme+='\\n' + self.readme+= '============== From Linux Terminal ==========' + self.readme+='\\n' + callpython = 'python aion_prediction.py \\'[{' + temp =callpython + for x in features: + if(callpython != temp): + callpython += ',' + callpython += '""'+str(x)+'""'+':'+'""'+str(x)+'_value'+'""' + callpython += '}]\\'' + self.readme += callpython + self.readme+='\\n' + self.readme+= '============== Output ==========' + self.readme+='\\n' + self.readme+= '{""status"":""SUCCESS"",""data"":[{""Data1"":""Value"",""prediction"":""Value""}]}' ## For Single Row/Record' + self.readme+='\\n' + self.readme+= '{""status"":""SUCCESS"",""data"":[{""Data1"":""Value"",""prediction"":""Value""},{""Data1"":""Value"",""prediction"":""Value""}]} ## For Multiple Row/Record' + self.readme+='\\n' + self.readme+= '{""status"":""ERROR"",""message"":""description""} ## In Case Exception or Error' + self.readme+='\\n' + #print(self.readme) + filename = os.path.join(deploy_path,'readme.txt') + self.log.info('-------> Readme File Location: '+filename) + f = open(filename, ""wb"") + f.write(str(self.readme).encode('utf8')) + f.close() + def create_class(self,classname): + #self.modelfile += 'class '+classname+'(object):' + self.modelfile += 'class trained_model(object):' + self.modelfile += '\\n' + + def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''): + profiler = deploy.get('profiler',{}) + if isinstance(features, str): + features = features.split(',') + code = f"""""" +import scipy +import joblib +import numpy as np +import pandas as pd +from pathlib import Path +"""""" + if text_feature: + code += """""" +import importlib.util\\n"""""" + + if wordToNumericFeatures: + code += """""" +from word2number import w2n + +def s2n(value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan +"""""" + if 'code' in deploy.get('preprocess',{}).keys(): + code += deploy['preprocess']['code'] + + if profiler.get('conversion_method','').lower() == 'glove': + code 
+= """""" +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + from text.Embedding import load_pretrained + from text import TextProcessing + model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') + embed_size, loaded_model = load_pretrained(model_path) + self.model.set_params(text_process__vectorizer__external_model = loaded_model) + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + elif profiler.get('conversion_method','').lower() == 'fasttext': + code += """""" +def get_pretrained_model_path(): + try: + from AION.appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath + +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + if not importlib.util.find_spec('fasttext'): + raise ValueError('fastText not installed') + else: + import os + import fasttext + import fasttext.util + cwd = os.getcwd() + os.chdir(get_pretrained_model_path()) + fasttext.util.download_model('en', if_exists='ignore') + loaded_model = fasttext.load_model('cc.en.300.bin') + os.chdir(cwd) + self.model.set_params(text_process__vectorizer__external_model = loaded_model) + self.model.set_params(text_process__vectorizer__external_model_type = 'binary') + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + else: + code += """""" +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + if 'code' in deploy.get('preprocess',{}).keys(): + code += "" df = preprocess( df)\\n"" + if wordToNumericFeatures: + code += f"""""" + df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))"""""" + if profiler.get('unpreprocessed_columns'): + code += f"""""" + unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}'] + df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if profiler.get('force_numeric_conv'): + code += f"""""" + df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce') + """""" + code += f"""""" + if self.model: + df = self.model.transform(df)"""""" + code += f"""""" + columns = {output_columns} + if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) + else: + df = pd.DataFrame(df, columns=columns) + """""" + ##The below if loop for avoiding unpreprocessed column variable storing which is not used for anomaly detection + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + pass + else: + if profiler.get('unpreprocessed_columns'): + 
code += f"""""" + df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data + """""" + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + ##This below set_index is wrong, because we drop datetimefeature before profiling and doing set_index. So commented now. + # code += f"""""" + # df.set_index('{datetimeFeature}', inplace=True)"""""" + code += f"""""" + return(df,'{datetimeFeature}')\\n"""""" + else: + code += f"""""" + return(df)"""""" + return code + + + def no_profiling_code(self, features): + if isinstance(features, str): + features = features.split(',') + return f"""""" +import pandas as pd +import numpy as np + +class inputprofiler(object): + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[{features}] + """""" + + def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''): + filename = str(Path(deploy_path)/'script'/'inputprofiler.py') + if 'profiler' in config: + if model_type == 'BM25': + code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features']) + elif model == 'KaplanMeierFitter': + code = self.no_profiling_code(features) + elif model.lower() in ['arima', 'fbprophet']: #task 12627 + code = self.no_profiling_code('nooff" +"orecasts') + else: + code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature) + if code: + with open(filename,'w',encoding=""utf-8"") as f: + f.write(code) + self.log.info('-------> Profiler File Location :'+filename) + return + self.profilerfile += 'import pandas as pd' + self.profilerfile += '\\n' + self.profilerfile += 'import joblib' + self.profilerfile += '\\n' + self.profilerfile += 'import os' + self.profilerfile += '\\n' + self.profilerfile += 'from word2number import w2n' + self.profilerfile += '\\n' + + self.profilerfile += 'import numpy as np' + self.profilerfile += '\\nfrom pathlib import Path\\n' + #print(""1"") + #print(profiler) + if(learner_type == 'Text Similarity' or len(text_features) > 0): + self.profilerfile += 'from text import TextProcessing' + self.profilerfile += '\\n' + self.profilerfile += 'def textCleaning(textCorpus):' + self.profilerfile += '\\n' + self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()' + self.profilerfile += '\\n' + self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)' + self.profilerfile += '\\n' + self.profilerfile += ' return(textCorpus)' + self.profilerfile += '\\n' + + self.profilerfile += 'class inputprofiler(object):' + self.profilerfile += '\\n' + self.profilerfile += ' def s2n(self,value):' + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' x=eval(value)' + self.profilerfile += '\\n' + self.profilerfile += ' return x' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' return w2n.word_to_num(value)' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' return 
np.nan ' + self.profilerfile += '\\n' + self.profilerfile += ' def apply_profiler(self,df):' + self.profilerfile += '\\n' + if(len(wordToNumericFeatures) > 0): + for w2nFeature in wordToNumericFeatures: + if w2nFeature not in features: + continue + self.profilerfile += "" df['""+w2nFeature+""']=df['""+w2nFeature+""'].apply(lambda x: self.s2n(x))"" + self.profilerfile += '\\n' + self.profilerfile += "" df = df.replace(r'^\\s*$', np.NaN, regex=True)"" + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' df.dropna(how=""all"",axis=1,inplace=True)' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' df.fillna(0)' + self.profilerfile += '\\n' + + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/""model""/""preprocess_pipe.pkl""\\n' + self.profilerfile += ' if preprocess_path.exists():\\n' + self.profilerfile += ' model = joblib.load(preprocess_path)\\n' + if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder': + self.profilerfile += f"" df[{features}] = model.transform(df[{features}])\\n"" + else: + self.profilerfile += f"" df = model.transform(df)\\n"" + if 'operation' in profiler: + y = profiler['operation'] + for action in y: + feature = action['feature'] + #if feature not in features: + # continue + operation = action['Action'] + if(operation == 'Drop'): + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" df.drop(columns=['""+feature+""'],inplace = True)"" + self.profilerfile += '\\n' + if(operation == 'FillValue'): + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + fvalue = action['value'] + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].fillna(value='""+fvalue+""')"" + self.profilerfile += '\\n' + if(operation == 'Encoder'): + value = action['value'] + value = value.replace(""\\n"", ""\\\\n"") + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" le_dict=""+str(value) + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].apply(lambda x: le_dict.get(x,-1))"" + self.profilerfile += '\\n' + self.profilerfile += "" if -1 in df['""+feature+""'].values:"" + self.profilerfile += '\\n' + self.profilerfile += "" raise Exception('Category value of ""+feature+"" not present in training data')"" + self.profilerfile += '\\n' + if 'conversion' in profiler: + catergoryConverton = profiler['conversion'] + #print(catergoryConverton) + if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton): + self.profilerfile += "" encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))"" + self.profilerfile += '\\n' + self.profilerfile += "" CategoryFeatures = ""+str(catergoryConverton['features']) + self.profilerfile += '\\n' + if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding': + self.profilerfile += "" transformed_data = encoder.transform(df[CategoryFeatures]).toarray()"" + self.profilerfile += '\\n' + self.profilerfile += "" feature_labels = encoder.get_feature_names(CategoryFeatures)"" + self.profilerfile += '\\n' + self.profilerfile += "" transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) "" + 
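# --- Editor's note: illustrative sketch, not part of the generated file. ---
# What the emitted one-hot branch above does at prediction time, shown directly with
# scikit-learn. The emitted code calls encoder.get_feature_names(); scikit-learn
# releases from 1.0 onward expose the same labels as get_feature_names_out(), which
# is what this sketch uses. Column names here are made up for illustration.
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df = pd.DataFrame({'colour': ['red', 'blue', 'red'], 'size': [1, 2, 3]})
CategoryFeatures = ['colour']                        # normally fixed at training time
encoder = OneHotEncoder().fit(df[CategoryFeatures])  # stands in for the pickled categoryEncoder.pkl
transformed_data = encoder.transform(df[CategoryFeatures]).toarray()
feature_labels = encoder.get_feature_names_out(CategoryFeatures)
nonNormFeatures = [c for c in df.columns if c not in CategoryFeatures]
df = pd.concat([df[nonNormFeatures], pd.DataFrame(transformed_data, columns=feature_labels)], axis=1)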
self.profilerfile += '\\n' + else: + self.profilerfile += "" transformed_data = encoder.transform(df[CategoryFeatures])"" + self.profilerfile += '\\n' + self.profilerfile += "" dataColumns=list(df.columns)"" + self.profilerfile += '\\n' + self.profilerfile += "" nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))"" + self.profilerfile += '\\n' + self.profilerfile += "" dataArray=df[nonNormFeatures]"" + self.profilerfile += '\\n' + self.profilerfile += "" df = pd.concat([dataArray, transformed_data],axis=1)"" + self.profilerfile += '\\n' + y = json.loads(numericToLabel_json) + + for feature_details in y: + feature = feature_details['feature'] + if feature not in features: + continue + label = feature_details['Labels'] + bins = feature_details['Bins'] + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" cut_bins=""+str(bins) + self.profilerfile += '\\n' + self.profilerfile += "" cut_labels=""+str(label) + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = pd.cut(df['""+feature+""'],bins=cut_bins,labels=cut_labels)"" + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].fillna(value=0)"" + self.profilerfile += '\\n' + + if(len(text_features) > 0): + if(len(text_features) > 1): + self.profilerfile += ' merge_features = '+str(text_features) + self.profilerfile += '\\n' + self.profilerfile += ' df[\\'combined\\'] = df[merge_features].apply(lambda row: \\' \\'.join(row.values.astype(str)), axis=1)' + self.profilerfile += '\\n' + self.profilerfile += ' features = [\\'combined\\']' + self.profilerfile += '\\n' + else: + self.profilerfile += "" features = ""+str(text_features) + self.profilerfile += '\\n' + if model_type == 'BM25': + self.profilerfile += """"""\\ + df_text = df[features[0]] + pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) + df['tokenize'] = pipe.transform(df_text)\\n"""""".format(preprocessing_pipe=preprocessing_pipe) + elif conversion_method == 'sentenceTransformer': + self.profilerfile += """"""\\ + df_text = df[features[0]] + from sentence_transformers import SentenceTransformer + model = SentenceTransformer(\\'sentence-transformers/msmarco-distilroberta-base-v2\\') + df_vect = model.encode(df_text) + for empCol in {text_features}: + df = df.drop(columns=[empCol]) + if isinstance(df_vect, np.ndarray): + df1 = pd.DataFrame(df_vect) + else: + df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names()) + df1 = df1.add_suffix(\\'_vect\\') + df = pd.concat([df, df1],axis=1)\\n"""""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) + else: + self.profilerfile += """"""\\ + df_text = df[features[0]] + pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) + df_vect=pipe.transform(df_text) + for empCol in {text_features}: + df = df.drop(columns=[empCol]) + if isinstance(df_vect, np.ndarray): + df1 = pd.DataFrame(df_vect) + else: + df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names()) + df1 = df1.add_suffix(\\'_vect\\') + df = pd.concat([df, df1],axis=1)\\n"""""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) + + if(learner_type == 'Text Similarity'): + self.profilerfile += ' df[\\''+firstDocFeature+'\\'] = textCleaning(df[\\''+firstDocFeature+'\\'])' + self.profilerfile += '\\n' + self.profilerfile += ' 
df[\\''+secondDocFeature+'\\'] = textCleaning(df[\\''+secondDocFeature+'\\'])' + self.profilerfile += '\\n' + if len(normFeatures) > 0 and normalizer != '': + self.profilerfile += "" normFeatures = ""+str(normFeatures) + self.profilerfile += '\\n' + self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\\'..\\',\\'model\\',\\''+normalizer+'\\'))' + self.profilerfile += '\\n' + self.profilerfile += ' dataColumns=list(df.columns)' + self.profilerfile += '\\n' + self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))' + self.profilerfile += '\\n' + self.profilerfile += ' dataframe=df[normFeatures]' + self.profilerfile += '\\n' + self.profilerfile += ' transDf = normalizepipe.transform(dataframe)' + self.profilerfile += '\\n' + self.profilerfile += ' nontransDF=df[nonNormFeatures].values' + self.profilerfile += '\\n' + self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures' + self.profilerfile += '\\n' + self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, nontransDF)),columns=dataColumns)' + self.profilerfile += '\\n' + self.profilerfile += ' df=scaledDf' + self.profilerfile += '\\n' + else: + self.profilerfile += ' df=df.dropna()\\n' + self.profilerfile += ' return(df)' + filename = os.path.join(deploy_path,'script','inputprofiler.py') + self.log.info('-------> Profiler File Location :'+filename) + f = open(filename, ""w"",encoding=""utf-8"") + f.write(str(self.profilerfile)) + f.close() + + def isEnglish(self, s): + try: + s.encode(encoding='utf-8').decode('ascii') + except UnicodeDecodeError: + return False + else: + return True + + def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config" +"=None): + cs.create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config) + + def create_init_function_for_regress" +".crate_readme_file(deploy_path,saved_model,features,deployJson['method']) + from prediction_package.requirements import requirementfile + requirementfile(deploy_path,model,textFeatures,learner_type) + os.chdir(deploy_path) + textdata = False + if(learner_type == 'Text Similarity' or len(textFeatures) > 0): + textdata = True + self.create_util_folder(deploy_path,learner_type) + self.log.info('Status:- |... 
Model deployment completed') + + + def deployTSum(self,deploy_path,preTrainedModellocation): + def create_predict(preTrainedModellocation): + + text = f"""""" +import sys +import json +def predict(data): + try: + import pandas as pd + import numpy as np + from pathlib import Path + keywordsFile =Path(__file__).parent/'data'/'keywordDataBase.csv' + outputSumFile =Path(__file__).parent/'data'/'summarizedOutput.csv' + fileName=data + #print(""fileName---"",fileName) + inputDataFileFrame = pd.DataFrame() + inputDataFileFrame['Sentences']="""" + rowIndex=0 + if fileName.endswith("".pdf""): + from pypdf import PdfReader + reader = PdfReader(fileName) + number_of_pages = len(reader.pages) + text="""" + textOutputForFile="""" + OrgTextOutputForFile="""" + for i in range(number_of_pages) : + page = reader.pages[i] + text1 = page.extract_text() + text=text+text1 + import nltk + tokens = nltk.sent_tokenize(text) + for sentence in tokens: + sentence=sentence.replace(""\\\\n"", "" "") + if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) : + continue + inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) + rowIndex=rowIndex+1 + if fileName.endswith("".txt""): + data=[] + with open(fileName, ""r"",encoding=""utf-8"") as f: + data.append(f.read()) + str1 = """" + for ele in data: + str1 += ele + sentences=str1.split(""."") + count=0 + for sentence in sentences: + count += 1 + inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) + rowIndex=rowIndex+1 + inputDataFileFrame['LabelByKw']=0 + #print(inputDataFileFrame) + keywordsFileFrame=pd.read_csv(keywordsFile,encoding='utf-8') + Keyword_list = keywordsFileFrame['Keyword'].tolist() + for i in inputDataFileFrame.index: + for x in Keyword_list: + if (str(inputDataFileFrame[""Sentences""][i])).lower().find(x) != -1: + inputDataFileFrame['LabelByKw'][i]=1 + break + import pickle + from sklearn.preprocessing import LabelEncoder + pkl_filename='classificationModel.sav' + pkl_filename =Path(__file__).parent/'model'/'classificationModel.sav' + with open(pkl_filename, 'rb') as file: + pickle_model = pickle.load(file) + testsample=inputDataFileFrame[[""Sentences""]] + labelencoder = LabelEncoder() + testsample[""Sentences""] = labelencoder.fit_transform(testsample[""Sentences""]) + y_predicted = pickle_model.predict_proba(testsample) + + df=pd.DataFrame({{""SectionName"":np.nan,""Sentences"":np.nan, ""Predicted_Prob"":y_predicted[:,1]}}) + df['LabelByModel']=df['Predicted_Prob'].apply(lambda x: 0 if x <= 0.5 else 1 ) + inputDataFileFrame['LabelByModel']= df['LabelByModel'] + textToSum="""" + for i in inputDataFileFrame.index: + if (inputDataFileFrame['LabelByModel'][i] or inputDataFileFrame['LabelByKw'][i]) : + textToSum=textToSum+"" ""+inputDataFileFrame[""Sentences""][i] + stdir=r""{preTrainedModellocation}"" + stdir = stdir.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\') + from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + modelbert = AutoModelForSeq2SeqLM.from_pretrained(stdir,local_files_only=True) + tokenizer = AutoTokenizer.from_pretrained(stdir,local_files_only=True) + inputs = tokenizer(""summarize: "" + textToSum, return_tensors=""pt"", max_length=512, truncation=True) + outputs = modelbert.generate(inputs[""input_ids""], max_length=512, min_length=140, length_penalty=2.0, num_beams=4, early_stopping=True) + summarizedOutputOfSection= tokenizer.decode(outputs[0]) + summarizedOutputOfSection=summarizedOutputOfSection.replace("" "","""") + 
summarizedOutputOfSection=summarizedOutputOfSection.replace("" "","""") + sumDatadata = [summarizedOutputOfSection] + df = pd.DataFrame(sumDatadata, columns=['textSum']) + df.to_csv(outputSumFile,encoding='utf-8') + outputjson = {{""status"":""SUCCESS"",""msg"":""Press Download button to download summarized output"",""data"":summarizedOutputOfSection}} + print(""predictions:"",json.dumps(outputjson)) + return (json.dumps(outputjson)) + except KeyError as e: + output = {{""status"":""FAIL"",""message"":str(e).strip('""')}} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + output = {{""status"":""FAIL"",""message"":str(e).strip('""')}} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = predict(sys.argv[1]) + """""" + return text + deploy_path = Path(deploy_path) + aion_prediction = deploy_path/'aion_predict.py' + + with open(aion_prediction, 'w') as f: + f.write(create_predict(preTrainedModellocation)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +from pathlib import Path +from AION.prediction_package.imports import importModule +from AION.prediction_package.aion_prediction import aionPrediction +from AION.prediction_package.utility import TAB_CHAR +from AION.prediction_package import utility +from AION.prediction_package import common +from AION.prediction_package.base import deployer + + +def is_supported(problem_type, algo=None): + """""" + Return True if problem_type supported otherwise False + """""" + supported = ['classification','regression','clustering','timeseriesforecasting','Text Similarity'] + return problem_type in supported + +def get_deployer(problem_type, algo=None, params={}): + """""" + Return deployer class object based on problem type + Raise error if no class is associated with problem type + """""" + params['problem_type'] = problem_type + if problem_type == 'classification': + return classification( params) + elif problem_type == 'regression': + return regression( params) + elif problem_type == 'clustering': + return clustering( params) + elif problem_type == 'timeseriesforecasting': + from AION.prediction_package.time_series import forecasting + return forecasting.get_deployer( params) + elif problem_type == 'Text Similarity': + return textSimilarity( params) + else: + raise ValueError('deployment is not supported') + +class classification( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.feature_reducer = False + if not self.name: + self.name = 'classification' + + def create_idrift(self): + obj = aionPrediction() + if self.params['features']['text_feat']: + obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) + else: + obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) + + def create_odrift(self): + obj = aionPrediction() + if self.params['features']['text_feat']: + 
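# --- Editor's note: illustrative sketch, not part of the original module. ---
# For a plain joblib-serialised classifier, the trainer class emitted by
# training_code() a little further down reduces to the pattern below: load the
# fitted estimator and return class probabilities keyed by model.classes_.
# The model_file path is a placeholder.
import joblib
import numpy as np
import pandas as pd

def run_trained_classifier(model_file, df):
    model = joblib.load(model_file)     # e.g. <deploy_path>/model/<model_file>
    df = df.astype(np.float32)
    return pd.DataFrame(model.predict_proba(df), columns=model.classes_)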
obj.create_classification_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) + else: + obj.create_classification_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + code = f"""""" +class trainer(): +"""""" + init_code, run_code = self._get_train_code() + return code + init_code + run_code + + def _get_train_code(self): + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}')"""""" + + run_code = f"""""" + def run(self, df):\\ +"""""" + if self.params['training']['algo'] in ['Neural Network']: + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code += f"""""" + self.model = load_model(model_file) +"""""" + run_code += """""" + df = df.astype(np.float32) + return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) +"""""" + elif self.params['training']['algo'] in ['Neural Architecture Search']: + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + self.importer.addModule(module='autokeras',mod_as='ak') + init_code += f"""""" + self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) +"""""" + run_code += """""" + df = df.astype(np.float32) + return pd.DataFrame(self.model.predict(df)) +"""""" + elif self.params['training']['algo'] in ['Deep Q Network','Dueling Deep Q Network']: + self.importer.addModule('joblib') + self.importer.addModule(module='numpy',mod_as='np') + self.importer.addModule(module='constant',mod_from='tensorflow') + self.importer.addModule(module='time_step',mod_from='tf_agents.trajectories') + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + df = df.astype(np.float32) + q, _ = self.model(np.array(df), step_type=constant([time_step.StepType.FIRST] * np.array(df).shape[0]), training=False) + return pd.DataFrame(q.numpy()) +"""""" + elif self.params['training']['algo'] in ['Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: + self.importer.addModule(module='numpy',mod_as='np') + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code += f"""""" + self.model = load_model(model_file) +"""""" + run_code += """""" + df = np.expand_dims(df, axis=2) + df = df.astype(np.float32) + return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) +"""""" + else: + self.importer.addModule(module='joblib') + self.importer.addModule(module='numpy',mod_as='np') + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + df = df.astype(np.float32) + return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_) + """""" + return init_code, run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('joblib') + self.importer.addModule('pandas', mod_as='pd') + return """""" +class output_format(): + + def __init__(self): + pass + + def run(self, raw_df, output): + output = round(output,2) + encoder_file = (Path(__file__).parent/""model"")/""label_encoder.pkl"" + if encoder_file.exists(): + encoder = joblib.load(encoder_file) + output.rename(columns=dict(zip(output.columns, 
encoder.inverse_transform(list(output.columns)))), inplace=True) + raw_df['prediction'] = output.idxmax(axis=1) + raw_df['probability'] = output.max(axis=1).round(2) + raw_df['remarks'] = output.apply(lambda x: x.to_json(double_precision=2), axis=1) + outputjson = raw_df.to_json(orient='records',double_precision=5) + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + """""" + +class regression( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.feature_reducer = False + if not self.name: + self.name = 'regression' + + def create_idrift(self): + obj = aionPrediction() + if self.params['features']['text_feat']: + obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) + else: + obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) + + def create_odrift(self): + obj = aionPrediction() + if self.params['features']['text_feat']: + obj.create_regression_text_performance_file(self.deploy_path,self.params['features" +"']['text_feat'],self.params['features']['target_feat']) + else: + obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + code = f"""""" +class trainer(): +"""""" + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') +"""""" + run_code = f"""""" + def run(self, df):\\ +"""""" + if self.params['training']['algo'] in ['Neural Architecture Search']: + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + self.importer.addModule(module='autokeras',mod_as='ak') + init_code += f"""""" + self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) +"""""" + run_code += """""" + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) +"""""" + elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: + self.importer.addModule(module='numpy',mod_as='np') + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code += f"""""" + self.model = load_model(model_file) +"""""" + run_code += """""" + df = np.expand_dims(df, axis=2) + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) +"""""" + else: + self.importer.addModule('joblib') + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) + """""" + return code + init_code + run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" +class output_format(): + + def __init__(self): + pass + + def run(self, raw_df, output): + raw_df['prediction'] = output[0] + raw_df['prediction'] = raw_df['prediction'].round(2) + outputjson = raw_df.to_json(orient='records',double_precision=5) + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + """""" + +class clustering( deployer): + + def __init__(self, params={}): + 
super().__init__( params) + self.feature_reducer = False + if not self.name: + self.name = 'clustering' + + def training_code( self): + self.importer.addModule('joblib') + self.importer.addModule(module='pandas',mod_as='pd') + code = f"""""" +class trainer(): +"""""" + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') +"""""" + run_code = f"""""" + def run(self, df):\\ +"""""" + if self.params['training']['algo'] == 'DBSCAN': + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + return self.model.fit_predict(df) +"""""" + else: + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + return self.model.predict(df).reshape(1, -1) + """""" + return code + init_code + run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" +class output_format(): + + def __init__(self): + pass + + def run(self, raw_df, output): + raw_df['prediction'] = output[0] + raw_df['prediction'] = raw_df['prediction'].round(2) + outputjson = raw_df.to_json(orient='records',double_precision=2) + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + """""" + return code + +if __name__ == '__main__': + config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 
'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_ve" +"ct', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 
'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 
'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 
'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', '" +"fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 
'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 
'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect'," +"'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 
'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 
'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 
'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'rem" +"ember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 
'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 
'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_" +"vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 
'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', 
'150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect'," +"'avoid_vect', 'await_vect', 
'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 
'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 
'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect'," +"'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 
'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 
'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect'," +"'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 
'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 
'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 
'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect'," +"'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 
'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 
'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_" +"vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 
'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 
'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}} + deployer = get_deployer('classification',params=config) + deployer.run( ) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import shutil +import subprocess +from os.path import expanduser +import platform +deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target') +modelname='AION_12' +version='1' +def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata): + modelPath = os.path.join(deploymentfolder) + filename = os.path.join(deploymentfolder,'docker_image') + modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py') + shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh') + aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl') + drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl') + sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages') + model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version) + docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version) + model_sitepackage = os.path.join(model_dockerSetup,'site-packages') + model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py') + model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh') + model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl') + model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl') + + try: + os.mkdir(model_dockerSetup) + except Exception as e: + print(""Error in creating Setup directpry ""+str(e)) + pass + shutil.copytree(modelPath, docker_setup) + if textdata: + shutil.copytree(sitepackage, model_sitepackage) + modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing') + ''' + try: + os.makedirs(modelpretrainpath, exist_ok=True) + except Exception as e: + print(""Error in creating Setup directpry ""+str(e)) + pass + ''' + home = 
expanduser(""~"") + if platform.system() == 'Windows': + hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing') + else: + hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing') + shutil.copytree(hostpretrainpath, modelpretrainpath) + + shutil.copyfile(modelservice, model_dockerSetupservicefile) + shutil.copyfile(shellscript, model_dockershellscript) + shutil.copyfile(aix, model_aix) + shutil.copyfile(drift,model_drift) + try: + os.mkdir(filename) + except: + pass + requirementfilename = os.path.join(model_dockerSetup,'requirements.txt') + installfilename = os.path.join(model_dockerSetup,'install.py') + dockerfile = os.path.join(model_dockerSetup,'Dockerfile') + dockerdata='FROM python:3.8-slim-buster' + dockerdata+='\\n' + if textdata: + dockerdata+='WORKDIR /root' + dockerdata+='\\n' + dockerdata+='COPY HCLT HCLT' + dockerdata+='\\n' + dockerdata+='WORKDIR /app' + dockerdata+='\\n' + dockerdata+='COPY requirements.txt requirements.txt' + dockerdata+='\\n' + dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version + dockerdata+='\\n' + if textdata: + dockerdata+='COPY site-packages site-packages' + dockerdata+='\\n' + dockerdata+='COPY install.py install.py' + dockerdata+='\\n' + dockerdata+='COPY run_modelService.py run_modelService.py' + dockerdata+='\\n' + dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl' + dockerdata+='\\n' + dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl' + dockerdata+='\\n' + dockerdata+='COPY start_" +"modelservice.sh start_modelservice.sh' + dockerdata+='\\n' + if textdata: + dockerdata+='''RUN apt-get update \\ + && apt-get install -y build-essential manpages-dev \\ + && python -m pip install --no-cache-dir --upgrade pip \\ + && python -m pip install --no-cache-dir pandas==1.2.4 \\ + && python -m pip install --no-cache-dir numpy==1.19.5 \\ + && python -m pip install --no-cache-dir joblib==1.0.1 \\ + && python -m pip install --no-cache-dir Cython==0.29.23 \\ + && mv site-packages/* /usr/local/lib/python3.8/site-packages \\ + && python -m pip install --no-cache-dir scipy==1.6.3 \\ + && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir scikit-learn==0.24.2 \\ + && python -m pip install --no-cache-dir spacy==2.2.3 \\ + && python -m pip install --no-cache-dir nltk==3.6.2 \\ + && python -m pip install --no-cache-dir textblob==0.15.3 \\ + && python -m pip install --no-cache-dir gensim==3.8.3 \\ + && python -m pip install --no-cache-dir demoji==1.1.0 \\ + && python -m pip install --no-cache-dir lxml==4.6.3 \\ + && python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \\ + && python -m pip install --no-cache-dir Unidecode==1.2.0 \\ + && python -m pip install --no-cache-dir pyspellchecker==0.6.2 \\ + && python -m pip install --no-cache-dir pycontractions==2.0.1 \\ + && python -m pip install --no-cache-dir tensorflow==2.4.1 \\ + && python -m pip install --no-cache-dir nltk==3.6.2 \\ + && python -m pip install --no-cache-dir -r requirements.txt \\ + && python install.py \\ + && chmod +x start_modelservice.sh +ENTRYPOINT [""./start_modelservice.sh""] +''' + else: + dockerdata+='''RUN apt-get update \\ + && apt-get install -y build-essential manpages-dev \\ + && python -m pip install --no-cache-dir --upgrade pip \\ + && python -m pip install --no-cache-dir pandas==1.2.4 \\ + && python -m pip install 
--no-cache-dir numpy==1.19.5 \\ + && python -m pip install --no-cache-dir joblib==1.0.1 \\ + && python -m pip install --no-cache-dir Cython==0.29.23 \\ + && python -m pip install --no-cache-dir scipy==1.6.3 \\ + && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir scikit-learn==0.24.2 \\ + && python -m pip install --no-cache-dir -r requirements.txt \\ + && chmod +x start_modelservice.sh +ENTRYPOINT [""./start_modelservice.sh""] +''' + f = open(dockerfile, ""w"") + f.write(str(dockerdata)) + f.close() + requirementdata='' + requirementdata+='word2number==1.1' + if learner_type == 'DL': + requirementdata+='\\n' + requirementdata+='tensorflow==2.5.0' + f = open(requirementfilename, ""w"") + f.write(str(requirementdata)) + f.close() + if textdata: + installfile=''' +import nltk +import ssl +try: + _create_unverified_https_context = ssl._create_unverified_context +except AttributeError: + pass +else: + ssl._create_default_https_context = _create_unverified_https_context +nltk.download('punkt') +nltk.download('wordnet') +nltk.download('stopwords') +nltk.download('averaged_perceptron_tagger')''' + f = open(installfilename, ""w"") + f.write(str(installfile)) + f.close() + try: + command = 'docker pull python:3.8-slim-buster' + os.system(command); + #subprocess.check_call([""chmod"", ""+x"", ""start_modelservice.sh""], cwd=model_dockerSetup) + subprocess.check_call([""docker"", ""build"", ""-t"",modelname.lower()+"":""+version,"".""], cwd=model_dockerSetup) + subprocess.check_call([""docker"", ""save"", ""-o"",modelname.lower()+""_""+version+"".tar"",modelname.lower()+"":""+version], cwd=model_dockerSetup) + dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+""_""+version+"".tar"") + shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+""_""+version+"".tar"")) + shutil.rmtree(model_dockerSetup) + return 'Success','SUCCESSFULLY' + except Exception as e: + print(""Error: ""+str(e)) + shutil.rmtree(model_dockerSetup) + return 'Error',str(e) + +#createDockerImage(deploymentfolder,modelname,version) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import subprocess +import os +import glob +import sys +import python_minifier +def encrypt_files(path): + cwd = os.getcwd() + secure_path = os.path.join(path,'SecuredScripts') + try: + if not os.path.exists(secure_path): + os.mkdir(secure_path) + files = [f for f in glob.glob(path + ""/*.py"")] + for file in files: + #encrypted_file_details[0] = file + #file = files[0] + #print(file) + #filename_w_dir = os.path.splitext(file) + filename_w_ext = os.path.basename(file) + filename, file_extension = os.path.splitext(filename_w_ext) + file_folder_path = os.path.join(secure_path,filename) + #print(file_folder_path) + + if not os.path.exists(file_folder_path): + os.mkdir(file_folder_path) + + # Minify python source code + minify_file = os.path.join(file_folder_path,filename+'_minify.py') + pythonfolder,_ = os.path.split(sys.executable) + pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe') + minify_command = ""\\""""+sys.executable+""\\"" \\""""+pyminify_script+ ""\\"" \\"""" + file + ""\\"" > \\"""" + minify_file+""\\"""" + subprocess.call(minify_command, shell=True) + # Change directory to folder path + os.chdir(file_folder_path) + + # Obfuscate minified file + pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe') + obfusc_commmand = ""\\""""+sys.executable+""\\"" \\""""+pyarmor_script+""\\"" obfuscate \\"""" + minify_file+""\\"""" + #print(obfusc_commmand) + subprocess.call(obfusc_commmand, shell=True) + + # Change directory to dist path + obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py') + #print(obfusc_file) + chdirpath = os.path.join(file_folder_path,'dist') + os.chdir(chdirpath) + + # Compress obfuscated file + compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py') + #print(compressed_file) + pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe') + compress_command = ""\\""""+sys.executable+""\\"" \\""""+pyminifier_script+""\\"" --gzip -o \\"""" +compressed_file + ""\\"" \\"""" + obfusc_file+""\\"""" + #print(compress_command) + subprocess.call(compress_command, shell=True) + + #compile_command = sys.executable+'-m py_compile ""' + compressed_file+'""' + #print(compile_command) + #subprocess.call(compile_command , shell=True) + #encrypted_file_details['compiled_file'] = file + #compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc') + #encrypted_file_details[1] = compiled_file + #encrypted_file_list.append(encrypted_file_details) + #encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc' + #encrypt_command = ""python "" + cwd + ""\\\\Encrypt_Key_Dcrypt.py "" + compiled_file + ' ' + encrypted_file + "" --g -e"" + #print(encrypt_command) + #subprocess.call(encrypt_command, shell=True) + #encrypted_file_list += ']' + #return(encrypted_file_list) + os.chdir(path) + except OSError as err: + print (""Creation of the directory %s failed ""+str(err)) + + +# Driver function +if __name__==""__main__"": + path = sys.argv[1] + encrypt_files(path) + + +#(base) C:\\Himanshu\\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py +#Obfuscate +#(base) C:\\Himanshu\\DataPreprocessing>pyarmor obfuscate C:\\Himanshu\\DataPreprocessing\\DataPreprocessing_minify.py +#Compression +#(base) C:\\Himanshu\\DataPreprocessing>pyminifier --gzip -o C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_compressed.py C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_minify.py +#(base) C:\\Himanshu\\DataPreprocessing>cd 
dist +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.py ""DocumentText"" ""Label"" 90 "".csv"" ""C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"" +#Compiling compressed .py to .pyc file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python -m py_compile DataPreprocessing_compressed.py +#Encrypt .pyc file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py C:\\Himanshu\\DataPreprocessing\\dist\\__pycache__\\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e +#Decrypt file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d +#Run decrypted file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc ""DocumentText"" ""Label"" 90 "".csv"" ""C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"" ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import sys +import subprocess +import glob +import shutil +import time +from aion_deployment.EncryptPythonSourceCode import encrypt_files +import json +def encrypt(alldirs): + for dir in alldirs: + try: + encrypt_files(dir) + except Exception as error_obj: + print(""Exception in encrypting"", error_obj) + print(""-""*50) +def replace_by_compressed(alldirs): + for dir in alldirs: + try: + #print(""Processing dir"", dir) + files = [f for f in glob.glob(dir + ""/*.py"")] + secure_path = os.path.join(dir, 'SecuredScripts') + time.sleep(6) + for file in files: + try: + filename_w_ext = os.path.basename(file) + filename, file_extension = os.path.splitext(filename_w_ext) + if filename == ""__init__"": + continue + #print(""Processing file"", file) + file_folder_path = os.path.join(secure_path, filename, 'dist') + compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py') + shutil.copy(compressed_file_path, dir) + os.remove(file) + new_compressed_file_path = os.path.join(dir, filename+'_compressed.py') + target_file_path = os.path.join(dir, filename_w_ext) + os.rename(new_compressed_file_path, target_file_path) + if filename == 'aion_prediction': + shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform')) + except Exception as error_obj: + print(""Exception in file "", error_obj) + shutil.rmtree(secure_path) + except Exception as error_obj: + print(""Exception in dir "", error_obj) + +def start_Obfuscate(path): + project_path = path + subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] + alldirs = [ + project_path, + ] + for" +"subdir in subdirs: + if(subdir != 'pytransform'): + alldirs.append(os.path.join(project_path, subdir)) + encrypt(alldirs) + replace_by_compressed(alldirs) + +if __name__==""__main__"": + project_path = sys.argv[1] + print(""project_path"", project_path) + subdirs = 
[dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] + alldirs = [ + project_path, + ] + for subdir in subdirs: + alldirs.append(os.path.join(project_path, subdir)) + encrypt(alldirs) + print(""*""*50) + replace_by_compressed(alldirs) + + +# python eion_compress.py ""C:\\Users\\ashwani.s\\Desktop\\22April\\22April\\Mohita"" ""C:\\Users\\ashwani.s\\Desktop\\eion\\eion"" > logfile.log + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import pandas as pd +import numpy as np +import scipy +import warnings +import scipy.stats as st +import logging +import json +class inputdrift(): + def __init__(self,conf): + self.log = logging.getLogger('eion') + + def get_input_drift(self,ndf,hdf,outputfolder): + selectedColumns = self.features.split(',') + dataalertcount=0 + distributionChangeColumns="""" + distributionChangeMessage=[] + for i in range(0,len(selectedColumns)): + data1=hdf[selectedColumns[i]] + data2=ndf[selectedColumns[i]] + if(data1.dtype !=""str"" and data2.dtype !=""str"" ): + cumulativeData=data1.append(data2) + teststaticValue=teststatic(self,data1,data2) + if (teststaticValue < 0.05): + distributionName1,sse1=DistributionFinder(self,data1) + distributionName2,sse2=DistributionFinder(self,data2) + if(distributionName1 == distributionName2): + dataalertcount = dataalertcount + else: + dataalertcount = dataalertcount+1 + distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+"","" + changedColumn = {} + changedColumn['Feature'] = selectedColumns[i] + changedColumn['KS_Training'] = teststaticValue + changedColumn['Training_Distribution'] = distributionName1 + changedColumn['New_Distribution'] = distributionName2 + distributionChangeMessage.append(changedColumn) + + else : + dataalertcount = dataalertcount + + else : + response =""Selected Columns should be Numerical Values"" + + if(dataalertcount == 0): + resultStatus=""Model is working as expected"" + else : + resultStatus=json.dumps(distributionChangeMessage) + + return(dataalertcount,resultStatus) + +def DistributionFinder(self,data): + try: + distributionName ="""" + sse =0.0 + KStestStatic=0.0 + dataType="""" + if(data.dtype == ""float64""): + dataType =""Continuous"" + elif(data.dtype ==""int""): + dataType=""Discrete"" + elif(data.dtype ==""int64""): + dataType=""Discrete"" + if(dataType == ""Discrete""): + distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson] + index, counts = np.unique(data.astype(int),return_counts=True) + + if(len(index)>=2): + best_sse = np.inf + y1=[] + total=sum(counts) + mean=float(sum(index*counts))/total + variance=float((sum(index**2*counts) -total*mean**2))/(total-1) + dispersion=mean/float(variance) + theta=1/float(dispersion) + r=mean*(float(theta)/1-theta) + + for j in counts: + y1.append(float(j)/total) + + pmf1=st.bernoulli.pmf(index,mean) + pmf2=st.binom.pmf(index,len(index),p=mean/len(index)) + pmf3=st.geom.pmf(index,1/float(1+mean)) + pmf4=st.nbinom.pmf(index,mean,r) + pmf5=st.poisson.pmf(index,mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) 
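+ # sse1 above and sse2-sse5 below score each candidate discrete distribution
+ # (bernoulli, binom, geom, nbinom, poisson) by the sum of squared differences
+ # between the empirical frequencies y1 and that candidate's PMF at the
+ # observed values; the loop over sselist further down keeps the candidate
+ # with the smallest positive SSE as the best-fit distribution.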
+ sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1- pmf5, 2.0)) + + sselist=[sse1,sse2,sse3,sse4,sse5] + + for i in range(0,len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName =best_distribution + sse=best_sse + + elif(dataType == ""Continuous""): + + distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] + + best_distribution = st.norm.name + best_sse = np.inf + datamin=data.min() + datamax=data.max() + nrange=datamax-datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') + + params = distribution.fit(data.astype(float)) + # Separate parts of parameters + arg = params[:-2] + loc = params[-2] + scale = params[-1] + + # Calculate fitted PDF and error with fit in distribution + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if(best_sse >sse > 0): + best_distribution = distribution.name + best_sse = sse + + distributionName =best_distribution + sse=best_sse + except: + response = str(sys.exc_info()[0]) + message='Job has Failed'+response + print(message) + return distributionName,sse + +##KStestStatic -pvalue finding +def teststatic(self,data1,data2): + try: + teststatic =st.ks_2samp(data1,data2) + pValue=0.0 + scipyVersion =scipy.__version__ + if(scipyVersion <= ""0.14.1""): + pValue =teststatic[1] + else: + pValue =teststatic.pvalue + except: + response = str(sys.exc_info()[0]) + print(""Input Drift Job Failed ""+response) + return pValue + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from pathlib import Path +from AION.prediction_package.imports import importModule +from AION.prediction_package.aion_prediction import aionPrediction +from AION.prediction_package.utility import TAB_CHAR +from AION.prediction_package import utility +from AION.prediction_package.base import deployer +from AION.prediction_package import common +import numpy as np + + + + +def get_deployer( params): + + if params['training']['algo'] == 'ARIMA': + return arima(params) + elif params['training']['algo'] == 'LSTM': + return lstm(params) + elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO': + return lstmencdec_mviuvo(params) + elif params['training']['algo'] == 'MLP': + return mlp(params) + elif params['training']['algo'] == 'VAR': + return var(params) + elif params['training']['algo'] == 'FBPROPHET': + return fbprophet(params) + else: + raise ValueError(f""Algorithm {params['training']['algo']} for time series forecasting is not supported"") + +def _profiler_code(params, importer): + """""" + This will create the profiler file based on the config file. + separated file is created as profiler is required for input drift also. + """""" + imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + utility.import_modules(importer, imported_modules) + if 'code' in params['profiler'].get('preprocess',{}).keys(): + code = params['profiler']['preprocess']['code'] + else: + code = """" + code += """""" + +class inputprofiler(): + """""" + init_code = """""" + def __init__(self): + """""" + init_code += """""" + # preprocessing + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if not preprocess_path.exists(): + raise ValueError(f'Preprocess model file not found: {preprocess_path}') + self.profiler = joblib.load(preprocess_path) + """""" + run_code = """""" + def run(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + """""" + if 'code' in params['profiler'].get('preprocess',{}).keys(): + run_code += """""" + df = preprocess( df)"""""" + if params['profiler'].get('unpreprocessed_columns'): + run_code += f"""""" + unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}'] + df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if params['profiler'].get('force_numeric_conv'): + run_code += f"""""" + df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""""" + run_code += _profiler_main_code(params) + if params['profiler'].get('unpreprocessed_columns'): + run_code += f"""""" + df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data +"""""" + run_code += """""" return df +"""""" + utility.import_modules(importer, imported_modules) + import_code = importer.getCode() + return import_code + code + init_code + run_code + +def _profiler_main_code(params): + code = f"""""" + df = self.profiler.transform(df) + columns = {params['profiler']['output_features']} + if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) + else: + df = pd.DataFrame(df, columns=columns) + """""" + return code + +class arima( deployer): + + def __init__(self, params={}): 
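+ # get_deployer() above is a small factory: it reads params['training']['algo']
+ # ('ARIMA', 'LSTM', 'ENCODER_DECODER_LSTM_MVI_UVO', 'MLP', 'VAR', 'FBPROPHET')
+ # and returns the matching deployer subclass, raising ValueError for any other
+ # algorithm. Each subclass overrides the code-emitting hooks (profiler_code,
+ # feature_engg_code, training_code, formatter_code) to generate the prediction
+ # package sources. Hedged usage sketch (keys shown are illustrative only):
+ #   ts_params = {'training': {'algo': 'ARIMA', 'model_file': '<trained_model>.sav'}}
+ #   dep = get_deployer(ts_params)
+ #   dep.run()   # assuming the shared base deployer exposes run(), as in the
+ #               # classification deployment example above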
+ super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code( self): + imported_modules = [ + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): + + def __init__(self): + pass + + def run( self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[['noofforecasts']] +"""""" + return importer.getCode()" +"+ code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df +"""""" + def training_code( " +"]==2: + prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum() + prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum() + prediction = pred + return(prediction) + + def run(self,raw_df,df): + df = self.invertTransformation(df) + df = df.to_json(orient='records',double_precision=2) + outputjson = {{""status"":""SUCCESS"",""data"":json.loads(df)}} + return(json.dumps(outputjson)) +"""""" + +class fbprophet( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code( self): + imported_modules = [ + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): + + def __init__(self): + pass + + def run( self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[['noofforecasts']] +"""""" + return importer.getCode() + code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df +"""""" + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + self.importer.addModule(module='joblib') + code = f"""""" +class trainer(): + + def __init__(self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = joblib.load(model_file) + +"""""" + code += f"""""" + def run(self,df): + sessonal_freq = '{self.params['training']['sessonal_freq']}' + ts_prophet_future = self.model.make_future_dataframe(periods=int(df[""noofforecasts""][0]),freq=sessonal_freq,include_history = False) + """""" + if (self.params['training']['additional_regressors']): + code += f"""""" + additional_regressors={self.params['training']['additional_regressors']} + ts_prophet_future[additional_regressors] = dataFrame[additional_regressors] + ts_prophet_future.reset_index(drop=True) + ts_prophet_future=ts_prophet_future.dropna() + """""" + code += """""" + train_forecast = self.model.predict(ts_prophet_future) + prophet_forecast_tail=train_forecast[[\\'ds\\', \\'yhat\\', \\'yhat_lower\\',\\'yhat_upper\\']].tail( int(df[""noofforecasts""][0])) + return(prophet_forecast_tail)"""""" + return code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" + +class output_format(): + + def __init__( self): + pass + + def run(self,raw_df,df): + df 
= df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(df)} + return(json.dumps(outputjson)) + """""" + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- +import logging +logging.getLogger('tensorflow').disabled = True +import json +import mlflow +import mlflow.sklearn +import mlflow.sagemaker as mfs +# from sklearn.ensemble import RandomForestRegressor +from sklearn.metrics import mean_squared_error +from sklearn.model_selection import train_test_split +# from sklearn import datasets +import time +import numpy as np +# Load dataset +# from sklearn.datasets import load_iris +import pickle +# Load the pickled model +# from matplotlib import pyplot +import sys +import os +import boto3 +import subprocess +import os.path +from os.path import expanduser +import platform +from pathlib import Path + + +class aionMlopsService: + def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName): + try: + self.model=model + self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy + self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly) + self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName) + self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri) + self.experiment_name=experiment_name + self.mlflow_modelname=mlflow_modelname + self.awsaccesskey_id=awsaccesskey_id + self.awssecretaccess_key=awssecretaccess_key + self.aws_session_token=aws_session_token + self.mlflow_container_name=mlflow_container_name + self.aws_region=aws_region + self.aws_id=aws_id + self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn + self.sm_app_name=sm_app_name + self.sm_deploy_option=sm_deploy_option + self.delete_ecr_repository=delete_ecr_repository + self.ecrRepositoryName=ecrRepositoryName + + from appbe.dataPath import LOG_LOCATION + sagemakerLogLocation = LOG_LOCATION + + try: + os.makedirs(sagemakerLogLocation) + except OSError as e: + if (os.path.exists(sagemakerLogLocation)): + pass + else: + raise OSError('sagemakerLogLocation error.') + + self.sagemakerLogLocation=str(sagemakerLogLocation) + + + filename_mlops = 'mlopslog_'+str(int(time.time())) + 
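+ # A per-run log file named 'mlopslog_<epoch seconds>.log' is created below
+ # under LOG_LOCATION (kept in self.sagemakerLogLocation) and bound to the
+ # 'aionMLOps' logger at DEBUG level; every later stage of this class
+ # (logging the model to mlflow, building the container, pushing it to AWS
+ # ECR and creating/checking the SageMaker endpoint) reports through it.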
filename_mlops=filename_mlops+'.log' + # filename = 'mlopsLog_'+Time() + filepath = os.path.join(self.sagemakerLogLocation, filename_mlops) + logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') + # logging.basicConfig(filename=""uq_logging.log"", format='%(asctime)s %(message)s',filemode='w') + # logging.basicConfig(filename=""uq_logging.log"", format=' %(message)s',filemode='w') + # logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO) + self.log = logging.getLogger('aionMLOps') + self.log.setLevel(logging.DEBUG) + # mlflow.set_experiment(self.experiment_name) + + except Exception as e: + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def mlflowSetPath(self,path): + track_dir=os.path.join(path,'mlruns') + uri=""file:""+str(Path(track_dir)) + return uri + + #Currently not used this delete ecr repository option + def ecr_repository_delete(self,rep_name): + # import subprocess + client = boto3.client('ecr') + repositories = client.describe_repositories() + ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True) + mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true']) + self.log.info('Success: deleted aws ecr repository which contains mlops image.') + + def check_sm_deploy_status(self,app_name): + sage_client = boto3.client('sagemaker', region_name=self.aws_region) + endpoint_description = sage_client.describe_endpoint(EndpointName=app_name) + endpoint_status = endpoint_description[""EndpointStatus""] + try: + failure_reason=endpoint_description[""FailureReason""] + self.log.info(""sagemaker end point creation failure reason is: ""+str(failure_reason)) + except: + pass + endpoint_status=str(endpoint_status) + return endpoint_status + + def invoke_sm_endpoint(self,app_name, input_json): + client = boto3.session.Session().client(""sagemaker-runtime"", self.aws_region) + + response = client.invoke_endpoint( + EndpointName=app_name, + Body=input_json, + ContentType='application/json; format=pandas-split', + ) + # preds = response['Body'].read().decode(""ascii"") + preds = response['Body'].read().decode(""ascii"") + preds = json.loads(preds) + # print(""preds: {}"".format(preds)) + return preds + + def predict_sm_app_endpoint(self,X_test): + #print(X_test) + import pandas as pd + prediction=None + AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) + AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) + AWS_SESSION_TOKEN=str(self.aws_session_token) + region = str(self.aws_region) + #Existing model deploy options + # mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) + # mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) + try: + import subprocess + cmd = 'aws configure set region_name '+region + os.system(cmd) + cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID + os.system(cmd) + cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY + os.system(cmd) + ''' + aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region]) + aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID]) + aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY]) + ''' + except: + pass + #Create a session for aws communication using aws 
boto3 lib + # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) + session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + + #X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2) + # query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient=""split"") + try: + query_input = pd.DataFrame(X_test).to_json(orient=""split"") + #print(query_input) + prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input) + # self.log.info(""sagemaker end point Prediction: \\n""+str(prediction)) + + except Exception as e: + print(e) + return prediction + + + def deleteSagemakerApp(self,app_name,region): + # import mlflow.sagemaker as mfs + # region = 'ap-south-1' + # app_name = 'aion-demo-app' + mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + # print(""AION mlops sagemaker application endpoint is deleted....\\n"") + self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name)) + + def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path): + + region = str(self.aws_region) + aws_id = str(self.aws_id) + iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn) + app_name = str(self.sm_app_name) + + model_uri = str(model_path) + app_status=False + mlflow_root_dir = None + try: + os." +"chdir(str(self.sagemakerLogLocation)) + mlflow_root_dir = os.getcwd() + self.log.info('mlflow root dir: '+str(mlflow_root_dir)) + except: + self.log.info(""path issue."") + + try: + c_status=self.check_sm_deploy_status(app_name) + #if ((c_status == ""Failed"") or (c_status == ""OutOfService"")): + if ((c_status == ""Failed"") or (c_status.lower() == ""failed"")): + app_status=False + self.log.info(""Sagemaker endpoint status: Failed.\\n"") + mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + elif ((c_status.lower() == ""inservice"") or (c_status == ""InService"")): + app_status=True + self.log.info(""Sagemaker endpoint status: InService. Running sagemaker endpoint name: \\n""+str(app_name)) + else: + app_status=False + pass + except: + # print(""deploy status error.\\n"") + pass + + #aws ecr model app_name should contain only [[a-zA-Z0-9-]] + import re + if app_name: + pattern = re.compile(""[A-Za-z0-9-]+"") + # if found match (entire string matches pattern) + if pattern.fullmatch(app_name) is not None: + #print(""Found match: "") + pass + else: + app_name = 'aion-demo-app' + else: + app_name = 'aion-demo-app' + + mlflow_image=mlflow_container_name+':'+tag_id + image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image + deploy_option=""create"" + self.log.info('deploy_option: \\n'+str(deploy_option)) + if (deploy_option.lower() == ""create""): + # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE + if not (app_status): + try: + mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode=""create"",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url) + self.log.info('sagemaker endpoint created and model deployed. 
Application name is: \\n'+str(app_name)) + except: + self.log.info('Creating end point application issue.Please check the connection and aws credentials \\n') + else: + self.log.info('Sagemaker application with user endpoint name already running.Please check. Please delete the old endpoint with same name.\\n') + + + elif (deploy_option.lower() == ""delete""): + # import mlflow.sagemaker as mfs + # # region = 'ap-south-1' + # # app_name = 'aion-demo-app' + # mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + # print(""Mlflow sagemaker application endpoint is deleted....\\n"") + # self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name)) + pass + elif (deploy_option.lower() == ""add""): + pass + elif (deploy_option.lower() == ""replace""): + pass + else: + pass + + return app_status + + def mlflow2sagemaker_deploy(self): + self.log.info(' ') + deploy_status=False + app_name = str(self.sm_app_name) + self.log.info('Sagemaker Application Name: '+str(app_name)) + + uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation) + mlflow.set_tracking_uri(uri_mlflow) + mlops_trackuri=mlflow.get_tracking_uri() + mlops_trackuri=str(mlops_trackuri) + self.log.info('mlops tracking uri: '+str(mlops_trackuri)) + localhost_deploy=False + try: + #Loading aion model to deploy in sagemaker + mlflow.set_experiment(self.experiment_name) + self.log.info('Endpoint Name: '+str(self.experiment_name)) + # Assume, the model already loaded from joblib in aionmlflow2smInterface.py file. + aionmodel2deploy=self.model + # run_id = None + # experiment_id=None + + + # Use the loaded pickled model to make predictions + # pred = knn_from_pickle.predict(X_test) + with mlflow.start_run(run_name='AIONMLOps') as run: + + # aionmodel2deploy.fit(X_train, y_train) + # predictions = aionmodel2deploy.predict(X_test) + mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname) + run_id = run.info.run_uuid + experiment_id = run.info.experiment_id + self.log.info('AION mlops experiment run_id: '+str(run_id)) + self.log.info('AION mlops experiment experiment_id: '+str(experiment_id)) + self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname)) + artifact_uri = {mlflow.get_artifact_uri()} + # print(""1.artifact_uri: \\n"",artifact_uri) + mlflow.end_run() + #If we need, we can check the mlflow experiments. + # try: + # mlflow_client = mlflow.tracking.MlflowClient('./mlruns') + # exp_list = mlflow_client.list_experiments() + # except: + # pass + #print(""mlflow exp_list: \\n"",exp_list) + mlflow_modelname=str(self.mlflow_modelname) + + mlops_trackuri=mlops_trackuri.replace('file:','') + mlops_trackuri=str(mlops_trackuri) + # mlflow_root_dir = os.getcwd() + mlflow_root_dir = None + try: + os.chdir(str(self.sagemakerLogLocation)) + mlflow_root_dir = os.getcwd() + self.log.info('mlflow root dir: '+str(mlflow_root_dir)) + except: + self.log.info(""path issue."") + model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname) + # model_path=mlops_trackuri+'\\\\%s\\\\%s\\\\artifacts\\\\%s' % (experiment_id, run_id,mlflow_modelname) + self.log.info(""local host aion mlops model_path is: ""+str(model_path)) + time.sleep(2) + + + #print(""Environment variable setup in the current working dir for aws sagemaker cli connection... \\n"") + self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... 
\\n ') + AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) + AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) + AWS_SESSION_TOKEN=str(self.aws_session_token) + region = str(self.aws_region) + #Existing model deploy options + mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) + mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) + import subprocess + cmd = 'aws configure set region_name '+region + os.system(cmd) + cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID + os.system(cmd) + cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY + os.system(cmd) + #Create a session for aws communication using aws boto3 lib + # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) + session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # session = boto3.session.Session( + # aws_access_key_id=AWS_ACCESS_KEY_ID, + # aws_secret_access_key=AWS_SECRET_ACCESS_KEY, + # aws_session_token=AWS_SESSION_TOKEN + # ) + # awsclient = session.resource('ecr') + # s3 = session.resource('s3') + self.log.info('aws environment variable setup done... \\n') + try: + os.chdir(mlflow_root_dir) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) + + mlflow_container_name=str(self.mlflow_container_name) + mlflow_version=mlflow.__version__ + tag_id=mlflow_version + + + if (self.mlflowtosagemakerPushOnly.lower() == ""true""): + self.log.info('Selected option is \\n') + aws_id=str(self.aws_id) + arn=str(self.iam_sagemakerfullaccess_arn) + mlflow_image=mlflow_container_name+':'+tag_id + image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image + # print(""image_url:========= \\n"",image_url) + deploy_status=True + try: + model_path=mlflowtosagemakerdeployModeluri + # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. + self.log.info('Deploy existing model container-Model path given by user: '+str(model_path)) + try: + os.chdir(model_path) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + + try: + mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName) + deploy_status=True + self.log.info('AION mlops pushed the docker container to aws ecr. \\n ') + except: + self.log.info(""error in pushing existing container to ecr.\\n"") + deploy_status=False + + + time.sleep(2) + #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. + try: + # print("" Changing directory to mlflow root dir....\\n"") + os.chdir(mlflow_root_dir) + except FileNotFoundError: + self.log.info('model path is not a directory. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model path is not a directory. 
'+str(mlflow_root_dir)) + # print(""{0} is not a directory"".format(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) + + # self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) + try: + if (deploy_status): + self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) + self.log.info('AION creates docker container and push the container into aws ecr.. ') + time.sleep(2) + except: + self.log.info('AION deploy error.check connection and aws config parameters. ') + deploy_status=False + # self.log.info('model deployed in sagemaker. ') + except Exception as e: + self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \\n'+str(e)) + elif (self.mlflowtosagemakerPushOnly.lower() == ""false""): + if (self.mlflowtosagemakerDeploy.lower() == ""true""): + self.log.info('Selected option is \\n') + deploy_status=True + try: + # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. + try: + os.chdir(model_path) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + try: + mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name]) + self.log.info('AION mlops creates docker container and push the container into aws ecr.. ') + deploy_status=True + time.sleep(2) + except: + self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') + deploy_status=False + self.log.info('Now deploying the model container to sagemaker starts....\\n ') + # Once docker push completes, again going back to mlflow parent dir for deployment + #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. + try: + os.chdir(mlflow_root_dir) + except" +"FileNotFoundError: + self.log.info('model_path does not exist. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) + + # app_name = str(self.sm_app_name) + try: + if (deploy_status): + self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path) + except: + self.log.info('mlops deploy error.check connection') + deploy_status=False + + except Exception as e: + exc = {""status"":""FAIL"",""message"":str(e).strip('""')} + out_exc = json.dumps(exc) + self.log.info('mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n') + + elif(self.mlflowtosagemakerDeploy.lower() == ""false""): + deploy_status=False + localhost_deploy=True + self.log.info('Selected option is \\n') + self.log.info(""User selected create-Deploy sagemaker option as False,"") + self.log.info(""Creates the AION mlops-sagemaker container locally starting,but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. "") + try: + # ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns. 
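+ # In this branch (mlflowtosagemakerDeploy == 'false') the image is only built
+ # into the local docker repository via '--no-push'; nothing is uploaded to AWS
+ # ECR and no SageMaker endpoint is created. Roughly the equivalent manual steps
+ # (hedged sketch, placeholders in angle brackets):
+ #   cd <mlruns>/<experiment_id>/<run_id>/artifacts/<mlflow_modelname>
+ #   mlflow sagemaker build-and-push-container --build --no-push --container <container_name>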
+ try: + os.chdir(model_path) + self.log.info('After change to AION mlops model dir, cwd: '+str(model_path)) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + + # mlflow_container_local=subprocess.run(['AION mlops', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) + + try: + if not (deploy_status): + mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) + self.log.info('AION creates local host bsed docker container and push the container local docker repository. Check with command.\\n ') + localhost_deploy=True + time.sleep(2) + except: + self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') + deploy_status=False + localhost_deploy=False + + # print(""AION mlops creates docker container and push the container into aws ecr.\\n"") + self.log.info('AION mlops creates docker container and stored locally... ') + time.sleep(2) + except Exception as e: + localhost_deploy=False + # print(""mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n"") + self.log.info('AION mlops failed to creates docker container in local machine.\\n'+str(e)) + else: + self.log.info('Deploy option not selected, Please check. ') + localhost_deploy=False + deploy_status=False + else: + pass + + + + + + localhost_container_status=""Notdeployed"" + mlflow2sm_deploy_status=""Notdeployed"" + if localhost_deploy: + localhost_container_status=""success"" + mlflow2sm_deploy_status=""Notdeployed"" + # print(""AION creates local docker container successfully.Please check in docker repository."") + self.log.info(""AION creates local docker container successfully.Please check in docker repository."") + # else: + # localhost_container_status=""failed"" + # # print(""AION failed to create local docker container successfully.Please check in docker repository."") + # self.log.info(""AION failed to create local docker container successfully.Please check in docker repository."") + + if (deploy_status): + # Finally checking whether mlops model is deployed to sagemaker or not. 
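+ # check_sm_deploy_status() wraps boto3 describe_endpoint(); an 'InService'
+ # endpoint maps to mlflow2sm_deploy_status='success' and anything else to
+ # 'failed', while a local-only build leaves the SageMaker status as
+ # 'Notdeployed' and reports localhost_container_status='success' instead.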
+ app_name = str(self.sm_app_name) + deploy_s = self.check_sm_deploy_status(app_name) + if (deploy_s == ""InService""): + # print(""AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n"") + self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n'+str(app_name)) + mlflow2sm_deploy_status=""success"" + localhost_container_status=""Notdeployed"" + else: + # print(""AION Mlflow model not able to deploy at aws sagemaker\\n"") + self.log.info('AION mlops model not able to deploy at aws sagemaker.\\n') + mlflow2sm_deploy_status=""failed"" + localhost_container_status=""Notdeployed"" + + # else: + # mlflow2sm_deploy_status=""None"" + + + return mlflow2sm_deploy_status,localhost_container_status + + except Exception as inst: + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' from kafka import KafkaConsumer +from json import loads +import pandas as pd +import json +import os,sys +import time +import multiprocessing +from os.path import expanduser +import platform +import datetime +modelDetails = {} +class Process(multiprocessing.Process): + def __init__(self, modelSignature,jsonData,predictedData,modelpath): + super(Process, self).__init__() + self.config = jsonData + self.modelSignature = modelSignature + self.data = predictedData + self.modelpath = modelpath + def run(self): + #data = pd.json_normalize(self.data) + minotoringService = self.config['minotoringService']['url'] + trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature] + #filetimestamp = 'AION_'+str(int(time.time()))+'.csv' + #data.to_csv(dataFile, index=False) + inputFieldsJson = {""trainingDataLocation"":trainingdatalocation,""currentDataLocation"":self.data} + inputFieldsJson = json.dumps(inputFieldsJson) + ser_url = minotoringService+self.modelSignature+'/monitoring' + driftTime = datetime.datetime.now() + import requests + try: + response = requests.post(ser_url, data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + print(decoded_data) + status = decoded_data['status'] + msg = decoded_data['data'] + except Exception as inst: + if 'Failed to establish a new connection' in str(inst): + status = 'Fail' + msg = 'AION Service needs to be started' + else: + status = 'Fail' + msg = 'Error during Drift Analysis' + statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv') + df = pd.DataFrame(columns = ['dateTime', 'status', 'msg']) + df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True) + + print(df) + if (os.path.exists(statusFile)): + df.to_csv(statusFile, mode='a', header=False,index=False) + else: + df.to_csv(statusFile, header=True,index=False) + + + + +def launch_kafka_consumer(): + from appbe.dataPath import 
DATA_DIR + configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf') + with open(configfile,'r',encoding='utf-8') as f: + jsonData = json.load(f) + f.close() + kafkaIP=jsonData['kafkaCluster']['ip'] + kafkaport = jsonData['kafkaCluster']['port'] + topic = jsonData['kafkaCluster']['topic'] + kafkaurl = kafkaIP+':'+kafkaport + if jsonData['database']['csv'] == 'True': + database = 'csv' + elif jsonData['database']['mySql'] == 'True': + database = 'mySql' + else: + database = 'csv' + kafkaPath = os.path.join(DATA_DIR,'kafka') + if not (os.path.exists(kafkaPath)): + try: + os.makedirs(kafkaPath) + except OSError as e: + pass + consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8'))) + for message in consumer: + message = message.value + data = message['data'] + data = pd.json_normalize(data) + modelname = message['usecasename'] + version = message['version'] + modelSignature = modelname+'_'+str(version) + modelpath = os.path.join(kafkaPath,modelSignature) + try: + os.makedirs(modelpath) + except OSError as e: + pass + secondsSinceEpoch = time.time() + if modelSignature not in modelDetails: + modelDetails[modelSignature] = {} + modelDetails[modelSignature]['startTime'] = secondsSinceEpoch + if database == 'csv': + csvfile = os.path.join(modelpath,modelSignature+'.csv') + if (os.path.exists(csvfile)): + data.to_csv(csvfile, mode='a', header=False,index=False) + else: + data.to_csv(csvfile, header=True,index=False) + modelTimeFrame = jsonData['timeFrame'][modelSignature] + currentseconds = time.time() + print(currentseconds - modelDetails[modelSignature]['startTime']) + if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame): + csv_path = os.path.join(modelpath,modelSignature+'.csv') + #predictedData = pd.read_csv(csv_path) + ##predictedData = predictedData.to_json(orient=""records"") + index = Process(modelSignature,jsonData,csv_path,modelpath) + index.start() + modelDetails[modelSignature]['startTime'] = secondsSinceEpoch + + import os +import shutil +import sys +import subprocess +from os.path import expanduser +import platform +import json + +def createDockerImage(model_name,model_version,module,folderpath): + command = 'docker pull python:3.8-slim-buster' + os.system(command); + subprocess.check_call([""docker"", ""build"", ""-t"",module+'_'+model_name.lower()+"":""+model_version,"".""], cwd=folderpath) + +def local_docker_build(config): + print(config) + config = json.loads(config) + model_name = config['usecase'] + model_version = config['version'] + mlaac__code_path = config['mlacPath'] + docker_images = {} + docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version + dataset_addr = os.path.join(mlaac__code_path,'ModelMonitoring') + createDockerImage(model_name,model_version,'modelmonitoring',dataset_addr) + docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version + dataset_addr = os.path.join(mlaac__code_path,'DataIngestion') + createDockerImage(model_name,model_version,'dataingestion',dataset_addr) + transformer_addr = os.path.join(mlaac__code_path,'DataTransformation') + docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version + createDockerImage(model_name,model_version,'datatransformation',transformer_addr) + featureengineering_addr = os.path.join(mlaac__code_path,'FeatureEngineering') 
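+ # Each MLaC stage follows the same pattern: the image is tagged '<stage>_<usecasename>:<version>'
+ # and built from the matching sub-folder of the generated MLaC code path,
+ # e.g. 'featureengineering_myusecase:1' (use case name and version here are illustrative).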
+ docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version + createDockerImage(model_name,model_version,'featureengineering',featureengineering_addr) + from os import listdir + arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith(""ModelTraining"")] + docker_training_images = [] + for x in arr: + dockertraing={} + dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version + docker_training_images.append(dockertraing) + training_addri = os.path.join(mlaac__code_path,x) + createDockerImage(model_name,model_version,str(x).lower(),training_addri) + docker_images['ModelTraining'] = docker_training_images + docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version + deploy_addr = os.path.join(mlaac__code_path,'ModelRegistry') + createDockerImage(model_name,model_version,'modelregistry',deploy_addr) + docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version + deploy_addr = os.path.join(mlaac__code_path,'ModelServing') + createDockerImage(model_name,model_version,'modelserving',deploy_addr) + outputjsonFile = os.path.join(mlaac__code_path,'dockerlist.json') + with open(outputjsonFile, 'w') as f: + json.dump(docker_images, f) + f.close() + output = {'Status':'Success','Msg':outputjsonFile} + output = json.dumps(output) + print(""aion_build_container:"",output) import docker +import json +import logging +def read_json(file_path): + data = None + " +"with open(file_path,'r') as f: + data = json.load(f) + return data + +def run_pipeline(inputconfig): + inputconfig = json.loads(inputconfig) + logfilepath = inputconfig['logfilepath'] + logging.basicConfig(level=logging.INFO,filename =logfilepath) + usecasename = inputconfig['usecase'] + logging.info(""UseCaseName :""+str(usecasename)) + version = inputconfig['version'] + logging.info(""version :""+str(version)) + config = inputconfig['dockerlist'] + persistancevolume = inputconfig['persistancevolume'] + logging.info(""PersistanceVolume :""+str(persistancevolume)) + datasetpath = inputconfig['datasetpath'] + logging.info(""DataSet Path :""+str(datasetpath)) + config = read_json(config) + client = docker.from_env() + inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath} + inputconfig = json.dumps(inputconfig) + inputconfig = inputconfig.replace('""', '\\\\""') + logging.info(""===== Model Monitoring Container Start ====="") + outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelMonitoring: '+str(outputStr)) + print('ModelMonitoring: '+str(outputStr)) + logging.info(""===== ModelMonitoring Stop ====="") + logging.info(""===== Data Ingestion Container Start ====="") + outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('DataIngestion: '+str(outputStr)) + print('DataIngestion: '+str(outputStr)) + logging.info(""===== Data Ingestion Container Stop ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + if status != 'Success': + output = {'Status':'Error','Msg':'Data Ingestion Fails'} + logging.info(""===== Transformation Container Start ====="") + outputStr = client.containers.run(config['DataTransformation'],'python 
code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('Data Transformations: '+str(outputStr)) + print('Data Transformations: '+str(outputStr)) + logging.info(""===== Transformation Container Done ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + if status != 'Success': + output = {'Status':'Error','Msg':'Data Transformations Fails'} + logging.info(""===== Feature Engineering Container Start ====="") + outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('FeatureEngineering: '+str(outputStr)) + print('FeatureEngineering: '+str(outputStr)) + logging.info(""===== Feature Engineering Container Done ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + modeltraining = config['ModelTraining'] + for mt in modeltraining: + logging.info(""===== Training Container Start ====="") + outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelTraining: '+str(outputStr)) + print('ModelTraining: '+str(outputStr)) + logging.info(""===== Training Container Done ====="") + outputStr = outputStr.strip() + try: + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + except Exception as inst: + logging.info(inst) + logging.info(""===== Model Registry Start ====="") + outputStr = client.containers.run(config['ModelRegistry'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelRegistry: '+str(outputStr)) + print('ModelRegistry: '+str(outputStr)) + logging.info(""===== ModelRegistry Done ====="") + logging.info(""===== ModelServing Start ====="") + outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('Prediction: '+str(outputStr)) + print('Prediction: '+str(outputStr)) + logging.info(""===== ModelServing Done ====="") import os +import sys +import json +from pathlib import Path +import subprocess +import shutil +import argparse + +def create_and_save_yaml(git_storage_path, container_label,usecasepath): + file_name_prefix = 'gh-acr-' + yaml_file = f""""""\\ +name: gh-acr-{container_label} +on: + push: + branches: main + paths: {container_label}/** + workflow_dispatch: +jobs: + gh-acr-build-push: + runs-on: ubuntu-latest + steps: + - name: 'checkout action' + uses: actions/checkout@main + + - name: 'azure login' + uses: azure/login@v1 + with: + creds: ${{{{ secrets.AZURE_CREDENTIALS }}}} + + - name: 'build and push image' + uses: azure/docker-login@v1 + with: + login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}} + username: ${{{{ secrets.REGISTRY_USERNAME }}}} + password: ${{{{ secrets.REGISTRY_PASSWORD }}}} + - run: | + docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} + docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} + docker build ./{container_label}/DataTransformation -t ${{{{ 
secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} + docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} + docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} + docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} +"""""" + arr = [filename for filename in os.listdir(usecasepath) if filename.startswith(""ModelTraining"")] + for x in arr: + yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label + yaml_file+='\\n' + yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label + yaml_file+='\\n' + with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f: + f.write(yaml_file) + +def run_cmd(cmd): + try: + subprocess.check_output(cmd, stderr=subprocess.PIPE) + except subprocess.CalledProcessError as e: + if e.stderr: + if isinstance(e.stderr, bytes): + err_msg = e.stderr.decode(sys.getfilesystemencoding()) + else: + err_msg = e.stderr + elif e.output: + if isinstance(e.output, bytes): + err_msg = e.output.decode(sys.getfilesystemencoding()) + else: + err_msg = e.output + else: + err_msg = str(e) + return False, err_msg + return True, """" + +def validate_config(config): + non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName'] + missing_keys = [k for k in non_null_keys if k not in config.keys()] + if missing_keys: + raise ValueError(f""following fields are missing in config file: {missing_keys}"") + for k,v in config.items(): + if k in non_null_keys and not v: + raise ValueError(f""Please provide value for '{k}' in config file."") + +def upload(config): + + validate_config(config) + url_type = config.get('url_type','https') + if url_type == 'https': + https_str = ""https://"" + url = https_str + config['username'] + "":"" + config['token'] + ""@"" + config['url'][len(https_str):] + else: + url = config['url'] + model_location = Path(config['location']) + git_folder_location = Path(config['gitFolderLocation']) + git_folder_location.mkdir(parents=True, exist_ok=True) + (git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True) + if not model_location.exists(): + raise ValueError('Trained model data not found') + + os.chdir(str(git_folder_location)) + (git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True) + shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True) + create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location']) + if (Path(git_folder_location)/'.git').exists(): + first_upload = False + else: + first_upload = True + if first_upload: + cmd = ['git','init'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','config','user.name',config['username']] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = 
['git','config','user.email',config['email']] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','add', '-A'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','commit','-m',f""commit {config['modelName']}""] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','branch','-M','main'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + if first_upload: + cmd = ['git','remote','add','origin', url] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','push','-f','-u','origin', 'main'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + else: + cmd = ['git','push'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + return json.dumps({'Status':'SUCCESS'}) + +if __name__ == '__main__': + try: + if shutil.which('git') is None: + raise ValueError(""git is not installed on this system"") + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config', help='Config file location or as a string') + + args = parser.parse_args() + if Path(args.config).is_file() and Path(args.config).suffix == '.json': + with open(args.config,'r') as f: + config = json.load(f) + else: + config = json.loads(args.config) + print(upload(config)) + except Exception as e: + status = {'Status':'Failure','msg':str(e)} + print(json.dumps(status)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from numpy import mean +from numpy import std +from pandas import read_csv +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OneHotEncoder +from sklearn.compose import ColumnTransformer +from learner.machinelearning import machinelearning +# from sklearn.dummy import DummyClassifier +# create histograms of numeric input variables +import sys +import os +import re +import pandas as pd +import numpy as np +from learner.aion_matrix import aion_matrix +import tensorflow as tf +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +import autokeras as ak +# load the sonar dataset +from sklearn.model_selection import train_test_split +# from sklearn.metrics import cohen_kappa_score +# from sklearn.metrics import roc_auc_score +# from" +"sklearn.metrics import confusion_matrix +from sklearn.metrics import roc_curve +from math import sqrt +from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error +from sklearn import metrics + + +class aionNAS: + def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation): + try: + self.dfFeatures=None + self.nas_class=nas_class + self.nas_params=nas_params + self.targetFeature=None + self.log = logging.getLogger('eion') + self.n_models=int(self.nas_params['n_models']) + self.n_epochs=int(self.nas_params['n_epochs']) + self.optimizer=self.nas_params['optimizer'] + self.metrics=self.nas_params['metrics'] + self.tuner=self.nas_params['tuner'] + self.seed=int(self.nas_params['seed']) + self.xtrain = xtrain1 + self.xtest = xtest1 + self.ytrain = ytrain1 + self.ytest = ytest1 + #self.labelMaps = labelMaps + self.deployLocation=deployLocation + + except Exception as e: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def paramCheck(self): + try: + if not (self.nas_class): + self.log.info(' ') + if not (self.nas_params): + self.log.info(' ') + if not (self.targetFeature): + self.log.info(' ') + if (self.n_models < 1): + self.n_models=1 + if not (self.dfFeatures): + self.log.info(' ') + if (self.n_epochs < 1): + self.n_models=1 + if not (self.optimizer): + self.optimizer=""adam"" + if not (self.tuner): + self.tuner=""greedy"" + if (self.seed < 1): + self.seed=0 + if not (self.metrics): + self.metrics=None + except ValueError: + self.log.info('<------------------ NAS config file error. 
--------------->') + + + + + + def recall_m(self,y_true, y_pred): + true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) + possible_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1))) + recall = true_positives / (possible_positives + tf.keras.backend.epsilon()) + return recall + + def precision_m(self,y_true, y_pred): + true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) + predicted_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1))) + precision = true_positives / (predicted_positives + tf.keras.backend.epsilon()) + return precision + + def f1_score(self,y_true, y_pred): + precision = self.precision_m(y_true, y_pred) + recall = self.recall_m(y_true, y_pred) + return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon())) + + def nasStructdataPreprocess(self): + df=self.data + self.paramCheck() + target=df[self.targetFeature].values + counter = Counter(target) + for k,v in counter.items(): + per = v / len(target) * 100 + self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per)) + # select columns with numerical data types + num_ix = df.select_dtypes(include=['int64', 'float64']).columns + subset = df[num_ix] + last_ix = len(df.columns) - 1 + y=df[self.targetFeature] + X = df.drop(self.targetFeature, axis=1) + #Using Pearson Correlation + # plt.figure(figsize=(12,10)) + # cor = df.corr() + # sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) + # plt.show() + # select categorical features + cat_ix = X.select_dtypes(include=['object', 'bool']).columns + # one hot encode cat features only + ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough') + X = X.reset_index() + X=X.replace(to_replace=""NULL"",value=0) + X = X.dropna(how='any',axis=0) + X = ct.fit_transform(X) + from sklearn.preprocessing import scale + X = scale(X) + # label encode the target variable to have the classes 0 and 1 + y = LabelEncoder().fit_transform(y) + # separate into train and test sets + X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1) + return X_train, X_test, y_train, y_test + + + def nasStructClassification(self,scoreParam): + try: + objClf = aion_matrix() + X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest + modelName=""nas_structdata_classifier"" + self.log.info(""Processing structured data block...\\n"") + s_in = ak.StructuredDataInput() + #s_in = Flatten()(s_in) + s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) + self.log.info(""Data pipe via autokeras Classification Dense layers ...\\n"") + s_out = ak.ClassificationHead()(s_out) + self.log.info(""applying autokeras automodel to run different neural models...\\n"") + try: + tuner = str(self.tuner).lower() + except UnicodeEncodeError: + tuner = (self.tuner.encode('utf8')).lower() + nasclf = ak.AutoModel( + inputs=s_in, + outputs=s_out, + overwrite=True, + tuner=tuner, + max_trials=self.n_models, + seed=self.seed) + # compile the model + #nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m]) + nasclf.fit(X_train, y_train, epochs=self.n_epochs) + best_model = nasclf.export_model() + mpredict=best_model.predict(X_test) + mtpredict=best_model.predict(X_train) + #loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0) + 
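+ # best_model.predict() returns predicted class probabilities; the np.argmax(..., axis=1) calls just
+ # below convert them into class indices before scoring with aion_matrix.get_score().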
#from sklearn.metrics import classification_report + #Classification report + y_pred_bool = np.argmax(mpredict, axis=1) + y_train_pred_bool = np.argmax(mtpredict, axis=1) + score = objClf.get_score(scoreParam,y_test, y_pred_bool) + #best_model = nasclf.export_model() + best_model_summary=best_model.summary() + filename = os.path.join(self.deployLocation,'log','summary.txt') + with open(filename,'w') as f: + best_model.summary(print_fn=lambda x: f.write(x + '\\n')) + f.close() + #self.log.info(""=========="") + #self.log.info(best_model_summary) + self.log.info(""NAS struct data classification, best model summary: \\n""+str(best_model.summary(print_fn=self.log.info))) + #self.log.info(""=========="") + #Save and load model + # # #try: + # try: + # best_model.save(""model_class_autokeras"", save_format=""tf"") + # except Exception: + # best_model.save(""model_class_autokeras.h5"") + # loaded_model = load_model(""model_class_autokeras"", custom_objects=ak.CUSTOM_OBJECTS) + # loadedmodel_predict=loaded_model.predict(X_test) + loss,accuracy_m=nasclf.evaluate(X_test, y_test) + #mpredict_classes = mpredict.argmax(axis=-1) + #accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int)) + # precision tp / (tp + fp) + #precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro') + # recall: tp / (tp + fn) + #recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro') + #f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average=""macro"") + self.log.info(""Autokeras struct data classification metrics: \\n"") + except Exception as inst: + self.log.info(""Error: NAS failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(inst) + return modelName,nasclf,score + + def nasStructRegressor(self,scoreParam): + objClf = aion_matrix() + modelName=""nas_struct_regressor"" + #self.paramCheck() + X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest + # Autokeras alg + s_in = ak.StructuredDataInput() + #tf.keras.layers.GlobalMaxPooling2D()(s_in) + s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) + self.log.info(""Data pipe via autokeras Regression Dense layers ...\\n"") + s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out) + self.log.info(""applying autokeras automodel to evaluate different neural models...\\n"") + try: + tuner = str(self.tuner).lower() + except UnicodeEncodeError: + tuner = (self.tuner.encode('utf8')).lower() + nas_reg = ak.AutoModel( + inputs=s_in, + outputs=s_out, + overwrite=True, + tuner=tuner, + max_trials=self.n_models) + nas_reg.fit(X_train, y_train, epochs=self.n_epochs) + best_model = nas_reg.export_model() + self.log.info(""NAS struct data regression best model summary: \\n"") + best_model_summary=best_model.summary(print_fn=self.log.info) + self.log.info(best_model_summary) + predictm=best_model.predict(X_test) + mtpredict=best_model.predict(X_train) + score = objClf.get_score(scoreParam,y_test, predictm) + self.log.info(""Autokeras struct data regression metrics: \\n"") + + return modelName,nas_reg,score + + def nasMain(self,scoreParam): + modelName = """" + nasclf=None + nas_reg=None + #text_reg_model=None + mse_value=0 + reg_rmse=0 + mape_reg=0 + huber_loss_reg=0 + accuracy=0 + precision=0 + recall=0 + #Dummy values to return main for classification problems + dummy_score_1=int(0) + #dummy_score_2=int(0) + try: + if 
((self.nas_class.lower() == ""classification"")): + modelName,nasclf,score=self.nasStructClassification(scoreParam) + self.log.info('NAS Struct Classification score: '+str(score)) + best_model_nas = nasclf.export_model() + scoredetails = '{""Model"":""NAS"",""Score"":'+str(round(score,2))+'}' + return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1 + elif (self.nas_class.lower() == ""regression""): + modelName,nas_reg,score =self.nasStructRegressor(scoreParam) + self.log.info('NAS Struct Regression score: '+str(score)) + best_model_nas = nas_reg.export_model() + ''' + filename = os.path.join(self.deployLocation,'model','autoKerasModel') + best_model_nas = nas_reg.export_model() + try: + best_model_nas.save(filename, save_format=""tf"") + modelName = 'autoKerasModel' + except Exception: + filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5') + best_model_nas.save(filename) + modelName = 'autoKerasModel.h5' + ''' + scoredetails = '{""Model"":""NAS"",""Score"":'+str(round(score,2))+'}' + ''' + error_matrix = '""MSE"":""'+str(round(mse_value,2))+'"",""RMSE"":""'+str(round(reg_rmse,2))+'"",""MAPE"":""'+str(round(mape_reg,2))+'"",""MSLE"":""'+str(round(msle_reg,2))+'""' + ''' + return best_model_nas,self.nas_params,score,'NAS' + else: + pass + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + output = {""status"":""FA" +"IL"",""message"":str(inst).strip('""')} + output = json.dumps(output) + import itertools +import logging +from typing import Optional, Dict, Union + +from nltk import sent_tokenize + +import torch +from transformers import( + AutoModelForSeq2SeqLM, + AutoTokenizer, + PreTrainedModel, + PreTrainedTokenizer, +) + +logger = logging.getLogger(__name__) + +class QGPipeline: + """"""Poor man's QG pipeline"""""" + def __init__( + self, + model: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + ans_model: PreTrainedModel, + ans_tokenizer: PreTrainedTokenizer, + qg_format: str, + use_cuda: bool + ): + self.model = model + self.tokenizer = tokenizer + + self.ans_model = ans_model + self.ans_tokenizer = ans_tokenizer + + self.qg_format = qg_format + + self.device = ""cuda"" if torch.cuda.is_available() and use_cuda else ""cpu"" + self.model.to(self.device) + + if self.ans_model is not self.model: + self.ans_model.to(self.device) + + assert self.model.__class__.__name__ in [""T5ForConditionalGeneration"", ""BartForConditionalGeneration""] + + if ""T5ForConditionalGeneration"" in self.model.__class__.__name__: + self.model_type = ""t5"" + else: + self.model_type = ""bart"" + + def __call__(self, inputs: str): + inputs = "" "".join(inputs.split()) + sents, answers = self._extract_answers(inputs) + flat_answers = list(itertools.chain(*answers)) + + if len(flat_answers) == 0: + return [] + + if self.qg_format == ""prepend"": + qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers) + else: + qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers) + + qg_inputs = [example['source_text'] for example in qg_examples] + questions = self._generate_questions(qg_inputs) + output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)] + return output + + def _generate_questions(self, inputs): + inputs = self._tokenize(inputs, padding=True, truncation=True) + + outs = self.model.generate( + 
input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=32, + num_beams=4, + ) + + questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs] + return questions + + def _extract_answers(self, context): + sents, inputs = self._prepare_inputs_for_ans_extraction(context) + inputs = self._tokenize(inputs, padding=True, truncation=True) + + outs = self.ans_model.generate( + input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=32, + ) + + dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs] + answers = [item.split('') for item in dec] + answers = [i[:-1] for i in answers] + + return sents, answers + + def _tokenize(self, + inputs, + padding=True, + truncation=True, + add_special_tokens=True, + max_length=512 + ): + inputs = self.tokenizer.batch_encode_plus( + inputs, + max_length=max_length, + add_special_tokens=add_special_tokens, + truncation=truncation, + padding=""max_length"" if padding else False, + pad_to_max_length=padding, + return_tensors=""pt"" + ) + return inputs + + def _prepare_inputs_for_ans_extraction(self, text): + sents = sent_tokenize(text) + + inputs = [] + for i in range(len(sents)): + source_text = ""extract answers:"" + for j, sent in enumerate(sents): + if i == j: + sent = "" %s "" % sent + source_text = ""%s %s"" % (source_text, sent) + source_text = source_text.strip() + + if self.model_type == ""t5"": + source_text = source_text + "" "" + inputs.append(source_text) + + return sents, inputs + + def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers): + inputs = [] + for i, answer in enumerate(answers): + if len(answer) == 0: continue + for answer_text in answer: + sent = sents[i] + sents_copy = sents[:] + + answer_text = answer_text.strip() + + ans_start_idx = 0 + # ans_start_idx = sent.index(answer_text) + + # if answer_text in sent: + # ans_start_idx = sent.index(answer_text) + # else: + # continue + + sent = f""{sent[:ans_start_idx]} {answer_text} {sent[ans_start_idx + len(answer_text): ]}"" + sents_copy[i] = sent + + source_text = "" "".join(sents_copy) + source_text = f""generate question: {source_text}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + inputs.append({""answer"": answer_text, ""source_text"": source_text}) + + return inputs + + def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers): + flat_answers = list(itertools.chain(*answers)) + examples = [] + for answer in flat_answers: + source_text = f""answer: {answer} context: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + examples.append({""answer"": answer, ""source_text"": source_text}) + return examples + + +class MultiTaskQAQGPipeline(QGPipeline): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def __call__(self, inputs: Union[Dict, str]): + if type(inputs) is str: + # do qg + return super().__call__(inputs) + else: + # do qa + return self._extract_answer(inputs[""question""], inputs[""context""]) + + def _prepare_inputs_for_qa(self, question, context): + source_text = f""question: {question} context: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + return source_text + + def _extract_answer(self, question, context): + source_text = self._prepare_inputs_for_qa(question, context) + inputs = self._tokenize([source_text], padding=False) + + outs = self.model.generate( + 
input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=16, + ) + + answer = self.tokenizer.decode(outs[0], skip_special_tokens=True) + return answer + + +class E2EQGPipeline: + def __init__( + self, + model: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + use_cuda: bool + ) : + + self.model = model + self.tokenizer = tokenizer + + self.device = ""cuda"" if torch.cuda.is_available() and use_cuda else ""cpu"" + self.model.to(self.device) + + assert self.model.__class__.__name__ in [""T5ForConditionalGeneration"", ""BartForConditionalGeneration""] + + if ""T5ForConditionalGeneration"" in self.model.__class__.__name__: + self.model_type = ""t5"" + else: + self.model_type = ""bart"" + + self.default_generate_kwargs = { + ""max_length"": 256, + ""num_beams"": 4, + ""length_penalty"": 1.5, + ""no_repeat_ngram_size"": 3, + ""early_stopping"": True, + } + + def __call__(self, context: str, **generate_kwargs): + inputs = self._prepare_inputs_for_e2e_qg(context) + + # TODO: when overrding default_generate_kwargs all other arguments need to be passsed + # find a better way to do this + if not generate_kwargs: + generate_kwargs = self.default_generate_kwargs + + input_length = inputs[""input_ids""].shape[-1] + + # max_length = generate_kwargs.get(""max_length"", 256) + # if input_length < max_length: + # logger.warning( + # ""Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)"".format( + # max_length, input_length + # ) + # ) + + outs = self.model.generate( + input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + **generate_kwargs + ) + + prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True) + questions = prediction.split("""") + questions = [question.strip() for question in questions[:-1]] + return questions + + def _prepare_inputs_for_e2e_qg(self, context): + source_text = f""generate questions: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + inputs = self._tokenize([source_text], padding=False) + return inputs + + def _tokenize( + self, + inputs, + padding=True, + truncation=True, + add_special_tokens=True, + max_length=512 + ): + inputs = self.tokenizer.batch_encode_plus( + inputs, + max_length=max_length, + add_special_tokens=add_special_tokens, + truncation=truncation, + padding=""max_length"" if padding else False, + pad_to_max_length=padding, + return_tensors=""pt"" + ) + return inputs + + +SUPPORTED_TASKS = { + ""question-generation"": { + ""impl"": QGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-qg-hl"", + ""ans_model"": ""valhalla/t5-small-qa-qg-hl"", + } + }, + ""multitask-qa-qg"": { + ""impl"": MultiTaskQAQGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-qa-qg-hl"", + } + }, + ""e2e-qg"": { + ""impl"": E2EQGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-e2e-qg"", + } + } +} + +def pipeline( + task: str, + model: Optional = None, + tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, + qg_format: Optional[str] = ""highlight"", + ans_model: Optional = None, + ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, + use_cuda: Optional[bool] = True, + **kwargs, +): + # Retrieve the task + if task not in SUPPORTED_TASKS: + raise KeyError(""Unknown task {}, available tasks are {}"".format(task, list(SUPPORTED_TASKS.keys()))) + + targeted_task = SUPPORTED_TASKS[task] + 
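+ # Typical use of this factory (illustrative, based on the defaults declared in SUPPORTED_TASKS above):
+ #   nlp = pipeline('e2e-qg')   # loads 'valhalla/t5-small-e2e-qg' and infers its tokenizer
+ #   questions = nlp('Python is a programming language created by Guido van Rossum.')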
task_class = targeted_task[""impl""] + + # Use default model/config/tokenizer for the task if no model is provided + if model is None: + model = targeted_task[""default""][""model""] + + # Try to infer tokenizer from model or config name (if provided as str) + if tokenizer is None: + if isinstance(model, str): + tokenizer = model + else: + # Impossible to guest what is the right tokenizer here + raise Exception( + ""Impossible to guess which tokenizer to use. "" + ""Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."" + ) + + # Instantiate tokenizer if needed + if isinstance(tokenizer, (str, tuple)): + if isinstance(tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1]) + else: + tokenizer = AutoTokenizer.from_pretrained(tokenizer) + + # Instantiate model if needed + if isinstance(model, str): + model = AutoModelForSeq2SeqLM.from_pretrained(model) + + if task == ""question-generation"": + if ans_model is None: + # load default ans model + ans_model = targeted_task[""default""][""ans_model""] + ans_tokenizer = AutoTokenizer.from_pretrained(ans_model) + ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) + else: + # Try to infer tokenizer from model or config name (if provided as str) + if ans_tokenizer is None: + if isinstance(ans_model, str): + ans_tokenizer = ans_model + else: + # Impossible to guest what is the right tokenizer here + raise Exception( + ""Impossible to guess which tokenizer to use. "" + ""Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."" + ) + + # Instantiate tokenizer if needed + if isinstance(ans_tokenizer, (str, tuple)): + if isinstance(ans_tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1]) + else: + ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer) + + if isinstance(ans_model, str): + ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) + + if task == ""e2e-qg"": + return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda) + elif task == ""question-generation"": + return task_class" +"(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda) + else: + return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import socket +import os +import rsa +from os.path import expanduser +from pathlib import Path +import requests +import platform +from appbe.dataPath import DATA_DIR +import socket +import getmac +import subprocess +import sys +import json +from datetime import datetime +import binascii +computername = socket.getfqdn() +global_key = ''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb +k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue +N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+ +NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit +NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b +fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB +-----END RSA PUBLIC KEY----- +''' + +quarter_key = ''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1 +08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh +NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ +WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9 +njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01 +gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB +-----END RSA PUBLIC KEY----- +''' + +halfYear_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv +wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF +tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh +R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp +7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn +wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB +-----END RSA PUBLIC KEY----- +''' +oneYear_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP +Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG +ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB +Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/ +fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0 +KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB +-----END RSA PUBLIC KEY----- +''' +full_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn +ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0 +elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN +4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+ +t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ +Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB +-----END RSA PUBLIC KEY----- +''' +def validate_key_Pair(privatepath,publickey): + with open(privatepath, 'rb') as privatefile: + keydata = privatefile.read() + privatefile.close() + try: + privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM') + data = 'Validate Global License' + signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1') + pubkey = rsa.PublicKey.load_pkcs1(publickey) + except: + return False + try: + rsa.verify(data.encode('utf-8'), signature, pubkey) + return True + except Exception as e: + return False + +def updateDRecord(licensepath): + domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic') + if(os.path.isfile(licensepath)): + with open(licensepath, 'rb') as f: + licensekey = f.read() + f.close() + with open(domain_license_path, 'wb') as f: + 
f.write(licensekey) + f.close() + if(validate_key_Pair(domain_license_path,global_key)): + return True,'Valid Domain License' + else: + return False,'Invalid Domain License' + else: + return False,'File Not Exists' + +def generateLicenseKey(userKey): + record = {'UserKey':userKey} + record = json.dumps(record) + status = 'Error' + url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license' + try: + response = requests.post(url, data=record,headers={""x-api-key"":""3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM"",""Content-Type"":""application/json"",}) + if response.status_code == 200: + outputStr=response.content + outputStr = outputStr.decode('utf-8','ignore') + outputStr = outputStr.strip() + license_dict = json.loads(str(outputStr)) + if license_dict['status'] == 'success': + status = 'Success' + licenseKey = license_dict['msg'] + else: + status = 'Error' + licenseKey = '' + else: + status = 'Error' + licenseKey = '' + except Exception as inst: + print(inst) + status = 'Error' + licenseKey = '' + msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''} + return msg +def updateRecord(licensepath): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + license_path = os.path.join(currentDirectory,'..','lic','license.lic') + if(os.path.isfile(licensepath)): + with open(licensepath, 'rb') as f: + licensekey = f.read() + f.close() + with open(license_path, 'wb') as f: + f.write(licensekey) + f.close() + status,msg = check_domain_license() + if status: + status,msg = getdaysfromstartdate() + if status: + status,msg = check_days_license(int(msg)) + return status,msg + else: + return False,'File Not Exists' + + +def check_domain_license(): + if 'CORP.HCL.IN' in computername: + return True,'HCL Domain' + else: + return True,'HCL Domain' + +def diff_month(d1, d2): + return (d1.year - d2.year) * 12 + d1.month - d2.month + + +def getdaysfromstartdate(): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt') + + if(os.path.isfile(startdatePath)): + with open(startdatePath, ""rb"") as fl: + encrypted_message = fl.read() + fl.close() + + privkey = '''-----BEGIN RSA PRIVATE KEY----- +MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+ +GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr +38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp +WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP +ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL +xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH +2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet +XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK +AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4 +Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY +vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs +dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl +MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+ +gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1 +uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw +3NVWCIalMia+aKnvRHqHy/cQfEo" +"2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ +msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc +sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps +hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu 
+V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk +EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4 +UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy +Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo +LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu ++edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ= +-----END RSA PRIVATE KEY----- + ''' + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + decrypted_message = rsa.decrypt(encrypted_message, privkey) + decrypted_message = decrypted_message.decode() + import datetime + start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d') + + current_date = datetime.datetime.today().strftime('%Y-%m-%d') + current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d') + + Months = diff_month(current_date,start_time) + return True,Months + else: + return False,'Start Date Not Exists' +def check_days_license(months): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + license_path = os.path.join(currentDirectory,'..','lic','license.lic') + if(os.path.isfile(license_path)): + if(validate_key_Pair(license_path,full_key)): + return True,'Valid License' + elif(validate_key_Pair(license_path,oneYear_key)): + if months <= 12: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + elif(validate_key_Pair(license_path,halfYear_key)): + if months <= 6: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + elif(validate_key_Pair(license_path,quarter_key)): + if months <= 3: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + else: + return False,'Invalid License' + else: + return False,'License Not exists.Please contact ERS Research for renewal.' 
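+# The helpers above implement a simple tiered licence check: validate_key_Pair() loads the licence
+# file as an RSA private key, signs a fixed string with it and verifies that signature against one of
+# the embedded public keys (quarter / half-year / one-year / full), while check_days_license() compares
+# the months elapsed since the encrypted start date against the limit of the matched tier.
+# Illustrative call sequence, mirroring the tail of updateRecord() above:
+#   ok, months = getdaysfromstartdate()
+#   if ok:
+#       valid, msg = check_days_license(int(months))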
+ +def checklicense(): + import binascii + license_path = os.path.join(DATA_DIR,'License','license.lic') + if(os.path.isfile(license_path)): + try: + with open(license_path, 'r') as privatefile: + license_key = privatefile.read() + privatefile.close() + encrypted_message = binascii.unhexlify(license_key.encode()) + privkey = '''-----BEGIN RSA PRIVATE KEY----- + MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY + m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3 + anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh + ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc + kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf + AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO + CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC + cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83 + rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa + A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7 + NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV + F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn + FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07 + AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh + vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p + aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k + 6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe + fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw + Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k + MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy + vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2 + 3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a + IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA + lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+ + 9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2 + -----END RSA PRIVATE KEY----- + ''' + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + decrypted_message = rsa.decrypt(encrypted_message, privkey) + msg = decrypted_message.decode().split('####') + product = msg[0] + computernameLicense = msg[1] + computername = socket.getfqdn() + licenseValid = False + if product.lower() == 'aion': + if computernameLicense == computername: + uuidlicense = msg[3] + uuid = guid() + if uuidlicense == uuid: + current_date = datetime.now() + license_expiry_date = msg[5] + license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S') + if current_date > license_expiry_date: + return False,'License Expire' + else: + return True,'' + return False,'License Error' + except Exception as e: + print(e) + return False,'License Error' + else: + return False,'Generate License' +def generate_record_key(product,version): + computername = socket.getfqdn() + macaddress = getmac.get_mac_address() + license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S') + try: + user = os.getlogin() + except: + user = 'NA' + uuid = guid() + msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date + pkeydata='''-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1 +kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq +ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME +oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG 
+T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN +RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB +-----END RSA PUBLIC KEY----- +''' + pubkey = rsa.PublicKey.load_pkcs1(pkeydata) + encrypted_message = rsa.encrypt(msg.encode(), pubkey) + encrypted_message = binascii.hexlify(encrypted_message).decode() + return(encrypted_message) + +def run(cmd): + try: + return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding=""utf-8"").stdout.strip() + except Exception as e: + print(e) + return None + +def guid(): + if sys.platform == 'darwin': + return run( + ""ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\\\\"" '/IOPlatformUUID/{print $(NF-1)}'"", + ) + + if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys': + return run('wmic csproduct get uuid').split('\\n')[2].strip() + + if sys.platform.startswith('linux'): + return run('cat /var/lib/dbus/machine-id') or \\ + run('cat /etc/machine-id') + + if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'): + return run('cat /etc/hostid') or \\ + run('kenv -q smbios.system.uuid') + + +def updateLicense(licensekey): + license_folder = os.path.join(DATA_DIR,'License') + license_folder = Path(license_folder) + license_folder.mkdir(parents=True, exist_ok=True) + license_file = license_folder/'license.lic' + with open(license_file, ""w"") as fl: + fl.write(licensekey) + fl.close() + + +def enterRecord(version): + validLicense,msg = checklicense() + if not validLicense: + key = generate_record_key('AION',version) + msg = {'status':msg,'key':key,'licenseKey':'','link':''} + return validLicense,msg + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import numpy as np +import os +import datetime, time, timeit +from sklearn.model_selection import KFold +from sklearn.metrics import confusion_matrix" +" +from sklearn.metrics import accuracy_score +from sklearn.metrics import classification_report +import pickle +import logging + +class recommendersystem(): + def __init__(self,features,svd_params): + self.features = features + self.svd_input = svd_params + self.log = logging.getLogger('eion') + print (""recommendersystem starts \\n"") + + #To extract dict key,values + def extract_params(self,dict): + self.dict=dict + for k,v in self.dict.items(): + return k,v + + + + def recommender_model(self,df,outputfile): + from sklearn.metrics.pairwise import cosine_similarity + from utils.file_ops import save_csv + USER_ITEM_MATRIX = 'user_item_matrix' + ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix' + selectedColumns = self.features.split(',') + data = pd.DataFrame() + for i in range(0,len(selectedColumns)): + data[selectedColumns[i]] = df[selectedColumns[i]] + dataset = data + self.log.info('-------> Top(5) Rows') + self.log.info(data.head(5)) + start = time.time() + self.log.info('\\n----------- Recommender System Training Starts -----------') + #--------------- Task 11190:recommender system changes Start ---Usnish------------------# + # selectedColumns = ['userId', 'movieId', 'rating'] + df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns[2], 'count')).reset_index() + + self.log.info('-------> Top 10 most rated Items:') + self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10)) + + matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2]) + relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv') + matrix.to_csv(relative_file) + item_similarity_cosine = cosine_similarity(matrix.fillna(0)) + item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId')) + self.log.info('---------> Item-Item Similarity matrix created:') + self.log.info(item_similarity_cosine.head(5)) + + relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv') + save_csv(item_similarity_cosine,relative_file) + + + # --------------- recommender system changes End ---Usnish------------------# + + + + executionTime=time.time() - start + self.log.info(""------->Execution Time: ""+str(executionTime)) + self.log.info('----------- Recommender System Training End -----------\\n') + + return ""filename"",matrix,""NA"","""","""" ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import numpy as np +import pickle +import pandas as pd +import sys +import time +import os +from os.path import expanduser +import platform +from sklearn.preprocessing import binarize +import logging +import tensorflow as tf +from sklearn.model_selection import train_test_split +from tensorflow.keras import preprocessing +from sklearn.metrics import roc_auc_score +from sklearn.metrics import accuracy_score +from tensorflow.keras.preprocessing.sequence import pad_sequences +from tensorflow.keras.layers import Input, Embedding, LSTM, Lambda +import tensorflow.keras.backend as K +from tensorflow.keras.models import Model +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.layers import Concatenate +from tensorflow.keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D +from sklearn.metrics.pairwise import cosine_similarity, cosine_distances + +import tensorflow.keras.backend as K +from tensorflow.keras.models import Model, Sequential +from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers + +## Keras subclassing based siamese network +class siameseNetwork(Model): + def __init__(self, activation,inputShape, num_iterations): + self.activation=activation + self.log = logging.getLogger('eion') + super(siameseNetwork, self).__init__() + i1 = layers.Input(shape=inputShape) + i2 = layers.Input(shape=inputShape) + featureExtractor = self.build_feature_extractor(inputShape, num_iterations) + f1 = featureExtractor(i1) + f2 = featureExtractor(i2) + #distance vect + distance = layers.Concatenate()([f1, f2]) + cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) + c_loss=cosine_loss(f1, f2) + similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2]) + outputs = layers.Dense(1, activation=""sigmoid"")(distance) + self.model = Model(inputs=[i1, i2], outputs=outputs) + ##Build dense sequential layers + def build_feature_extractor(self, inputShape, num_iterations): + layers_config = [layers.Input(inputShape)] + for i, n_units in enumerate(num_iterations): + layers_config.append(layers.Dense(n_units)) + layers_config.append(layers.Dropout(0.2)) + layers_config.append(layers.BatchNormalization()) + layers_config.append(layers.Activation(self.activation)) + model = Sequential(layers_config, name='feature_extractor') + + return model + + def call(self, x): + return self.model(x) + +def euclidean_distance(vectors): + (f1, f2) = vectors + sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True) + return K.sqrt(K.maximum(sumSquared, K.epsilon())) + +def cosine_similarity(vectors): + (f1, f2) = vectors + f1 = K.l2_normalize(f1, axis=-1) + f2 = K.l2_normalize(f2, axis=-1) + return K.mean(f1 * f2, axis=-1, keepdims=True) + + + +def cos_dist_output_shape(shapes): + shape1, shape2 = shapes + return (shape1[0],1) + + +class eion_similarity_siamese: + def __init__(self): + self.log = logging.getLogger('eion') + + def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file): + try: + self.log.info('-------> Read Embedded File') + home = expanduser(""~"") + if platform.system() == 'Windows': + modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity') + else: + modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity') + if os.path.isdir(modelsPath) == False: + os.makedirs(modelsPath) + embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt') + 
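+            # Added note: the block below is a one-time setup step -- if the 100d GloVe vectors are not
+            # already cached under the PreTrainedModels/TextSimilarity folder, it downloads the glove.6B
+            # archive from the Stanford NLP site and extracts it before training the siamese network.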
if not os.path.exists(embedding_file_path):
+                import urllib.request
+                import zipfile
+                location = modelsPath
+                local_file_path = os.path.join(location,""glove.6B.zip"")
+                file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path)
+                with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
+                    zip_ref.extractall(location)
+                # keep only the 100d vectors used by this model; remove the archive and the unused sizes
+                os.unlink(os.path.join(location,""glove.6B.zip""))
+                if os.path.isfile(os.path.join(location,""glove.6B.50d.txt"")):
+                    os.unlink(os.path.join(location,""glove.6B.50d.txt""))
+                if os.path.isfile(os.path.join(location,""glove.6B.300d.txt"")):
+                    os.unlink(os.path.join(location,""glove.6B.300d.txt""))
+                if os.path.isfile(os.path.join(location,""glove.6B.200d.txt"")):
+                    os.unlink(os.path.join(location,""glove.6B.200d.txt""))
+            X = df[[col1,col2]]
+            Y = df[targetColumn]
+            self.log.info('\\n-------------- Test Train Split ----------------')
+            if testPercentage == 0:
+                # no test split requested: train and evaluate on the full data
+                X_train=X
+                y_train=Y
+                X_test=X
+                y_test=Y
+            else:
+                testSize=testPercentage/100
+                self.log.info('-------> Split Type: Random Split')
+                self.log.info('-------> Test Size (fraction): '+str(testSize))
+                X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize)
+            self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
+            self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->')
+            self.log.info('-------------- Test Train Split End ----------------\\n')
+            self.log.info('\\n-------------- Train Validate Split ----------------')
+            X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
+            self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
+            self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->')
+            self.log.info('-------------- Train Validate Split End----------------\\n')
+            self.log.info('Status:- |... 
Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') + train_sentence1 = pipe.texts_to_sequences(X_train[col1].values) + train_sentence2 = pipe.texts_to_sequences(X_train[col2].values) + val_sentence1 = pipe.texts_to_sequences(X_val[col1].values) + val_sentence2 = pipe.texts_to_sequences(X_val[col2].values) + len_vec = [len(sent_vec) for sent_vec in train_sentence1] + max_len = np.max(len_vec) + len_vec = [len(sent_vec) for sent_vec in train_sentence2] + if (max_len < np.max(len_vec)): + max_len = np.max(len_vec) + train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post') + train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post') + val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post') + val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post') + y_train = y_train.values + y_val = y_val.values + activation = str(conf['activation']) + model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10]) + model.compile( + loss=""binary_crossentropy"", + optimizer=optimizers.Adam(learning_rate=0.0001), + metrics=[""accuracy""]) + es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True) + rlp = callbacks.ReduceLROnPlateau( + monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1 + ) + x_valid=X_val + y_valid=y_val + n_epoch = int(conf['num_epochs']) + batch_size = int(conf['batch_size']) + similarityIndex = conf['similarityIndex'] + model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size, + validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp]) + scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0) + self.log.info('-------> Model Score Matrix: Accuracy') + self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) + self.log.info('Status:- |... Algorithm applied: SIAMESE') + test_sentence1 = pipe.texts_to_sequences(X_test[col1].values) + test_sentence2 = pipe.texts_to_sequences(X_test[col2].values) + test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post') + test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post') + prediction = model.predict([test_sentence1, test_sentence2 ]) + n_epoch = conf['num_epochs'] + batch_size = conf['batch_size'] + activation = conf['activation'] + similarityIndex = conf['similarityIndex'] + self.log.info('-------> similarityIndex : '+str(similarityIndex)) + prediction = np.where(prediction > similarityIndex,1,0) + rocauc_sco = roc_auc_score(y_test," +"prediction) + acc_sco = accuracy_score(y_test, prediction) + predict_df = pd.DataFrame() + predict_df['actual'] = y_test + predict_df['predict'] = prediction + predict_df.to_csv(predicted_data_file) + self.log.info('-------> Model Score Matrix: Accuracy') + self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) + self.log.info('Status:- |... 
Algorithm applied: SIAMESE')
+            self.log.info(""predict_df: \\n""+str(predict_df))
+            sco = acc_sco
+            self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco))
+            self.log.info('Status:- |... Testing Score: '+str(acc_sco))
+            self.log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco))
+            matrix = '""Accuracy"":'+str(acc_sco)+',""ROC AUC"":'+str(rocauc_sco)
+
+            prediction = model.predict([train_sentence1, train_sentence2])
+            prediction = np.where(prediction > similarityIndex,1,0)
+            train_rocauc_sco = roc_auc_score(y_train,prediction)
+            train_acc_sco = accuracy_score(y_train, prediction)
+            self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco))
+            self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco))
+            trainmatrix = '""Accuracy"":'+str(train_acc_sco)+',""ROC AUC"":'+str(train_rocauc_sco)
+            model_tried = '{""Model"":""SIAMESE"",""Score"":'+str(sco)+'}'
+            saved_model = 'textsimilarity_'+iterName+'_'+iterVersion
+            ## Because the model uses the Keras subclassing API, it is saved as a SavedModel directory (below) instead of a single .h5 file.
+            filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion)
+            model.save(filename)
+            model_name = 'SIAMESE MODEL'
+
+            return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex)
+        except Exception as inst:
+            self.log.info(""SIAMESE failed "" + str(inst))
+            exc_type, exc_obj, exc_tb = sys.exc_info()
+            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
+            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) '''
+*
+* =============================================================================
+* COPYRIGHT NOTICE
+* =============================================================================
+* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
+* Proprietary and confidential. All information contained herein is, and
+* remains the property of HCL Technologies Limited. Copying or reproducing the
+* contents of this file, via any medium is strictly prohibited unless prior
+* written permission is obtained from HCL Technologies Limited.
+*
+''' '''
+*
+* =============================================================================
+* COPYRIGHT NOTICE
+* =============================================================================
+* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
+* Proprietary and confidential. All information contained herein is, and
+* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json + +#Python sklearn & std libraries +import numpy as np +import pandas as pd +from time_series.ts_arima_eion import eion_arima +from statsmodels.tsa.vector_ar.vecm import coint_johansen +from statsmodels.tsa.vector_ar.var_model import VAR +from math import * +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from math import sqrt +import logging +import os +import sys +import time +from statsmodels.tsa.arima_model import ARIMA +from sklearn.metrics import mean_squared_error +from pandas import read_csv +from statsmodels.tsa.stattools import adfuller +import pmdarima as pm +from statsmodels.tsa.stattools import grangercausalitytests +from statsmodels.stats.stattools import durbin_watson +from sklearn.utils import check_array + + +class timeseriesModelTests(): + def __init__(self,data,targetFeature,datetimeFeature,count): + #self.tsConfig = tsConfig + #self.modelconfig = modelconfig + #self.modelList = modelList + self.data = data + self.targetFeature = targetFeature + self.dateTimeFeature = datetimeFeature + self.count=count + self.log = logging.getLogger('eion') + def StatinaryChecks(self,dictDiffCount): + self.log.info(""\\n---------------Start Stationary Checks-----------"") + tFeature = self.targetFeature.split(',') + tFeature.append(self.dateTimeFeature) + self.data=self.data[tFeature] + tFeature.remove(self.dateTimeFeature) + lengthtFeature=len(tFeature) + diffCount=0 + try : + for features in (tFeature): + XSt = self.data[features] + XSt=XSt.values + resultSt = adfuller(XSt,autolag='AIC') + stationaryFlag = False + #print(resultSt) + self.log.info('-------> Features: '+str(features)) + self.log.info('----------> ADF Statistic: '+str(resultSt[0])) + self.log.info('----------> p-value: %f' % resultSt[1]) + if resultSt[1]<= 0.05: + self.log.info(""-------------> Converted As Stationary Data"") + stationaryFlag = True + else: + self.log.info(""-------------> Stationary Conversion Required"") + stationaryFlag = False + + self.log.info('----------> Critical Values') + for key, value in resultSt[4].items(): + self.log.info('----------> '+str(key)+': '+str(value)) + + if stationaryFlag == False: + self.data[features]=self.data[features].diff() + self.data=self.data.dropna() + dictDiffCount[features]=1 + XStt = self.data[features] + XStt=XStt.values + resultStt = adfuller(XStt) + if resultStt[1] > 0.05: + self.data[features]=self.data[features].diff() + self.data=self.data.dropna() + dictDiffCount[features]=2 + XSttt = self.data[features] + XSttt=XSttt.values + resultSttt = adfuller(XSttt) + if resultSttt[1]<= 0.05: + stationaryFlag = True + else: + stationaryFlag = True + self.log.info(""------------->""+str(dictDiffCount)) + + + if stationaryFlag == True: + self.log.info(""----------> Equals to Stationary Data"") + else: + self.log.info(""----------> Not Equal To Stationary Data"") + + self.log.info(""-------> Stationary data diff()"") + self.log.info(dictDiffCount) + self.log.info(""---------------Start Stationary Checks Ends-----------\\n"") + return self.data,dictDiffCount + + + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def varTimeseriesModelTests(self,data): + try : + 
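+            # Added note: statsmodels' grangercausalitytests() below expects a two-column array; its null
+            # hypothesis is that the second series does not Granger-cause the first, tested for lags 1..15.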
tFeature = self.targetFeature.split(',') + self.log.info(""\\n--------- Start Granger Causality Test Results ------------"") + gtest=grangercausalitytests(data[tFeature], maxlag=15, addconst=True, verbose=True) + self.log.info(""-------> GrangerCausalitytest Results ""+str(gtest.values())) + self.log.info(""--------- End Granger Causality Test Results ------------\\n"") + return gtest + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def grangersCausationMatrix(self,data, variables, test='ssr_chi2test', verbose=False): + + try : + countVariables=0 + self.log.info(len(variables)) + self.log.info(""\\n--------------Start GrangersCausationMatrix---------------"") + df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables) + for c in df.columns: + for r in df.index: + test_result = grangercausalitytests(data[[r, c]], maxlag=12, verbose=False) + p_values = [round(test_result[i+1][0][test][1],4) for i in range(12)] + if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}') + min_p_value = np.min(p_values) + df.loc[r, c] = min_p_value + df.columns = [var + '_x' for var in variables] + df.index = [var + '_y' for var in variables] + self.log.info(df) + for i in range(len(variables)): + for j in range(len(variables)): + if i!=j and df.iloc[i][j]<0.05 and df.iloc[i][j]<0.05: + countVariables=countVariables+1 + self.log.info(""--------------End GrangersCausationMatrix---------------\\n"") + return df,countVariables + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return df,countVariables + + def coIntegrationTest(self,data): + try : + tdata = data.drop([self.dateTimeFeature], axis=1) + tdata.index = data[self.dateTimeFeature] + cols = tdata.columns + self.log.info(""\\n-------------- Start of the Co Integration test ---------------"") + lenTargetFeature=len(self.targetFeature) + countIntegrationFeature=0 + + N, l = tdata.shape + jres = coint_johansen(tdata, 0, 1) + trstat = jres.lr1 + tsignf = jres.cvt + + for i in range(l): + if trstat[i] > tsignf[i, 1]: + r = i + 1 + jres.r = r + jres.evecr = jres.evec[:, :r] + + jres.r = r + countIntegrationFeature=jres.r + jres.evecr = jres.evec[:, :r] + + self.log.info('------->coint_johansen trace statistics: '+str(trstat)) + self.log.info('------->coint_johansen critical values:') + self.log.info(tsignf) + self.log.info(""------->There are ""+str(countIntegrationFeature)+"" Co-Integration vectors"") + self.log.info(""-------------- End of the Co Integration test ---------------\\n"") + + return countIntegrationFeature + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname =" +"os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + + + + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +# For timeseries pyramid pdaarima module +import json + +#Python sklearn & std libraries +import numpy as np +import pandas as pd +from sklearn.model_selection import train_test_split +from sklearn.feature_selection import VarianceThreshold +from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error +#from sklearn.metrics import mean_absolute_percentage_error +from sklearn.linear_model import LinearRegression +from math import sqrt +import warnings +# For serialization. +#from sklearn.externals import joblib +import pickle +import os,sys +# For ploting (mathlab) +import matplotlib.pyplot as plt +import plotly +#Import eion config manager module +import logging +from sklearn import metrics +from sklearn.metrics import accuracy_score +import time +import random +import statsmodels.api as sm +# prophet by Facebook +# time series analysis +#from statsmodels.tsa.seasonal import seasonal_decompose +#from statsmodels.graphics.tsaplots import plot_acf, plot_pacf +from prophet.plot import plot_plotly,plot_components_plotly +#import seaborn as sns +from sklearn.model_selection import ParameterGrid +import holidays +#from prophet.diagnostics import performance_metrics +#from prophet.diagnostics import cross_validation +from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error +import logging,sys +from scipy.special import inv_boxcox +from prophet.diagnostics import cross_validation + +#from sklearn.metrics import mean_absolute_percentage_error + +warnings.filterwarnings(""ignore"") +# Aion Prophet module +class aion_fbprophet (): + #Constructor + def __init__(self,configfile,testpercentage,data,targetFeature,dateTimeFeature): + try: + self.tsprophet_params = configfile + self.data=data + self.targetFeature=targetFeature + self.dateTimeFeature=dateTimeFeature + self.testpercentage = testpercentage + self.log = logging.getLogger('eion') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + #Find datetime column + def get_datetime_col(self,data): + df=data + dt_col=[] + categorical_features=[] + discrete_features=[] + # Here, I am checking each column type, whether it is object type or float or int. Then I am trying to convert the + # Object type to datetime format using python pd.to_datetime() function. 
If the column converts , it is datetime format, else it is some other format (categorical or discrete) + for col in df.columns: + if (df[col].dtype == 'object' or df[col].dtype == 'datetime64[ns]' ): + try: + df[col] = pd.to_datetime(df[col]) + dt_col.append(col) + except ValueError: + categorical_features.append(col) + pass + elif (df[col].dtype == 'float64' or 'int64' or 'int' or 'float64' or 'float'): #('int' or 'float' or 'int64' or 'float64')): + #print(""discrete features found..\\n"") + discrete_features.append(col) + else: + pass + #Uncomment to know the datetime, categorical and continuous cols + # print (""Date time colms: dt_col: \\n"",dt_col) + # print(""categorical features: \\n"",categorical_features) + # print(""continuous features: \\n"",discrete_features) + return dt_col + + def get_predict_frequency(self,df,datetime_col_name): + #dt_col=pd.to_datetime(df[datetime_col_name], format='%m/%d/%Y %H:%M:%S') + dt_col=pd.to_datetime(df[datetime_col_name]) + #df['tvalue'] = df[datetime_col_name] + df['time_diff'] = (df[datetime_col_name]-df[datetime_col_name].shift()).fillna(pd.Timedelta('0')) + mean_diff_dt=df['time_diff'].mean() + time_diff_secs=mean_diff_dt.total_seconds() + time_sec_2_hr=((time_diff_secs/60)/60) + pred_freq="""" + time_sec_2_hr=round(time_sec_2_hr) + #For abbreviation ,refer https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases + if (time_sec_2_hr < 1): + pred_freq=""min"" + else: + if (time_sec_2_hr >= 24): + if (time_sec_2_hr > 168): + if(time_sec_2_hr > 696 or time_sec_2_hr < 744): # based on 29 days, to 31 days + if(time_sec_2_hr > 8760): + pred_freq=""Y"" + else: + pred_freq=""M"" + else: + pred_freq=""W"" + else: + pred_freq=""D"" + + else: + pred_freq=""H"" + + pass + return pred_freq + + #To extract dict key,values + def extract_params(self,dict): + self.dict=dict + for k,v in self.dict.items(): + return k,v + + def mean_absolute_percentage_error(self,y_true, y_pred): + if (y_true.isin([0]).sum() > 0): + y_true=y_true.mask(y_true==0).fillna(y_true.mean()) + + try: + y_true, y_pred=np.array(y_true), np.array(y_pred) + #return np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100 + return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 + except Exception as inst: + self.log.info('<------------- mean_absolute_percentage_error ---------------> ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def regressor_list(self,regressorstr): + lst = regressorstr.split ("","") + reg_list=[] + for i in lst: + reg_list.append(i) + #print(reg_list) + + return reg_list + + + + # def get_regressors(self,reg): + # print(""get extra inputs for prophet...\\n"") + + + + def aion_probhet(self,train_data,datetime_col_name,predicted_data_file,dataFolderLocation): + from prophet import Prophet + #Getting prophet params + #key,val = self.extract_params(self.tsprophet_params) + val=self.tsprophet_params + self.log.info('-------> The given prophet algorithm parameters:>>') + self.log.info("" ""+str(val)) + changepoint_prior_scale=[] + changepoint_range=[] + mcmc_samples=[] + interval_width=[] + holidays_prior_scale=[] + n_changepoints=[] + uncertainty_samples=[] + seasonality_prior_scale=[] + + seasonality_mode="""" + yearly_seasonality=None + weekly_seasonality=None + daily_seasonality=None + additional_regressors="""" + holiday_country_name="""" + holiday_years=[] + no_of_periods=0 + 
pred_frequncy="""" + + for k,v in val.items(): + try: + if (k == ""seasonality_mode""): + seasonality_mode=v + elif (k == ""changepoint_prior_scale""): + changepoint_prior_scale=[float(i) for i in v.split(',')] + elif (k == ""changepoint_range""): + changepoint_range=[float(i) for i in v.split(',')] + elif (k == ""yearly_seasonality""): + if v.lower() == 'true': + yearly_seasonality=True + elif v.lower() == 'false': + yearly_seasonality=False + elif v.lower() == 'auto': + yearly_seasonality=v + else: + yearly_seasonality=True + elif (k == ""weekly_seasonality""): + if v.lower() == 'true': + weekly_seasonality=True + elif v.lower() == 'false': + weekly_seasonality=False + elif v.lower() == 'auto': + weekly_seasonality=v + else: + weekly_seasonality=False + #weekly_seasonality=v + elif (k == ""daily_seasonality""): + if v.lower() == 'true': + daily_seasonality=True + elif v.lower() == 'false': + daily_seasonality=False + elif v.lower() == 'auto': + daily_seasonality=v + else: + daily_seasonality=False + elif (k == ""mcmc_samples""): + mcmc_samples=[float(i) for i in v.split(',')] + elif (k == ""interval_width""): + interval_width=[float(i) for i in v.split(',')] + elif (k == ""holidays_prior_scale""): + #holidays_prior_scale=float(v) + holidays_prior_scale=[float(i) for i in v.split(',')] + elif (k == ""n_changepoints""): + n_changepoints=[int(i) for i in v.split(',')] + elif (k == ""uncertainty_samples""): + uncertainty_samples=[float(i) for i in v.split(',')] + elif (k == ""seasonality_prior_scale""): + seasonality_prior_scale=[float(i) for i in v.split(',')] + elif (k == ""additional_regressors""): + additional_regressors=str(v) + elif (k == ""holiday_country_name""): + holiday_country_name=v + elif (k == ""holiday_years""): + holiday_years=[int(i) for i in v.split(',')] + elif (k == ""no_of_periods""): + no_of_periods=int(v) + elif (k == ""pred_frequncy""): + pred_frequncy=v + else: + self.log.info(""Invalid string."") + except Exception: + continue + + try: + start = time.time() + datetime_col_name=str(datetime_col_name) + target_col=str(self.targetFeature) + #extra_regressors=additional_regressors + reg_list=self.regressor_list(additional_regressors) + get_dtcol="""" + get_dtcol=self.get_datetime_col(self.data)[0] + #get predict frequency for user data + pred_freq= str(self.get_predict_frequency(self.data,datetime_col_name)) + if (pred_frequncy): + pred_frequncy=pred_frequncy + else: + #If user not defined predict_freq in aion config or GUI, our algorithm will find automatically by get_predict_frequency() method + pred_frequncy=pred_freq + self.log.info(""Auto Predict frequency period (Hour-H/Day-D/Week-W/Month-M/Year-Y): \\n""+str(pred_frequncy)) + + #For proper datetime format check. + self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature]) + filterd_df = self.data.filter([get_dtcol,target_col]) + holiday = pd.DataFrame([]) + holiday_specified=holidays.CountryHoliday(holiday_country_name,years=holiday_years) + for date, name in sorted(holiday_specified.items()): + holiday = holiday.append(pd.DataFrame({'ds': date, 'holiday': ""Holidays""}, index=[0]), ignore_index=True) + + holiday['ds'] = pd.to_datetime(holiday['ds'], format='%Y-%m-%d %H:%M:%S', errors='ignore') + filterd_df=filterd_df.rename(columns={self.dateTimeFeature:'ds',target_col:'y'}) + #Set seasonality model + try: + if not seasonality_mode: + self.log.info('empty input for seasonality_mode parameter in aion configuration file.Please check. Setting default mode: additive. 
\\n') + seasonality_mode=[] + seasonality_mode=['additive'] + multiplicative_s=""multiplicative"" + additive_s=""additive"" + else: + seasonality_mode = seasonality_mode.split(',') + len_seasonality_mode=len(seasonality_mode) + except ValueError as e: + self.log.info(e) + params_grid = {'seasonality_mode':(seasonality_mode), + 'changepoint_prior_scale':changepoint_prior_scale, + 'changepoint_range': changepoint_range, + 'yearly_seasonality': [yearly_seasonality], + 'weekly_seasonality': [weekly_seasonality], + 'daily_seasonality': [daily_seasonality], + 'mcmc_samples': mcmc_samples, + 'interval_width': interval_width, + 'holidays_prior_scale':holidays_prior_scale, + 'n_changepoints' : n_changepoints, + 'uncertainty_samples': uncertainty_samples, + 'seasonality_prior_scale': seasonality_" +"prior_scale} + grid = ParameterGrid(params_grid) + p_cnt = 0 + for p in grid: + p_cnt = p_cnt+1 + self.log.info(""--------------- Total Possible prophet iterations: --------------- \\n"") + self.log.info(p_cnt) + self.log.info(""\\n--------------- Modal Validation Start ---------------"") + size = int(len(filterd_df) * (100 - self.testpercentage)/100) + train = filterd_df.loc[0:size] + valid = filterd_df.loc[size:len(filterd_df)] + self.log.info(""------->Train Data Shape: ""+str(train.shape)) + self.log.info(""------->Valid Data Shape""+str(valid.shape)) + X_train = train + X_test = valid + len_test=len(X_test) + #For add_regressor,copy the add_regressor columns to use. + if (additional_regressors): + df1=pd.DataFrame() + df1[additional_regressors]=self.data[additional_regressors] + + model_parameters_mape = pd.DataFrame(columns = ['MAPE','Parameters']) + model_parameters_rmse = pd.DataFrame(columns = ['rmse','Parameters']) + model_parameters_mse = pd.DataFrame(columns = ['mse','Parameters']) + model_parameters_mae = pd.DataFrame(columns = ['MAE','Parameters']) + model_parameters_r2 = pd.DataFrame(columns = ['r2','Parameters']) + for P in grid: + pred_forecast = pd.DataFrame() + random.seed(0) + train_model =Prophet(changepoint_prior_scale = P['changepoint_prior_scale'], + seasonality_mode=P['seasonality_mode'], + changepoint_range=P['changepoint_range'], + holidays_prior_scale = P['holidays_prior_scale'], + n_changepoints = P['n_changepoints'], + mcmc_samples=P['mcmc_samples'], + interval_width=P['interval_width'], + uncertainty_samples=P['uncertainty_samples'], + seasonality_prior_scale= P['seasonality_prior_scale'], + holidays=holiday, + weekly_seasonality=P['weekly_seasonality'], + daily_seasonality = P['daily_seasonality'], + yearly_seasonality = P['yearly_seasonality'] + + ) + + train_forecast=pd.DataFrame() + try: + train_model.fit(X_train) + train_forecast = train_model.make_future_dataframe(periods=len_test, freq=pred_frequncy,include_history = False) + train_forecast = train_model.predict(train_forecast) + except ValueError as e: + self.log.info(e) + self.log.info (""------->Check mcmc_samples value in aion confiuration, either 0 (default) or defined value,e.g.mcmc_samples:'300' to be set.If no idea on value, set to default.\\n"") + pred_forecast=train_forecast[['ds','yhat']] + Actual=X_test + len_act=len(Actual['y']) + len_pred=len(pred_forecast['yhat']) + MAPE = self.mean_absolute_percentage_error(Actual['y'],abs(pred_forecast['yhat'])) + model_parameters_mape = model_parameters_mape.append({'MAPE':MAPE,'Parameters':p},ignore_index=True) + #MAE + MAE = mean_absolute_error(Actual['y'],abs(pred_forecast['yhat'])) + rmse = sqrt(mean_squared_error(Actual['y'],abs(pred_forecast['yhat']))) + mse = 
mean_squared_error(Actual['y'],abs(pred_forecast['yhat'])) + r2 = r2_score(Actual['y'],abs(pred_forecast['yhat'])) + # self.log.info (""------->Prophet RMSE :""+str(rmse)) + # self.log.info (""------->Prophet MSE :""+str(mse)) + # self.log.info (""------->Prophet MAE :""+str(MAE)) + # self.log.info (""------->Prophet R2 :""+str(r2)) + model_parameters_mape = model_parameters_mape.append({'MAPE':MAPE,'Parameters':p},ignore_index=True) + model_parameters_rmse = model_parameters_rmse.append({'rmse':rmse,'Parameters':p},ignore_index=True) + model_parameters_mse = model_parameters_mse.append({'mse':mse,'Parameters':p},ignore_index=True) + model_parameters_mae = model_parameters_mae.append({'MAE':MAE,'Parameters':p},ignore_index=True) + model_parameters_r2 = model_parameters_r2.append({'r2':r2,'Parameters':p},ignore_index=True) + #end of for loop + + parameters_mape = model_parameters_mape.sort_values(by=['MAPE']) + parameters_mape = parameters_mape.reset_index(drop=True) + best_params_mape=parameters_mape['Parameters'][0] + # print(""Best Parameters on which the model has the least MAPE is: \\n"",best_params_mape) + best_mape_score=parameters_mape['MAPE'].iloc[0] + #self.log.info('------->Mean absolute percent error log: \\n ') + #self.log.info('------->best_mape_score: \\n '+str(best_mape_score)) + parameters_rmse = model_parameters_rmse.sort_values(by=['rmse']) + parameters_rmse = parameters_rmse.reset_index(drop=True) + best_params_rmse=parameters_rmse['Parameters'][0] + best_rmse_score=parameters_rmse['rmse'].iloc[0] + #self.log.info('------->Root Man Squared Error log (Prophet timeseries): \\n ') + #self.log.info('------->best_rmse_score ((Prophet timeseries)): \\n '+str(best_rmse_score)) + #mse + parameters_mse = model_parameters_mse.sort_values(by=['mse']) + parameters_mse = parameters_mse.reset_index(drop=True) + best_params_mse = parameters_mse['Parameters'][0] + best_mse_score=parameters_mse['mse'].iloc[0] + #MAE + parameters_mae = model_parameters_mae.sort_values(by=['MAE']) + parameters_mae = parameters_mae.reset_index(drop=True) + best_params_mae = parameters_mae['Parameters'][0] + best_mae_score=parameters_mae['MAE'].iloc[0] + # R2 score + parameters_r2 = model_parameters_r2.sort_values(by=['r2']) + parameters_r2 = parameters_r2.reset_index(drop=False) + best_params_r2 = parameters_r2['Parameters'][0] + best_r2_score=parameters_r2['r2'].iloc[0] + #Final best prophet mse,rmse,mape scores + # self.log.info (""------->Prophet RMSE :""+str(best_rmse_score)) + # self.log.info (""------->Prophet MSE :""+str(best_mse_score)) + # self.log.info (""------->Prophet MAE :""+str(best_mae_score)) + # self.log.info (""------->Prophet R2 :""+str(best_r2_score)) + + #Extracting best model parameters + + for k,v in best_params_mape.items(): + try: + if (k == ""changepoint_prior_scale""): + changepoint_prior_scale=float(v) + elif (k == ""changepoint_range""): + changepoint_range=float(v) + elif (k == ""daily_seasonality""): + daily_seasonality=v + elif (k == ""holidays_prior_scale""): + holidays_prior_scale=float(v) + elif (k == ""interval_width""): + interval_width=float(v) + elif (k == ""mcmc_samples""): + mcmc_samples=float(v) + elif (k == ""n_changepoints""): + n_changepoints=int(v) + elif (k == ""seasonality_mode""): + seasonality_mode=str(v) + elif (k == ""seasonality_prior_scale""): + seasonality_prior_scale=int(v) + elif (k == ""uncertainty_samples""): + uncertainty_samples=float(v) + elif (k == ""weekly_seasonality""): + weekly_seasonality=v + elif (k == ""yearly_seasonality""): + 
yearly_seasonality=v + else: + pass + except Exception as e: + self.log.info(""\\n prophet time series config param parsing error""+str(e)) + #continue + self.log.info(""\\n Best prophet model accuracy parameters.\\n "") + + #Prophet model based on mape best params. + best_prophet_model = Prophet(holidays=holiday, + changepoint_prior_scale= changepoint_prior_scale, + holidays_prior_scale = holidays_prior_scale, + n_changepoints = n_changepoints, + seasonality_mode = seasonality_mode, + weekly_seasonality= weekly_seasonality, + daily_seasonality = daily_seasonality, + yearly_seasonality = yearly_seasonality, + interval_width=interval_width, + mcmc_samples=mcmc_samples, + changepoint_range=changepoint_range) + + # If holiday not set using prophet model,we can add as below. + # best_prophet_model.add_country_holidays(country_name=holiday_country_name) + + #prophet add_regressor ,adding additional influencer (regressor) features, but it different from multivariant model. + if (additional_regressors): + filterd_df[additional_regressors] = df1[additional_regressors] + filterd_df.reset_index(drop=True) + for v in reg_list: + best_prophet_model=best_prophet_model.add_regressor(v) + #best_prophet_model.fit(X_train) + else: + pass + + #Model prophet fit, it should be done before make_future_dataframe + best_prophet_model.fit(filterd_df) + future = best_prophet_model.make_future_dataframe(periods=no_of_periods, freq=pred_frequncy,include_history = False) + if (additional_regressors): + future[additional_regressors] = filterd_df[additional_regressors] + future.reset_index(drop=True) + future=future.dropna() + else: + pass + #Final prediction + forecast = best_prophet_model.predict(future) + # forecast_df=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']] + # #Save forecast as csv file + # forecast_df.to_csv(r""prophet_realtime_user_steps.csv"",index = False, header=True) + #Plot the predition and save in file + forecast_plot = best_prophet_model.plot(forecast) + imagefilename = os.path.join(dataFolderLocation,'log','img','prophet_fig.png') + forecast_plot.savefig(imagefilename) + + #The below part is used to compare prophet predicted with actual value + #For train data + #Prophet model with train and test data, based on mape best params. 
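+            # Added note: a second Prophet instance with the same lowest-MAPE parameters is fitted below on
+            # the train split only, so the RMSE/MSE/MAE/R2/MAPE logged next are held-out validation scores
+            # rather than in-sample scores from the final model fitted on the full series.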
+ best_prophet_model_new = Prophet(holidays=holiday, + changepoint_prior_scale= changepoint_prior_scale, + holidays_prior_scale = holidays_prior_scale, + n_changepoints = n_changepoints, + seasonality_mode = seasonality_mode, + weekly_seasonality= weekly_seasonality, + daily_seasonality = daily_seasonality, + yearly_seasonality = yearly_seasonality, + interval_width=interval_width, + mcmc_samples=mcmc_samples, + changepoint_range=changepoint_range) + + fp_forecast=pd.DataFrame() + try: + best_prophet_model_new.fit(X_train) + fp_forecast = best_prophet_model_new.make_future_dataframe(periods=len_test, freq=pred_frequncy,include_history = False) + fp_forecast = best_prophet_model_new.predict(fp_forecast) + except ValueError as e: + self.log.info(e) + self.log.info (""------->Check mcmc_samples value in aion confiuration, either 0 (default) or defined value,e.g.mcmc_samples:'300' to be set.If no idea on value, set to default.\\n"") + + pred_forecast=fp_forecast[['ds','yhat']] + pred_forecast['ds']=Actual['ds'].to_numpy() + Actual.ds = pd.to_datetime(Actual.ds) + pred_forecast.ds = pd.to_datetime(pred_forecast.ds) + MAE = mean_absolute_error(Actual['y'],abs(pred_forecast['yhat'])) + rmse = sqrt(mean_squared_error(Actual['y'],abs(pred_forecast['yhat']))) + mse = mean_squared_error(Actual['y'],abs(pred_forecast['yhat'])) + r2 = r2_score(Actual['y'],abs(pred_forecast['yhat'])) + MAPE = self.mean_absolute_percentage_error(Actual['y'],abs(pred_forecast['yhat'])) + #Final best prophet mse,rmse,mape scores + self.log.info (""------->Prophet RMSE : ""+str(rmse)) + self.log.info (""------->Prophet MSE : ""+str(mse)) + self.log.info (""------->Prophet MAE : ""+str(MAE)) + self.log.info (""------->Prophet R2 : ""+str(r2)) + self.log.info(""------->Prophet MAPE: ""+str(MAPE)) + #self.log.info(MAPE) + #self.log.info('------->best_mape_score: \\n '+str(best_mape_score)) + prophet_df = pd.merge(Actual,pred_forecast, on=['ds'], how='left') + cols = ['ds','y','yhat'] + prophet_df_new = prophet_df[cols] + prophet_df_new.dropna(inplace=True) + actualfeature = target_" +"col+'_actual' + predictfeature = target_col+'_pred' + prophet_df_new=prophet_df_new.rename(columns={'ds': 'datetime', 'y': actualfeature,'yhat': predictfeature}) + #prophet_df_new.to_csv(predicted_data_file) + + #cv_results = cross_validation( model = best_prophet_model, initial = pd.to_timedelta(no_of_periods,unit=pred_frequncy), horizon = pd.to_timedelta(no_of_periods,unit=pred_frequncy)) + #forecast_df=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']] + #Save forecast as csv file + #forecast_df.to_csv(r""prophet_realtime_Output.csv"",index = False, header=True) + # self.log.info('------->Prophet time series forecast (last 7 prediction for user view): \\n ') + # self.log.info(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7)) + plot_prd=plot_plotly(best_prophet_model, forecast) + imagefilename = os.path.join(dataFolderLocation,'log','img','1_ppm_plot') + plotly.offline.plot(plot_prd, filename=imagefilename,auto_open = False) + plot_prd_components=plot_components_plotly(best_prophet_model, forecast) + imagefilename = os.path.join(dataFolderLocation,'log','img','2_ppm_plot') + plotly.offline.plot(plot_prd_components, filename=imagefilename,auto_open = False) + executionTime=(time.time() - start) + self.log.info('-------> Time: '+str(executionTime)) + return best_prophet_model,best_mae_score,best_rmse_score,best_mse_score,best_mape_score,best_r2_score,pred_frequncy,additional_regressors,prophet_df_new + + except Exception as inst: + 
#print(""********** aion_fbprophet exception ************* \\n"") + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +# For timeseries pyramid pdaarima module +from pmdarima.arima import auto_arima +import pmdarima as pm +import json + +#Python sklearn & std libraries +import numpy as np +import pandas as pd +from sklearn.model_selection import train_test_split +from sklearn.feature_selection import VarianceThreshold +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import mean_squared_error +#from sklearn.metrics import mean_absolute_percentage_error +from sklearn.linear_model import LinearRegression +from math import sqrt +import warnings +# For serialization. +#from sklearn.externals import joblib +import pickle +import os,sys +# For ploting (mathlab) +import matplotlib.pyplot as plt +#Import eion config manager module +import logging +from sklearn import metrics +from sklearn.metrics import accuracy_score +import time +import os +import sys + +# Eion arima module +class eion_arima (): + + #Constructor + def __init__(self,configfile,testpercentage,sesonalityChecks,stationaryChecks): # eaobj - eion arima class object + try: + tsarima_params = configfile + self.testpercentage = testpercentage + self.start_p= int(tsarima_params['start_p']) + self.start_q= int(tsarima_params['start_q']) + self.max_p= int(tsarima_params['max_p']) + self.max_q= int(tsarima_params['max_q']) + + self.max_d= int(tsarima_params['max_d']) + self.max_order= int(tsarima_params['max_order']) + self.start_Q= int(tsarima_params['start_Q']) + self.max_P= int(tsarima_params['max_P']) + self.max_D= int(tsarima_params['max_D']) + self.max_Q= int(tsarima_params['max_Q']) + + self.m= int(tsarima_params['m']) + self.start_P= int(tsarima_params['start_P']) + self.seasonal= tsarima_params['seasonal'] + #self.seasonal= sesonalityChecks + self.stationary=stationaryChecks + #print(""self.seasonal: \\n"",self.seasonal) + #print(""self.stationary: \\n"",self.stationary) + + if self.seasonal and not self.seasonal.isspace(): + if (self.seasonal.lower() == 'true'): + self.seasonal=True + elif (self.seasonal.lower() == 'false'): + self.seasonal=False + else: + self.seasonal=True + else: + self.seasonal=True + + + self.d= int(tsarima_params['d']) + self.D= int(tsarima_params['D']) + #self.trace= tsarima_params['trace'] + self.error_action= tsarima_params['error_action'] + self.suppress_warnings= tsarima_params['suppress_warnings'] + self.stepwise= tsarima_params['stepwise'] + #self.random= tsarima_params['random'] + self.log = logging.getLogger('eion') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + def mean_absolute_percentage_error(self,y_true, 
y_pred): + try: + y_true, y_pred=np.array(y_true), np.array(y_pred) + return np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100 + + except Exception as inst: + self.log.info('<------------- mean_absolute_percentage_error ---------------> ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def eion_arima(self,train_data): + try: + start = time.time() + auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=self.seasonal,stationary=self.stationary,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,stepwise=self.stepwise) + #auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=True,stationary=True,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,random_state=20,stepwise=True) + aic_score = auto_arima_stepwise_fit.aic() + self.log.info('------->AIC Score: '+str(aic_score)) + self.log.info('\\n--------- Fit Summary --------------') + self.log.info (auto_arima_stepwise_fit.summary()) + self.log.info('--------- Fit Summary End--------------\\n') + self.log.info(""\\n--------------- Modal Validation Start ---------------"") + size = int(len(train_data) * (100 - self.testpercentage)/100) + + train = train_data.loc[0:size] + + valid = train_data.loc[size:len(train_data)] + # valid_perc=((100-self.testpercentage)/100) + # valid_perc=round(valid_perc, 1) + # print(""valid_perc: \\n"", valid_perc) + + self.log.info(""------->Train Data Shape: ""+str(train.shape)) + self.log.info(""------->Valid Data Shape""+str(valid.shape)) + start1=len(train) + end1=len(train_data) + modelfit = auto_arima_stepwise_fit.fit(train) + a_prediction = auto_arima_stepwise_fit.predict(valid.shape[0]) + #a_prediction = auto_arima_stepwise_fit.predict(n_periods=len(valid)) + #a_prediction = auto_arima_stepwise_fit.predict(start=start1,end=end1) + #print(""a_prediction: \\n"",a_prediction) + #self.log.info(a_prediction) + mae = metrics.mean_absolute_error(valid, a_prediction) + self.log.info (""------->MAE: ""+str(mae)) + mape = self.mean_absolute_percentage_error(valid, a_prediction) + + #mape=np.mean(np.abs((valid - a_prediction) / valid)) * 100 + + self.log.info (""------->MAPE :""+str(mape)) + #RMSE + rmse = sqrt(mean_squared_error(valid,a_prediction)) + mse = mean_squared_error(valid,a_prediction) + self.log.info (""------->RMSE :""+str(rmse)) + self.log.info (""------->MSE :""+str(mse)) + from sklearn.metrics import r2_score + r2 = r2_score(valid,a_prediction) + ########### End #################### + + # now we have the model + auto_arima_stepwise_fit.fit(train_data) + self.log.info(""------------- Validate Model End----------------\\n"") + executionTime=time.time() - start + self.log.info('-------> Time: '+str(executionTime)+'\\n') + return auto_arima_stepwise_fit,mae,rmse,mse,r2,aic_score,mape,valid,a_prediction + except Exception as inst: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json + +#Python sklearn & std libraries +import numpy as np +import pandas as pd +from time_series.ts_arima_eion import eion_arima +from time_series.aion_fbprophet import aion_fbprophet +from time_series.timeseriesDLUnivariate import timeseriesDLUnivariate +from time_series.timeseriesDLMultivariate import timeseriesDLMultivariate +from time_series.tsDLMultiVrtInUniVrtOut import tsDLMultiVrtInUniVrtOut +from statsmodels.tsa.vector_ar.vecm import coint_johansen +from statsmodels.tsa.vector_ar.var_model import VAR +from math import * +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from math import sqrt +import logging +import os +import sys +import time +import pickle +from statsmodels.tsa.arima_model import ARIMA +from sklearn.metrics import mean_squared_error +from statsmodels.tsa.stattools import adfuller +import pmdarima as pm +from statsmodels.tsa.stattools import grangercausalitytests +from statsmodels.stats.stattools import durbin_watson +from time_series.ts_modelvalidation import timeseriesModelTests +from sklearn.utils import check_array +from time_series.tsStationarySeasonalityTest import tsStationarySeasonalityTest + +class timeseries(): + def __init__(self,ts" +"Config,modelconfig,modelList,data,targetFeature,dateTimeFeature,modelName,trainPercentage,usecasename,version,deployLocation,scoreParam): + self.tsConfig = tsConfig + self.modelconfig = modelconfig + self.modelList = modelList + self.data = data + self.data1=data + self.pred_freq = '' + self.additional_regressors='' + self.trainPercentage = trainPercentage + self.targetFeature = targetFeature + self.dateTimeFeature = dateTimeFeature + self.modelName=modelName + self.usecasename=usecasename + self.model_fit=None + self.selectedColumns = '' + self.version=version + self.deployLocation=deployLocation + self.dictDiffCount={} + self.log = logging.getLogger('eion') + self.scoreParam=str(scoreParam) + try: + ##For bug:12280 + self.data.dropna(how='all',axis=1,inplace=True) + except Exception as e: + self.data.fillna(0) + self.log.info(""data empty feature process error info:, check any text column contain empty records. if yes, please remove the column and upload the data for time series forecasting. 
\\n""+str(e)) + + def var_prediction(self,no_of_prediction): + tdata = self.data.drop([self.dateTimeFeature], axis=1) + tdata.index = self.data[self.dateTimeFeature] + lag_order = self.model_fit.k_ar + predictions = self.model_fit.forecast(tdata.values[-lag_order:],steps=no_of_prediction) + predictions = predictions.round(2) + col = self.targetFeature.split("","") + pred = pd.DataFrame(index=range(0,len(predictions)),columns=col) + for j in range(0,len(col)): + for i in range(0, len(predictions)): + pred.iloc[i][j] = predictions[i][j] + predictions = pred + pred=self.invertTransformation(tdata,self.targetFeature,predictions,self.dictDiffCount) + return pred + def save_dl_model(self,smodel,scaler_model): + try: + saved_model = self.usecasename+'_'+self.version + filename = os.path.join(self.deployLocation,'model',saved_model) + smodel.save(filename) + if scaler_model != 'NA' and scaler_model != '': + scaler_filename = os.path.join(self.deployLocation,'model',saved_model+'_scaler.pkl') + with open(scaler_filename, 'wb') as f: + pickle.dump(scaler_model,f) + f.close() + else: + scaler_filename = 'NA' + return filename,saved_model,scaler_filename + except Exception as e: + print(e) + + def save_model(self,smodel): + try: + saved_model = self.usecasename+'_'+self.version+'.sav' + filename = os.path.join(self.deployLocation,'model',saved_model) + with open(filename, 'wb') as f: + pickle.dump(smodel,f) + f.close() + return filename,saved_model + except Exception as e: + print(e) + + def mean_absolute_percentage_error(self,y_true, y_pred): + try: + y_true, y_pred=np.array(y_true), np.array(y_pred) + mape=np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100 + return mape + + except Exception as inst: + self.log.info('------------- mean_absolute_percentage_error ---------------') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + ## Fbprophet model + def getfbprophetmodel(self,predicted_data_file,dataFolderLocation,tFeature): + try: + modelName='fbprophet' + modelconfig = self.modelconfig['fbprophet'] + self.targetFeature=tFeature[0] + X_Train = pd.DataFrame(self.data[self.targetFeature]) + try: + # self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. + try: + #for non utc timestamp + self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce',utc=True) + self.data = self.data.dropna() + except: + pass + aion_prophet_obj = aion_fbprophet(modelconfig,self.trainPercentage,self.data,self.targetFeature,self.dateTimeFeature) + self.log.info('Status:- |... TimeSeries Algorithm applied: FBPROPHET') + self.model_fit,mae,rmse_prophet,mse,mape,r2,pred_freq,additional_regressors,prophet_df_new = aion_prophet_obj.aion_probhet(X_Train,self.dateTimeFeature,predicted_data_file,dataFolderLocation) + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info(""fbprophet User selected scoring parameter is r2. r2 value: ""+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v=rmse_prophet + self.log.info(""fbprophet User selected scoring parameter is RMSE. 
RMSE value: ""+str(rmse_prophet)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=mse + self.log.info(""fbprophet User selected scoring parameter is MSE. MSE value: ""+str(mse)) + elif (self.scoreParam.lower() == ""mae""): + scoringparam_v=mae + self.log.info(""fbprophet User selected scoring parameter is MAE. MAE value: ""+str(mae)) + else: + scoringparam_v=rmse_prophet + self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs + error_matrix = '""RMSE"":""'+str(round(rmse_prophet,2))+'"",""MAPE"":""'+str(round(mape,2))+'"",""R2"":""'+str(round(r2,2))+'"",""MAE"":""'+str(round(mae,2))+'"",""MSE"":""'+str(round(mse,2))+'""' + self.log.info(""fbprophet all scoring parameter results: ""+str(error_matrix)) + scoredetails = '{""Model"":""FBProphet "",""Score"":'+str(scoringparam_v)+',""Scoring Param"": ""'+str(self.scoreParam.lower())+'""}' + self.selectedColumns = self.targetFeature+','+self.dateTimeFeature + self.selectedColumns = self.selectedColumns.split("","") + self.pred_freq = pred_freq + self.additional_regressors=additional_regressors + self.log.info('------------- End FBPROPHET Model -------------\\n') + return('Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,self.model_fit,self.selectedColumns,error_matrix,scoredetails,self.dictDiffCount,self.pred_freq,self.additional_regressors,prophet_df_new) + except Exception as e: + self.log.info(""FBProphet operation failed. error: ""+str(e)) + return('Error',modelName.upper(),self.scoreParam.lower(),0,None,self.selectedColumns,'','{}',self.dictDiffCount,self.pred_freq,self.additional_regressors,pd.DataFrame()) + ## Arima model + def get_arima_values(self): + try: + tFeature = self.targetFeature.split(',') + if(len(tFeature) == 1): + model_name = 'arima' + else: + self.log.info(""Note: ARIMA model is going to perform only on first feature of provided target features due to data not met the VAR model constraints"") + self.targetFeature=tFeature[0] + sesonalityChecks=True + stationaryChecks=False + #start checking sessonality using ch test and ocsb + self.log.info(self.data.head(5)) + res = pm.arima.nsdiffs(self.data[self.targetFeature], m=355, max_D=5, test=""ch"") # 365 since daily + self.log.info('-------> Seasonality checks: %f' % res) + if res >=4: + self.log.info(""-----------> Data is following Seasonality "") + self.log.info('Status:- |... Seasonality Check Done. Data is following Seasonality ') + sesonalityChecks=True + else: + self.log.info(""-----------> Data is not following Seasonality "") + self.log.info('Status:- |... Seasonality Check Done. Data is not following Seasonality') + sesonalityChecks=False + + # end checking sessonality using ch test and ocsb + # start checking stationary data for time Series + series=self.data[self.targetFeature] + adf_test = pm.arima.ADFTest(alpha=0.05) + + resultSt = adfuller(self.data[self.targetFeature]) + self.log.info('ADF Statistic: %f' % resultSt[0]) + self.log.info('p-value: %f' % resultSt[1]) + if resultSt[1]<= 0.05: + stationaryChecks=True + self.log.info(""the data does not have a unit root and is stationary."") + self.log.info('Status:- |... Stationary Check Done. Data is stationary') + else: + stationaryChecks=False + self.log.info(""the data has a unit root and is non-stationary."") + self.log.info('Status:- |... Stationary Check Done. 
Data is non-stationary') + + # End of stationary checks + self.log.info('\\n------------- Start Arima Model -------------') + self.log.info('-------> Top 5 Rows: ') + self.log.info(self.data.head(5)) + eion_arima_obj = eion_arima(self.modelconfig['arima'],self.trainPercentage,sesonalityChecks,stationaryChecks) + return 'Success',eion_arima_obj + except Exception as e: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return 'Error',None + + def getEncDecLSTMMultVrtInUniVrtOut(self): + try: + self.log.info('Status:- |... TimeSeries Algorithm applied: Encoder Decoder LSTM') + modelName='encoder_decoder_lstm_mvi_uvo' + modelconfig = self.modelconfig['encoder_decoder_lstm_mvi_uvo'] + df = self.data + targetFeature = list(self.targetFeature.split("","")) + + try: + # df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. + try: + #for non utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True) + df = df.dropna() + except: + pass + df = df.groupby(self.dateTimeFeature).mean() + df = df.reset_index() + tdata = df.drop([self.dateTimeFeature], axis=1) + tdata.index = df[self.dateTimeFeature] + #tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]] + #selectedColumns = self.targetFeature+','+self.dateTimeFeature + #selectedColumns = selectedColumns.split("","") + selectedColumns = tdata.columns + df_predicted=None + + aion_dlts_obj = tsDLMultiVrtInUniVrtOut(modelconfig,self.trainPercentage,targetFeature,self.dateTimeFeature) + status,mse,rmse,r2,mae,model,df_predicted,lag_order,scaler = aion_dlts_obj.lstm_encdec_mvin_uvout(tdata) + if status.lower() == 'success': + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info('Status:- |... Score R2(Avg) '+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v=rmse + self.log.info(""Status:- |... Score RMSE(Avg) ""+str(rmse)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=mse + self.log.info(""Status:- |... Score MSE(Avg) ""+str(mse)) + elif (self.scoreParam.lower() == ""mae""): + scoringparam_v=mae + self.log.info(""Status:- |... Score MAE(Avg) : ""+str(mae)) + else: + scoringparam_v=rmse + error_matrix = '""RMSE"":""'+str(round(rmse,2))+'"",""MSE"":""'+str(round(mse,2))+'""' + error_matrix=error_matrix+',""R2"":""'+str(round(r2,2))+'"",""MAE"":""'+str(round(mae,2))+'""' + self.log.info(""LSTM Multivariant Input Univariate Output all scoring param results: ""+str(error_matrix)) + self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs + scoredetails = '{""Model"":""LSTM Multivariant"",""Score"":'+str(scoringparam_v)+',""Scoring Param"": ""'+str(self.scoreParam.lower())+'""}' + else: + return 'Error',modelName.upper()," +"self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None + except Exception as e: + self.log.info(""getEncDecLSTMMultVrtInUniVrtOut method error. 
Error msg: ""+str(e)) + return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None + return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,model,selectedColumns,error_matrix,scoredetails,df_predicted,lag_order,scaler + + def getLSTMMultivariate(self): + try: + self.log.info('Status:- |... TimeSeries Algorithm applied: LSTM') + modelName='lstm' + modelconfig = self.modelconfig['lstm'] + df = self.data + targetFeature = list(self.targetFeature.split("","")) + try: + # df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. + try: + #for non utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True) + df = df.dropna() + except: + pass + df = df.groupby(self.dateTimeFeature).mean() + df = df.reset_index() + tdata = df.drop([self.dateTimeFeature], axis=1) + tdata.index = df[self.dateTimeFeature] + tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]] + selectedColumns = self.targetFeature+','+self.dateTimeFeature + selectedColumns = selectedColumns.split("","") + df_predicted=None + + aion_dlts_obj = timeseriesDLMultivariate(modelconfig,self.trainPercentage,targetFeature,self.dateTimeFeature) + status,mse,rmse,r2,mae,model,df_predicted,lag_order,scaler = aion_dlts_obj.lstm_multivariate(tdata) + if status.lower() == 'success': + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info('Status:- |... Score R2(Avg) '+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v=rmse + self.log.info(""Status:- |... Score RMSE(Avg) ""+str(rmse)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=mse + self.log.info(""Status:- |... Score MSE(Avg) ""+str(mse)) + elif (self.scoreParam.lower() == ""mae""): + scoringparam_v=mae + self.log.info(""Status:- |... Score MAE(Avg) : ""+str(mae)) + else: + scoringparam_v=rmse + error_matrix = '""RMSE"":""'+str(round(rmse,2))+'"",""MSE"":""'+str(round(mse,2))+'""' + error_matrix=error_matrix+',""R2"":""'+str(round(r2,2))+'"",""MAE"":""'+str(round(mae,2))+'""' + self.log.info(""LSTM Multivariant all scoring param results: ""+str(error_matrix)) + self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs + scoredetails = '{""Model"":""LSTM Multivariant"",""Score"":'+str(scoringparam_v)+',""Scoring Param"": ""'+str(self.scoreParam.lower())+'""}' + else: + return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None + except Exception as e: + self.log.info(""getLSTMMultivariate method error. Error msg: ""+str(e)) + return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None + return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,model,selectedColumns,error_matrix,scoredetails,df_predicted,lag_order,scaler + + def getUniVarientLSTMModel(self): + try: + self.log.info('Status:- |... 
TimeSeries Algorithm applied: LSTM') + modelName='lstm' + lstmconfig = self.modelconfig['lstm'] + df = self.data + try: + # df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. + try: + #for non utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True) + df = df.dropna() + except: + pass + tdata = df.drop([self.dateTimeFeature], axis=1) + tdata.index = df[self.dateTimeFeature] + tdata = pd.DataFrame(tdata[self.targetFeature]) + selectedColumns = self.targetFeature+','+self.dateTimeFeature + selectedColumns = selectedColumns.split("","") + aion_dlts_obj = timeseriesDLUnivariate(lstmconfig,self.trainPercentage,self.targetFeature,self.dateTimeFeature,modelName) + status,lstm_mse,lstm_rmse,r2,mae,lstm_model,df_predicted_lstm,lag_order,scaler = aion_dlts_obj.ts_lstm(tdata) + if status.lower() == 'success': + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info(""LSTM Univariant User selected scoring parameter is r2. r2 value: ""+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v=lstm_rmse + self.log.info(""LSTM Univariant User selected scoring parameter is RMSE. Rmse value: ""+str(lstm_rmse)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=lstm_mse + self.log.info(""LSTM Univariant User selected scoring parameter is MSE. Mse value: ""+str(lstm_mse)) + elif (self.scoreParam.lower() == ""mae""): + scoringparam_v=mae + self.log.info(""LSTM Univariant User selected scoring parameter is MAE. Mae value: ""+str(mae)) + else: + scoringparam_v=lstm_rmse + error_matrix = '""RMSE"":""'+str(round(lstm_rmse,2))+'"",""MSE"":""'+str(round(lstm_mse,2))+'""' + error_matrix=error_matrix+',""R2"":""'+str(round(r2,2))+'"",""MAE"":""'+str(round(mae,2))+'""' + self.log.info(""LSTM Univariant, all scoring param results: ""+str(error_matrix)) + self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs + scoredetails = '{""Model"":""LSTM Univariant"",""Score"":'+str(scoringparam_v)+',""Scoring Param"": ""'+str(self.scoreParam.lower())+'""}' + return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,lstm_model,selectedColumns,error_matrix,scoredetails,df_predicted_lstm,lag_order,scaler + else: + return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None + except Exception as inst: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None + def getUniVarientMLPModel(self): + try: + self.log.info('Status:- |... TimeSeries Algorithm applied: MLP') + modelName='mlp' + lstmconfig = self.modelconfig['mlp'] + df = self.data + try: + # df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. 
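# A minimal, standalone sketch of the datetime-parsing fallback referred to by the
# bug 13513 comments in this module: try a naive parse first, fall back to a
# UTC-aware parse for offset timestamps, then drop rows that could not be parsed.
# The column name 'timestamp' and the sample values below are illustrative only.
import pandas as pd

def parse_datetime_column(df, col='timestamp'):
    try:
        # non-UTC timestamps: unparseable values become NaT instead of raising
        df[col] = pd.to_datetime(df[col], errors='coerce')
    except Exception:
        # UTC / mixed-offset timestamps: force a single tz-aware UTC conversion
        df[col] = pd.to_datetime(df[col], errors='coerce', utc=True)
    # the surrounding code drops any row left with a NaN after coercion
    return df.dropna()

# usage (illustrative):
# sample = pd.DataFrame({'timestamp': ['2023-01-01 00:00:00', 'bad value'], 'y': [1.0, 2.0]})
# sample = parse_datetime_column(sample)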
+ try: + #for non utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True) + df = df.dropna() + except: + pass + tdata = df.drop([self.dateTimeFeature], axis=1) + tdata.index = df[self.dateTimeFeature] + tdata = pd.DataFrame(tdata[self.targetFeature]) + selectedColumns = self.targetFeature+','+self.dateTimeFeature + selectedColumns = selectedColumns.split("","") + aion_dlts_obj = timeseriesDLUnivariate(lstmconfig,self.trainPercentage,self.targetFeature,self.dateTimeFeature,modelName) + mlp_mse,mlp_rmse,r2,mae,mlp_model,df_predicted_mlp,look_back,scaler = aion_dlts_obj.mlpDL(tdata) + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info(""MLP Univariant User selected scoring parameter is R2. R2 value: ""+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v=mlp_rmse + self.log.info(""MLP Univariant User selected scoring parameter is RMSE. Rmse value: ""+str(mlp_rmse)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=mlp_mse + self.log.info(""MLP Univariant User selected scoring parameter is MSE. Mse value: ""+str(mlp_mse)) + elif (self.scoreParam.lower() == ""mae""): + scoringparam_v=mae + self.log.info(""MLP Univariant User selected scoring parameter is MAE. Mae value: ""+str(mae)) + else: + scoringparam_v=mlp_rmse + error_matrix = '""RMSE"":""'+str(round(mlp_rmse,2))+'"",""MSE"":""'+str(round(mlp_mse,2))+'""' + error_matrix=error_matrix+',""R2"":""'+str(round(r2,2))+'"",""MAE"":""'+str(round(mae,2))+'""' + self.log.info(""MLP Univariant, all scoring param results: ""+str(error_matrix)) + self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs + scoredetails = '{""Model"":""MLP"",""Score"":'+str(scoringparam_v)+',""Scoring Param"": ""'+str(self.scoreParam.lower())+'""}' + return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,mlp_model,selectedColumns,error_matrix,scoredetails,df_predicted_mlp,look_back,scaler + except Exception as inst: + import traceback + self.log.info(""MLP Error in timeseries module: \\n""+str(traceback.print_exc())) + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None + def getARIMAmodel(self,predicted_data_file): + try: + modelName='arima' + + status,eion_arima_obj = self.get_arima_values() + self.log.info('Status:- |... TimeSeries Algorithm applied: ARIMA') + selected_feature_list = self.data[self.targetFeature].values + selected_feature_list = selected_feature_list.astype('int32') + self.log.info('-------> Target Feature First 5 Rows: ') + self.log.info(self.data[self.targetFeature].head(5)) + X_Train = pd.DataFrame(self.data[self.targetFeature]) + try: + # self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce') + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. 
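# The scoring blocks above pick one of R2/RMSE/MSE/MAE from the same sklearn metrics;
# below is a compact, self-contained sketch of that computation. Note that in
# mean_absolute_percentage_error() defined earlier, operator precedence appears to add
# the epsilon guard to the quotient ((y_true - y_pred) / y_true + eps) rather than to
# the denominator; the guarded form intended there would look like the helper below.
# All sample values are illustrative.
import sys
from math import sqrt
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

def guarded_mape(y_true, y_pred):
    y_true, y_pred = np.array(y_true, dtype=float), np.array(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / (y_true + sys.float_info.epsilon))) * 100

def ts_scores(y_true, y_pred, score_param='rmse'):
    mse = mean_squared_error(y_true, y_pred)
    scores = {
        'mse': mse,
        'rmse': sqrt(mse),
        'mae': mean_absolute_error(y_true, y_pred),
        'r2': r2_score(y_true, y_pred),
        'mape': guarded_mape(y_true, y_pred),
    }
    # mirror the selection pattern above: unknown scoring params fall back to RMSE
    return scores.get(score_param.lower(), scores['rmse']), scores

# usage (illustrative):
# selected, all_scores = ts_scores([3.0, 5.0, 7.0], [2.5, 5.5, 6.0], score_param='mae')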
+ try: + #for non utc timestamp + self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce') + except: + #for utc timestamp + self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce',utc=True) + self.data = self.data.dropna() + except: + pass + if status.lower() == 'success': + self.model_fit,mae,rmse_arima,mse,r2,aic_score,mape,valid,pred = eion_arima_obj.eion_arima(X_Train) + ## Added for additional scoring params + if (self.scoreParam.lower() == ""r2""): + scoringparam_v=r2 + self.log.info(""ARIMA Univariant User selected scoring parameter is r2. r2 value: ""+str(r2)) + elif (self.scoreParam.lower() == ""rmse""): + scoringparam_v" +"=rmse_arima + self.log.info(""ARIMA Univariant User selected scoring parameter is RMSE. RMSE value: ""+str(rmse_arima)) + elif (self.scoreParam.lower() == ""mse""): + scoringparam_v=mse + sel" +".append(rmse_mlp) + modelScore.append(rmse_var) + if (min(modelScore) == rmse_arima and rmse_arima != 0xFFFF): + best_model='arima' + self.log.info('Status:- |... TimeSeries Best Algorithm: ARIMA') + return best_model + elif (min(modelScore) == rmse_prophet and rmse_prophet != 0xFFFF): + best_model='fbprophet' + self.log.info('Status:- |... TimeSeries Best Algorithm: FBPROPHET') + return best_model + elif (min(modelScore) == rmse_lstm and rmse_lstm != 0xFFFF): + best_model='lstm' + self.log.info('Status:- |... TimeSeries Best Algorithm: LSTM') + return best_model + elif (min(modelScore) == rmse_mlp and rmse_mlp != 0xFFFF): + best_model='mlp' + self.log.info('Status:- |... TimeSeries Best Algorithm: MLP') + return best_model + elif (min(modelScore) == rmse_var and rmse_var != 0xFFFF): + best_model='var' + self.log.info('Status:- |... TimeSeries Best Algorithm: VAR') + return best_model + else: + #'Both arima and fbprophet rmse are equal, so both models are performing equal. + ## So, selecting arima as best one. 
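# A compact sketch of the best-model selection performed in getbestmodel() above:
# every candidate reports an error-style score (lower is better for RMSE/MSE/MAE),
# algorithms that were not run carry a large sentinel (0xFFFF) so they can never win,
# and ARIMA is the tie-break default. Names and sample scores are illustrative.
NOT_RUN = 0xFFFF

def pick_best(scores, default='arima'):
    # scores: dict of algorithm name -> error-style score (lower is better)
    valid = {name: s for name, s in scores.items() if s != NOT_RUN}
    if not valid:
        return default
    return min(valid, key=valid.get)

# usage (illustrative):
# pick_best({'arima': 12.4, 'fbprophet': 9.8, 'lstm': NOT_RUN, 'mlp': 11.0, 'var': NOT_RUN})
# -> 'fbprophet'
# For an R2-style score (higher is better) the comparison direction flips, which is
# why the caller substitutes a -0xFFFF sentinel when the scoring parameter is r2.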
+ best_model='arima' + return best_model + + + + + ## Selecting best model algorithm + def bestmodelProcess(self,modelNames,nfeatures,trained_data_file,tFeature,predicted_data_file,dataFolderLocation): + try: + best_model='' + lag_order = 1 + predict_var=None + predict_arima=None + predict_lstm=None + predict_mlp=None + predict_fbprophet=None + modelNames = modelNames + modelNames=[x.lower() for x in modelNames] + inputFeature_len=nfeatures + status = 'Success' + if 'fbprophet' in modelNames: + status,modelName_prophet,fbprophet,rmse_prophet,fp_model_fit,selectedColumns_prophet,error_matrix_prophet,scoredetails_prophet,dictDiffCount_prophet,pred_freq_prophet,additional_regressors_prophet,predict_fbprophet = self.getfbprophetmodel(predicted_data_file,dataFolderLocation,tFeature) + if status.lower() == 'error': + self.log.info('-------------> FBPROPHET RMSE Score: Error') + if (self.scoreParam.lower() == 'r2'): + rmse_prophet = -0xFFFF + else: + rmse_prophet = 0xFFFF + else: + self.log.info(""-------------> FBPROPHET RMSE Score:\\t""+str(round(rmse_prophet,2))) + else: + if (self.scoreParam.lower() == 'r2'): + rmse_prophet = -0xFFFF + else: + rmse_prophet = 0xFFFF + if 'arima' in modelNames: + status,modelName,aic,rmse_arima,ar_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,rmse_arima_act,predict_arima = self.getARIMAmodel(predicted_data_file) + if status.lower() == 'error': + self.log.info('-------------> ARIMA RMSE Score: Error') + if (self.scoreParam.lower() == 'r2'): + rmse_arima = -0xFFFF + else: + rmse_arima = 0xFFFF + else: + self.log.info('-------------> ARIMA RMSE Score:\\t'+str(round(rmse_arima,2))) + else: + if (self.scoreParam.lower() == 'r2'): + rmse_arima = -0xFFFF ## -65535 + else: + rmse_arima = 0xFFFF + if 'lstm' in modelNames: + if inputFeature_len == 1: + status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getUniVarientLSTMModel() + else: + status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getLSTMMultivariate() + if status.lower() == 'error': + self.log.info('-------------> LSTM RMSE Score: Error') + if (self.scoreParam.lower() == 'r2'): + rmse_lstm = -0xFFFF + else: + rmse_lstm = 0xFFFF + else: + self.log.info('-------------> LSTM RMSE Score:\\t'+str(round(rmse_lstm,2))) + + else: + if (self.scoreParam.lower() == 'r2'): + rmse_lstm = -0xFFFF + else: + rmse_lstm = 0xFFFF + if 'mlp' in modelNames: + status,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,predict_mlp,lag_order,mlp_scaler = self.getUniVarientMLPModel() + if status.lower() == 'error': + self.log.info('-------------> MLP Score: Error') + if (self.scoreParam.lower() == 'r2'): + rmse_mlp = -0xFFFF + else: + rmse_mlp = 0xFFFF + else: + self.log.info('-------------> MLP RMSE Score:\\t'+str(round(rmse_mlp,2))) + + else: + if (self.scoreParam.lower() == 'r2'): + rmse_mlp = -0xFFFF + else: + rmse_mlp = 0xFFFF + if 'var' in modelNames: + status,modelName_var,score_var_type,rmse_var,var_model,var_selectedColumns,error_matrix_var,scoredetails_var,predict_var,dictDiffCount,pred_freq,additional_regressors,lag_order = self.getVARmodel() + if status.lower() == 'error': + self.log.info('-------------> VAR Score: Error') + if (self.scoreParam.lower() == 'r2'): + rmse_var = -0xFFFF + else: + rmse_var = 0xFFFF + else: 
+ if (self.scoreParam.lower() == 'r2'): + rmse_var = -0xFFFF + else: + rmse_var = 0xFFFF + best_model = self.getbestmodel(rmse_prophet,rmse_arima,rmse_lstm,rmse_mlp,rmse_var) + if (best_model.lower() == 'arima'): + self.log.info('Best model is ARIMA based on metric '+str(self.scoreParam.lower())) + predict_arima.to_csv(predicted_data_file) + filename,saved_model = self.save_model(ar_model_fit) + return best_model,modelName,aic,rmse_arima,ar_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' + elif (best_model.lower() == 'fbprophet'): + self.log.info('Best model is fbprophet based on metric '+str(self.scoreParam.lower())) + predict_fbprophet.to_csv(predicted_data_file) + filename,saved_model = self.save_model(fp_model_fit) + return best_model,modelName_prophet,fbprophet,rmse_prophet,fp_model_fit,selectedColumns_prophet,error_matrix_prophet,scoredetails_prophet,dictDiffCount_prophet,pred_freq_prophet,additional_regressors_prophet,filename,saved_model,lag_order,'NA' + elif (best_model.lower() == 'var'): + self.log.info('Best model is VAR based on metric '+str(self.scoreParam.lower())) + self.data.to_csv(trained_data_file) + predict_var.to_csv(predicted_data_file) + filename,saved_model = self.save_model(var_model) + return best_model,modelName_var,score_var_type,rmse_var,var_model,var_selectedColumns,error_matrix_var,scoredetails_var,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' + elif (best_model.lower() == 'lstm'): + self.log.info('Best model is LSTM based on metric '+str(self.scoreParam.lower())) + predict_lstm.to_csv(predicted_data_file) + filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler) + return best_model,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model + elif (best_model.lower() == 'mlp'): + self.log.info('Best model is MLP based on metric '+str(self.scoreParam.lower())) + predict_mlp.to_csv(predicted_data_file) + filename,saved_model,scaler_model = self.save_dl_model(mlp_model_fit,mlp_scaler) + return best_model,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model + else: + pass + except Exception as e: + self.log.info('Issue in running multi time series algorithm selection process..Please check the config params') + self.log.info('error: '+str(e)) + + #Method to determine seasonality and stationrity in the input data features. (Task:12622,12623) + def seasonality_stationarity_test(self): + ##The below part is to test stationarity and sessonality in the given time series data based on statsmodels lib. + #self.data,self.targetFeature,self.dateTimeFeature + self.log.info(""<-------------- Time series stationarity and seasonality test Started...---------------->\\n"") + ts_sstest=tsStationarySeasonalityTest(self.data,self.deployLocation) + ## Time series Stationary check + ## Currently stationarity check method set as Augmented dickey fuller, but kpss method also implemented. 
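# A minimal sketch of the two checks delegated to tsStationarySeasonalityTest above:
# an Augmented Dickey-Fuller test for stationarity (KPSS being the alternative noted
# in the comment) and an additive seasonal decomposition. The synthetic monthly series
# and the period of 12 are illustrative only.
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.seasonal import seasonal_decompose

idx = pd.date_range('2021-01-01', periods=120, freq='MS')
series = pd.Series(np.sin(np.arange(120) * 2 * np.pi / 12) + np.arange(120) * 0.05, index=idx)

adf_stat, adf_p = adfuller(series)[:2]
is_stationary = adf_p <= 0.05               # same 0.05 threshold used in get_arima_values()
kpss_stat, kpss_p, kpss_lags, _ = kpss(series, nlags='auto')

decomposition = seasonal_decompose(series, model='additive', period=12)
seasonal_strength = decomposition.seasonal.abs().mean()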
+ stationary_method='adfuller' + if (isinstance(self.targetFeature,list)): + target=self.targetFeature + pass + elif (isinstance(self.targetFeature,str)): + target=list(self.targetFeature.split(',')) + stats_model,n_lags,p_value,stationary_result,stationary_combined_res=ts_sstest.stationary_check(target,self.dateTimeFeature,stationary_method) + ## Time series Seasonality check + ##Seasonal model default set as additive + seasonal_model=""additive"" + df,decompose_result_mult,seasonality_result,seasonality_combined_res=ts_sstest.seasonal_check(target,self.dateTimeFeature,seasonal_model) + self.log.info(""<-------------- Time series stationarity and seasonality test completed.---------------->\\n"") + return stationary_result,seasonality_result + #Main timeseries function. + def timeseries_learning(self,trained_data_file,predicted_data_file,dataFolderLocation): + dataFolderLocation=dataFolderLocation + lag_order = 1 + # ##The below part is to test stationarity and sessonality in the given time series data based on statsmodels lib. + stationary_result,seasonality_result=self.seasonality_stationarity_test() + + try : + tFeature = self.targetFeature.split(',') + lentFeature=len(tFeature) + try: + if lentFeature > 1: + if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997 + self.modelName.remove('timeseriesforecasting') + if 'arima' in self.modelName: + self.log.info('Status:- |... TimeSeries algorithm ARIMA not supported for multiple features') + self.modelName.remove('arima') + if 'fbprophet' in self.modelName: + self.log.info('Status:- |... TimeSeries algorithm FBPROPHET not supported for multiple features') + self.modelName.remove('fbprophet') + if 'mlp' in self.modelName: + self.log.info('Status:- |... TimeSeries algorithm MLP not supported for multiple features') + self.modelName.remove('mlp') + if len(self.modelName) == 0: + self.log.info('--------> Default Set to VAR') + self.modelName.append('var') + + if lentFeature == 1: + if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997 + self.modelName.remove('timeseriesforecasting') + if 'var' in self.modelName: + self.log.info('Status:- |... 
TimeSeries algorithm VAR not supported for single feature') + self.modelName.remove('var') + if len(self.modelName) == 0: + self.log.info('--------> Default Set to ARIMA,FBProphet') + self.modelName.append('arima') + + + except Exception as e: + self.log.info('input model name error: '+ str(e)) + self.log.info(""error in user selected model, may be wrong configuration, please check."") + + if (len(self.modelName) > 1): + try: + self.log.info('User selected models: '+str(self.modelName)) + best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation = self.bestmodel" +"Process(self.modelName,lentFeature,trained_data_file,tFeature,predicted_data_file,dataFolderLocation) + return best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation + except Exception as e: + self.log.info('multi model timeseries processing error '+str(e)) + else: + self.modelName = self.modelName[0] + ## Normal arima ,var or fbprophet model call (user selects only one model at a time) + if self.modelName.lower() == 'fbprophet': + try: + model_name='fbprophet' + status,modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,predict_output = self.getfbprophetmodel(predicted_data_file,dataFolderLocation,tFeature) + if status.lower() == 'success': + predict_output.to_csv(predicted_data_file) + filename,saved_model = self.save_model(fp_model_fit) + return 'self.modelName',modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' + else: + raise Exception('Exception during model training') + except Exception as e: + self.log.info('fbprophet error....') + self.log.info(e) + elif self.modelName.lower() == 'encoder_decoder_lstm_mvi_uvo': + try: + status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getEncDecLSTMMultVrtInUniVrtOut() + if status.lower() == 'success': + predict_lstm.to_csv(predicted_data_file) + filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler) + return self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model + else: + raise Exception('Exception during model training') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + elif self.modelName.lower() == 'lstm': + try: + if lentFeature == 1: + status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getUniVarientLSTMModel() + else: + status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getLSTMMultivariate() + if status.lower() == 'success': + predict_lstm.to_csv(predicted_data_file) + filename,saved_model,scaler_model = 
self.save_dl_model(lstm_model_fit,lstm_scaler) + return self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model + else: + raise Exception('Exception during model training') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + elif self.modelName.lower() == 'mlp': + try: + status,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,predict_mlp,lag_order,mlp_scaler = self.getUniVarientMLPModel() + if status.lower() == 'success': + predict_mlp.to_csv(predicted_data_file) + filename,saved_model,scaler_model = self.save_dl_model(mlp_model_fit,mlp_scaler) + return self.modelName,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model + else: + raise Exception('Exception during model training') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + else: + #task 12627 time series profiler removed + if lentFeature>1: + self.modelName='var' + self.data.to_csv(trained_data_file) + else: + self.modelName='arima' + if self.modelName.lower()=='var': + tsModelTestObj=timeseriesModelTests(self.data,self.targetFeature,self.dateTimeFeature,0) + self.data,self.dictDiffCount=tsModelTestObj.StatinaryChecks(self.dictDiffCount) + #self.log.info('Status:- |... Stationary Check Done.') + gtestResults,countVariables=tsModelTestObj.grangersCausationMatrix(self.data,tFeature) + if countVariables >= (lentFeature*lentFeature)-(lentFeature) or ((lentFeature*lentFeature)-(lentFeature))/2 : + coIntegrationVectors=tsModelTestObj.coIntegrationTest(self.data) + if coIntegrationVectors<=lentFeature: + self.log.info(""There are statistically significant relationship in data "") + self.log.info('Status:- |... Statistically Check Done. Statistically significant relations') + else: + self.log.info(""There are no statistically significant relationship in data"") + self.log.info('Status:- |... Statistically Check Done. 
No statistically significant relations') + else: + self.modelName='arima' + if self.modelName.lower()=='var': + try: + self.log.info('ARIMA, FBProphet cannot apply, Input data contains more than one feature, only VAR algorithm can apply, applying VAR by AION \\n') + status,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,predict_var,dictDiffCount,pred_freq,additional_regressors,lag_order = self.getVARmodel() + if status.lower() == 'success': + filename,saved_model = self.save_model(model_fit) + predict_var.to_csv(predicted_data_file) + return self.modelName,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' + else: + raise Exception('Exception during VAR model training') + except Exception as inst: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + if self.modelName.lower() == 'arima': + try: + status,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,rmse_arima_act,predict_output = self.getARIMAmodel(predicted_data_file) + if status.lower() == 'success': + predict_output.to_csv(predicted_data_file) + filename,saved_model = self.save_model(model_fit) + lag_order=0 + return self.modelName,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' + else: + raise Exception('Exception during model training') + except Exception as inst: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + except Exception as inst: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def invertTransformation(self,Xtrain,targetFeature, preddf,dictDiffCount): + + try: + dfforecast = preddf.copy() + self.log.info(dfforecast.head(5)) + columns =targetFeature.split("","") + self.log.info(columns) + self.log.info(dictDiffCount) + for col in columns: + if col in dictDiffCount: + if dictDiffCount[col]==2: + dfforecast[col] = (Xtrain[col].iloc[-1]-Xtrain[col].iloc[-2]) + dfforecast[col].cumsum() + dfforecast[col] = Xtrain[col].iloc[-1] + dfforecast[col].cumsum() + # Roll back 1st Diff + return dfforecast + except Exception as inst: + self.log.info(' '+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +# import os +import tensorflow as tf +import numpy as np +from sklearn.preprocessing import MinMaxScaler +from sklearn.model_selection import train_test_split +import math +from sklearn.metrics import mean_squared_error +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Dropout +from tensorflow.keras import Sequential +from tensorflow.keras.layers import LSTM +import logging +# import kerastuner +import keras_tuner +#from keras_tuner.engine.hyperparameters import HyperParameters +from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband + +import warnings +warnings.simplefilter(""ignore"", UserWarning) + +# from keras.models import load_model + +# from tensorflow.keras.optimizers import SGD +# from tensorflow.keras.utils import load_model +from tensorflow.keras.models import load_model + + +class timeseriesDLUnivariate: + def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature,modelName): + self.look_back=None + #Preprocessed dataframe + # self.df=df + self.savedmodelname=None + self.deploy_location=None + self.epochs=None + self.batch_size=None + self.hidden_layers=None + self.optimizer=None + self.activation_fn=None + self.loss_fn=None + self.first_layer=None + self.dropout=None + self.model_name=None + self.hpt_train=None + ##Below is model type (MLP or lstm) + self.model_type=modelName + #self.dataFolderLocation=str(dataFolderLocation) + ##Added for ts hpt + self.tuner_algorithm="""" + + self.dl_params = configfile + # self.data=data + self.targetFeature=targetFeature + self.dateTimeFeature=dateTimeFeature + self.testpercentage = testpercentage + self.log = logging.getLogger('eion') + + #To extract dict key,values + def extract_params(self,dict): + self.dict=dict + for k,v in self.dict.items(): + return k,v + + ##Get deep learning model hyperparameter from advanced config + def getdlparams(self): + val=self.dl_params + self." +"log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') + self.log.info("" ""+str(val)) + for k,v in val.items(): + try: + if (k == ""tuner_algorithm""): + self.tuner_algorithm=str(v) + elif (k == ""activation""): + self.activation_fn=str(v) + elif (k == ""optimizer""): + self.optimizer=str(v) + elif (k == ""loss""): + self.loss_fn=str(v) + elif (k == ""first_layer""): + if not isinstance(k,list): + self.first_layer=str(v).split(',') + else: + self.first_layer=k + elif (k == ""lag_order""): + if isinstance(k,list): + k = ''.join(v) + k=int(float(str(v))) + else: + self.look_back=int(float(str(v))) + elif (k == ""hidden_layers""): + self.hidden_layers=int(v) + elif (k == ""dropout""): + if not isinstance(k,list): + self.dropout=str(v).split(',') + else: + self.dropout=k + elif (k == ""batch_size""): + self.batch_size=int(v) + elif (k == ""epochs""): + self.epochs=int(v) + elif (k == ""model_name""): + self.model_name=str(v) + + except Exception as e: + self.log.info('Exception occured in deeep learn param reading, setting up default params.') + self.activation_fn=""relu"" + self.optimizer=""adam"" + self.loss_fn=""mean_squared_error"" + self.first_layer=[8,512] + self.hidden_layers=1 + self.look_back=int(2) + self.dropout=[0.1,0.5] + self.batch_size=2 + self.epochs=50 + self.model_name=""lstmmodel.h5"" + continue + + ## Just use this if user need to create dataframe from input data. 
+ def createdf(self,df): + target="""" + # splitting reframed to X and Y considering the first column to be out target featureX=reframed.drop(['var1(t)'],axis=1) + X=df.drop([target],axis=1) + Y=df[target] + X_values=X.values + Y_values=Y.values + n_predict=len(Y_values) + train_X,train_Y = X_values[:(X_values.shape[0]-n_predict),:],Y_values[:(X_values.shape[0]-n_predict)] + test_X,test_Y = X_values[(X_values.shape[0]-n_predict):,:],Y_values[(X_values.shape[0]-n_predict):] + + #reshaping train and test to feed to LSTM + train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1])) + test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) + + return train_X,train_Y,test_X,test_Y + + # convert an array of values into a dataset matrix + def numpydf(self,dataset, look_back): + dataX, dataY = [], [] + for i in range(len(dataset)-look_back-1): + a = dataset[i:(i+look_back), 0] + dataX.append(a) + dataY.append(dataset[i + look_back, 0]) + + # x,y=numpy.array(dataX), numpy.array(dataY) + return np.array(dataX), np.array(dataY) + + def model_save(self,model): + import os.path + savedmodelname=self.model_name + path = os.path.join(self.deploy_location,savedmodelname) + model.save(path) + return (savedmodelname) + + ## MLP model buid + def mlpDL(self,df): + self.log.info(""MLP timeseries learning starts....."") + try: + self.getdlparams() + # look_back = self.look_back + dataset = df.values + dataset = dataset.astype('float32') + ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags. + ##number of lag calculated just for reference ,not used now. + #Dont delete this, just use in future. + from statsmodels.tsa.stattools import kpss + statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature]) + self.log.info(""Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t""+str(n_lags)) + scaler = MinMaxScaler(feature_range=(0, 1)) + dataset = scaler.fit_transform(dataset) + # split into train and test sets + train_size = int(len(dataset) * 0.80) + test_size = len(dataset) - train_size + train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] + self.hpt_train=train + tuner_alg=self.tuner_algorithm + try: + ## Remove untitled_project dir in AION root folder created by previous tuner search run + import shutil + shutil.rmtree(r"".\\untitled_project"") + except: + pass + if (tuner_alg.lower()==""randomsearch""): + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""bayesianoptimization""): + tuner=BayesianOptimization(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""hyperband""): + tuner=Hyperband(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_epochs=50,factor=3) + # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis])) + stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5) + try: + tuner.search(x=train,y=train,validation_data=(test,test),callbacks=[stop_early]) + except: + tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early]) + # best_model=tuner.get_best_models(num_models=1)[0] + best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] + best_first_layer=best_hps.get('units') + best_dropout=best_hps.get('Dropout_rate') + best_learning_rate=float(best_hps.get('learning_rate')) + 
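# A self-contained sketch of the data preparation used by mlpDL()/ts_lstm(): scale the
# series to [0, 1], build (X, y) windows with a numpydf-style look-back, and invert the
# scaling before computing metrics, as the predictions are inverted in the surrounding
# code. The synthetic series, look_back=1 and the fake predictions are illustrative.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def make_windows(dataset, look_back=1):
    # dataset: 2-D array of shape (n, 1); returns X of shape (m, look_back), y of shape (m,)
    data_x, data_y = [], []
    for i in range(len(dataset) - look_back - 1):
        data_x.append(dataset[i:(i + look_back), 0])
        data_y.append(dataset[i + look_back, 0])
    return np.array(data_x), np.array(data_y)

values = np.arange(50, dtype='float32').reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)

train_size = int(len(scaled) * 0.80)             # same 80/20 split as above
train, test = scaled[:train_size], scaled[train_size:]
train_x, train_y = make_windows(train, look_back=1)
test_x, test_y = make_windows(test, look_back=1)

# after model.predict(test_x), predictions are mapped back to the original units:
fake_predictions = test_x[:, :1]                 # stand-in for model output, shape (m, 1)
restored_pred = scaler.inverse_transform(fake_predictions)
restored_y = scaler.inverse_transform(test_y.reshape(-1, 1))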
self.log.info(""best hyperparameter values for mlp: \\n""+str(best_hps.values)) + look_back = 1 ## Because univariate problemtype + trainX, trainY = self.numpydf(train, look_back) + testX, testY = self.numpydf(test, look_back) + best_hmodel=tuner.hypermodel.build(best_hps) + ##Added for mlp issue,because tuner build also need to compile. + try: + best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer) + except: + pass + model_fit = best_hmodel.fit(trainX, trainY, epochs=self.epochs, batch_size=self.batch_size, verbose=2) + val_acc_per_epoch = model_fit.history['loss'] + best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1 + self.log.info(""MLP best epochs value:\\n""+str(best_epoch)) + trainScore = best_hmodel.evaluate(trainX, trainY, verbose=0) + testScore = best_hmodel.evaluate(testX, testY, verbose=0) + #Scoring values for the model + mse_eval=testScore + try: + #If mse_eval is list of values + min_v=min(mse_eval) + except: + #If mse_eval is single value + min_v=mse_eval + rmse_eval = math.sqrt(min_v) + # generate predictions for training + trainPredict = best_hmodel.predict(trainX) + #print(testX) + testPredict = best_hmodel.predict(testX) + #print(testPredict) + # invert predictions, because we used mimanmax scaler + trainY = scaler.inverse_transform([trainY]) + trainPredict = scaler.inverse_transform(trainPredict) + ## For test data + testY = scaler.inverse_transform([testY]) + testPredict = scaler.inverse_transform(testPredict) + ## Creating dataframe for actual,predictions + predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred']) + actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual']) + df_predicted=pd.concat([actual,predictions],axis=1) + #print(df_predicted) + from math import sqrt + from sklearn.metrics import mean_squared_error + try: + mse_mlp = mean_squared_error(testY.T,testPredict) + rmse_mlp=sqrt(mse_mlp) + self.log.info('mse_mlp: '+str(mse_mlp)) + self.log.info('rmse_mlp: '+str(rmse_mlp)) + from sklearn.metrics import r2_score + from sklearn.metrics import mean_absolute_error + r2 = r2_score(testY.T,testPredict) + mae = mean_absolute_error(testY.T,testPredict) + self.log.info('r2_mlp: '+str(r2)) + self.log.info('mae_mlp: '+str(mae)) + except Exception as e: + import traceback + self.log.info(""MLP dataframe creation error traceback: \\n""+str(traceback.print_exc())) + self.log.info(e) + # df_predicted.to_csv('mlp_prediction.csv') + + except Exception as e: + self.log.info(""MLP timeseries model traceback error msg e: ""+str(e)) + self.log.info(""MLP training successfully completed.\\n"") + return mse_mlp,rmse_mlp,r2,mae,best_hmodel,df_predicted,look_back,scaler + + ## Added function for hyperparam tuning (TFSTask:7033) + def build_model(self,hp): + try: + loss=self.loss_fn + optimizer=self.optimizer + try: + if optimizer.lower() == ""adam"": + optimizer=tf.keras.optimizers.Adam + elif(optimizer.lower() == ""adadelta""): + optimizer=tf.keras.optimizers.experimental.Adadelta + elif(optimizer.lower() == ""nadam""): + optimizer=tf.keras.optimizers.experimental.Nadam + elif(optimizer.lower() == ""adagrad""): + optimizer=tf.keras.optimizers.experimental.Adagrad + elif(optimizer.lower() == ""adamax""): + optimizer=tf.keras.optimizers.experimental.Adamax + elif(optimizer.lower() == ""rmsprop""): + optimizer=tf.keras.optimizers.experimental.RMSprop + elif(optimizer.lower() == ""sgd""): + optimizer=tf.keras.optimizers.experimental.SGD + else: + optimizer=tf.keras.optimizers.Adam + except: + 
optimizer=tf.keras.optimizers.Adam + pass + first_layer_min=round(int(self.first_layer[0])) + first_layer_max=round(int(self.first_layer[1])) + dropout_min=float(self.dropout[0]) + dropout_max=float(self.dropout[1]) + model=tf.keras.Sequential() + if (self.model_type.lower() == 'lstm'): + model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.hpt_train.shape[1]), + activation=hp.Choice('dense_activation',values=['relu']))) + elif (self.model_type.lower() == 'mlp'): + # model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(hp.Int('time_steps',min_value=look_back_min,max_value=look_back_max,step=1)), + # activation='relu')) + ##input_dim is 1 because mlp is for univariate. + model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(1),activation='relu')) + model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) + model.add(Dense(units=1)) + model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[loss]) + except Exception as e: + import traceback + self.log.info(""lstm errorbuild_model traceback: \\n""+str(traceback.print_exc())) + return model + + ##LSTM timeseries function call + def ts_lstm(self,df): + self.log.info(""lstm network model learning starts.....\\n"") + try: + self.getdlparams() + dataset = df.values + dataset = dataset.astype('float32') + ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags. + ##number of lag calculated just for reference ,not used now. + #Dont delete this, just use in future. + from statsmodels.tsa.stattools import kpss + statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature]) + self.log.info(""Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t""+str(" +"n_lags)) + # normalize the dataset + scaler = MinMaxScaler(feature_range=(0, 1)) + dataset = scaler.fit_transform(dataset) + # split into train and test sets + train_size = int(len(dataset) * 0.80) + test_size = len(dataset) - train_size + train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] + self.hpt_train=train + tuner_alg=self.tuner_algorithm + try: + ## Remove untitled_project dir in AION root folder created by previous tuner search run + import shutil + shutil.rmtree(r"".\\untitled_project"") + except: + pass + if (tuner_alg.lower()==""randomsearch""): + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""bayesianoptimization""): + tuner=BayesianOptimization(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""hyperband""): + tuner=Hyperband(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_epochs=50,factor=3) + # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis])) + from keras.callbacks import EarlyStopping + stop_early = EarlyStopping(monitor='val_loss', patience=5) + ##Need both x and y with same dimention. 
+ tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early]) + # tuner.search(x=train,y=test,validation_data=(test,test),callbacks=[stop_early]) + best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] + best_time_steps=self.look_back + self.log.info(""best lag order or lookback (time_steps) for LSTM: \\n""+str(best_time_steps)) + self.log.info(""best hyperparameter values for LSTM: \\n""+str(best_hps.values)) + look_back = best_time_steps + trainX, trainY = self.numpydf(train, look_back) + testX, testY = self.numpydf(test, look_back) + # reshape input to be [samples, time steps, features] + trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) + testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1)) + #create and fit the LSTM network + best_hmodel=tuner.hypermodel.build(best_hps) + try: + best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer) + except: + pass + model_fit = best_hmodel.fit(trainX, trainY, validation_split=0.2, epochs=self.epochs, batch_size=self.batch_size, verbose=2) + val_acc_per_epoch = model_fit.history['loss'] + best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1 + self.log.info(""best epochs value:\\n""+str(best_epoch)) + # best_hmodel=tuner.hypermodel.build(best_hps) + # best_hmodel.fit(x=trainX,y=trainY,validation_split=0.2,epochs=best_epoch) + ##Using model_evaluate,calculate mse + # mse_eval = model.evaluate(testX, testY, verbose=0) + mse_eval = best_hmodel.evaluate(testX, testY, verbose=0) + try: + #If mse_eval is list of values + min_v=min(mse_eval) + except: + #If mse_eval is single value + min_v=mse_eval + rmse_eval=math.sqrt(min_v) + # self.log.info('LSTM mse:'+str(mse_eval)) + # self.log.info('LSTM rmse:'+str(rmse_eval)) + # lstm time series predictions + trainPredict = best_hmodel.predict(trainX) + testPredict = best_hmodel.predict(testX) + # invert predictions, because we used mim=nmax scaler + trainY = scaler.inverse_transform([trainY]) + trainPredict = scaler.inverse_transform(trainPredict) + testY = scaler.inverse_transform([testY]) + testPredict = scaler.inverse_transform(testPredict) + ## Creating dataframe for actual,predictions + predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred']) + actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual']) + df_predicted=pd.concat([actual,predictions],axis=1) + from math import sqrt + from sklearn.metrics import mean_squared_error + try: + mse_lstm=None + mse_lstm = mean_squared_error(testY.T,testPredict) + rmse_lstm=sqrt(mse_lstm) + self.log.info(""mse_lstm: ""+str(mse_lstm)) + self.log.info(""rmse_lstm: ""+str(rmse_lstm)) + from sklearn.metrics import r2_score + from sklearn.metrics import mean_absolute_error + r2 = r2_score(testY.T,testPredict) + mae = mean_absolute_error(testY.T,testPredict) + self.log.info('r2_lstm: '+str(r2)) + self.log.info('mae_lstm: '+str(mae)) + except Exception as e: + self.log.info(""lstm error loss fns""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + except Exception as e: + import traceback + self.log.info(""lstm training error traceback: \\n""+str(traceback.print_exc())) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + return 'Success',mse_lstm,rmse_lstm,r2,mae,best_hmodel,df_predicted,look_back,scaler + + + +if __name__ == '__main__': + print('Inside timeseriesDLUnivariate main....\\n') +# tsdl_obj = timeseriesDLUnivariate() + + ## for testing purpose + ''' + df1= pd.read_csv(r""C:\\aiontest\\testPrograms\\Data\\energydemand.csv"",encoding='utf-8', engine='python') 
+ dateTimeFeature = ""utcTimeStamp"" + targetFeature=""temperature"" + try: + df1[dateTimeFeature] = pd.to_datetime(df1[dateTimeFeature]) #, format = '%d/%m/%Y %H.%M') + except: + pass + tdata = df1.drop([dateTimeFeature], axis=1) + tdata.index = df1[dateTimeFeature] + tdata = pd.DataFrame(tdata[targetFeature]) + cols = tdata.columns + mse,rmse,model = tsdl_obj.mlpDL(tdata) + lmse,lrmse,lstmmodel = tsdl_obj.ts_lstm(tdata) + print(""mlp mse: \\n"",mse) + print(""mlp rmse: \\n"",rmse) + print(""lstm mse: \\n"",lmse) + print(""lstm rmse: \\n"",lrmse) + savedmodelname=tsdl_obj.model_save(lstmmodel) + ''' + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import sys +import pandas as pd +import numpy as np +import numpy +import pandas +import math +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, RepeatVector +from sklearn.preprocessing import MinMaxScaler +import logging +import tensorflow as tf +import keras_tuner +#from keras_tuner.engine.hyperparameters import HyperParameters +from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband +from sklearn.model_selection import train_test_split +from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator +import warnings +warnings.simplefilter(""ignore"", UserWarning) +from sklearn.metrics import mean_absolute_percentage_error + + +class tsDLMultiVrtInUniVrtOut: + def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature): + self.look_back=None + self.look_forward=None + + # self.df=df + + self.epochs=None + self.batch_size=None + self.hidden_layers=None + self.optimizer=None + self.activation_fn=""relu"" + self.loss_fn=None + self.first_layer=None + self.dropout=None + self.model_name=None + self.dl_params = configfile + # self.data=data + self.targetFeature=targetFeature + self.dateTimeFeature=dateTimeFeature + self.testpercentage = float(testpercentage) + self.log = logging.getLogger('eion') + ##Added for ts hpt (TFSTask:7033) + self.tuner_algorithm="""" + self.num_features=0 + + ##Get deep learning model hyperparameter from advanced config + def getdlparams(self): + val=self.dl_params + self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') + self.log.info("" ""+str(val)) + for k,v in val.items(): + try: + if (k == ""tuner_algorithm""): + self.tuner_algorithm=str(v) + elif (k == ""activation""): + if not isinstance(k,list): + self.activation_fn=str(v).split(',') + else: + self.activation_fn=k + elif (k == ""optimizer""): + self.optimizer=str(v) + elif (k == ""loss""): + self.loss_fn=str(v) + elif (k == ""first_layer""): + if not isinstance(k,list): + self.first_layer=str(v).split(',') + else: + self.first_layer=k + elif (k == ""lag_order""): + if isinstance(k,list): + k = ''.join(v) + k=int(float(str(v))) + else: + self.look_back=int(float(str(v))) + elif (k == ""forward_order""): + if isinstance(k,list): + k = ''.join(v) + k=int(float(str(v))) + else: + self.look_forward=int(float(str(v))) + 
elif (k == ""hidden_layers""): + self.hidden_layers=int(v) + elif (k == ""dropout""): + if not isinstance(k,list): + self.dropout=str(v).split(',') + else: + self.dropout=k + elif (k == ""batch_size""): + self.batch_size=int(v) + elif (k == ""epochs""): + self.epochs=int(v) + elif (k == ""model_name""): + self.model_name=str(v) + + except Exception as e: + self.log.info('Exception occured in deeep learn param reading, setting up default params.') + self.activation_fn=""relu"" + self.optimizer=""adam"" + self.loss_fn=""mean_squared_error"" + self.first_layer=[8,512] + self.hidden_layers=1 + self.look_back=int(2) + self.dropout=[0.0,0.1,0.01] + self.batch_size=2 + self.epochs=50 + self.model_name=""lstmmodel.h5"" + continue + + # Reshape the data to the required input shape of the LSTM model + def create_dataset(self,series, n_past, n_future, targetcolindx): + X, y = list(), list() + for window_start in range(len(series)): + past_end = window_start + n_past + future_end = past_end + n_future + if future_end > len(series): + break + # slicing the past and future parts of the window + past, future = series[window_start:past_end, :], series[past_end:future_end, targetcolindx] + X.append(past) + y.append(future) + return np.array(X), np.array(y) + #return X, y + + + ## Added function for hyperparam tuning (TFSTask:7033) + def build_model(self,hp): + n_features = self.num_features + try: + loss=self.loss_fn + optimizer=self.optimizer + # self.getdlparams() + try: + if optimizer.lower() == ""adam"": + optimizer=tensorflow.keras.optimizers.Adam + elif(optimizer.lower() == ""adadelta""): + optimizer=tensorflow.keras.optimizers.experimental.Adadelta + elif(optimizer.lower() == ""nadam""): + optimizer=tensorflow.keras.optimizers.experimental.Nadam + elif(optimizer.lower() == ""adagrad""): + optimizer=tensorflow.keras.optimizers.experimental.Adagrad + elif(optimizer.lower() == ""adamax""): + optimizer=tensorflow.keras.optimizers.experimental.Adamax + elif(optimizer.lower() == ""rmsprop""): + optimizer=tensorflow.keras.optimizers.experimental.RMSprop + elif(optimizer.lower() == ""sgd""): + optimizer=tensorflow.keras.optimizers.experimental.SGD + else: + optimizer=tensorflow.keras.optimizers.Adam + except: + optimizer=tf.keras.optimizers.Adam + pass + # look_back_min=int(self.look_back[0]) + # look_back_max=int(self.look_back[1]) + first_layer_min=round(int(self.first_layer[0])) + first_layer_max=" +"round(int(self.first_layer[1])) + dropout_min=float(self.dropout[0]) + dropout_max=float(self.dropout[1]) + dropout_step=float(self.dropout[2]) + + #import pdb; pdb.set_trace() + n_past= self.look_back + n_future = self.look_back + encoder_l = {} + encoder_outputs = {} + encoder_states = {} + decoder_l = {} + decoder_outputs = {} + encoder_inputs = Input(shape=(n_past, n_features)) + + try: + if(self.hidden_layers > 0): + encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_sequences = True, return_state=True) + else: + encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_state=True) + except Exception as e: + import traceback + self.log.info(""lstm build traceback: \\n""+str(traceback.print_exc())) + model=tf.keras.Sequential() + return model + + encoder_outputs[0] = encoder_l[0](encoder_inputs) + encoder_states[0] = 
encoder_outputs[0][1:] + + if(self.hidden_layers > 0): + for indx in range(self.hidden_layers): + lindx = indx + 1 + if lindx == self.hidden_layers: + encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_state=True) + else: + encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_sequences = True, return_state=True) + encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0]) + encoder_states[lindx] = encoder_outputs[lindx][1:] + decoder_inputs = RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0]) + else: + decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0]) + # + + if(self.hidden_layers > 0): + decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = hp.Choice(f'dec_input_activation', values = self.activation_fn), return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) + else: + decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = hp.Choice(f'dec_input_activation', values = self.activation_fn), return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) + if(self.hidden_layers > 0): + for indx in range(self.hidden_layers): + lindx = indx + 1 + decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = hp.Choice(f'dec_lstm_activation_{lindx}', values = self.activation_fn), return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx]) + decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[self.hidden_layers]) + decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) + + + else: + # decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1]))(decoder_l[0]) + # decoder_outputs[1] = LSTM(200, return_sequences=True)(decoder_outputs[0]) + # decoder_outputs[2] = tf.keras.layers.Flatten()(decoder_outputs[1]) + # decoder_outputs[3] = tf.keras.layers.Dense(1)(decoder_outputs[2]) + + decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[0]) + decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) + + + # + model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1]) + self.log.info(model.summary()) + model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn]) + + + except Exception as e: + import traceback + self.log.info("",Hyperparam tuning build_model err msg: \\n""+ str(e)) + self.log.info(""Hyperparam tuning build_model err traceback: \\n""+str(traceback.print_exc())) + return model + ##LSTM ecncoder decoder with multivariate input and univarite output prediction function (lstm model, train, prediction, metrics) + def lstm_encdec_mvin_uvout(self,df): + try: + loss=self.loss_fn + self.getdlparams() + n_features = 
len(df.columns) + self.num_features=n_features + n_past= self.look_back + n_future = self.look_back + + + try: + if (type(self.targetFeature) is list): + pass + else: + self.targetFeature = list(self.targetFeature.split("","")) + except: + pass + + targetColIndx = [] + for target in self.targetFeature: + targetColIndx.append(df.columns.get_loc(target)) + + #if user doesnt applies any transformation, this will get applied + scaler=MinMaxScaler() + df_trnsf=scaler.fit_transform(df) + train_data, test_data = train_test_split(df_trnsf, test_size=0.2, shuffle=False) + tuner_alg=self.tuner_algorithm + #The below create_dataset only for getting best model and best hyperparameters + X_train, y_train = self.create_dataset(train_data, n_past, n_future, targetColIndx) + X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx) + # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1],n_features)) + # y_train = y_train.reshape((y_train.shape[0], y_train.shape[1], 1)) + self.log.info(""Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \\n""+str(tuner_alg)) + try: + ## Remove untitled_project dir in AION root folder created by previous tuner search run + import shutil + shutil.rmtree(r"".\\untitled_project"") + except: + pass + try: + if (tuner_alg.lower()==""randomsearch""): + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=1,executions_per_trial=3) + elif (tuner_alg.lower()==""bayesianoptimization""): + tuner=BayesianOptimization(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""hyperband""): + tuner=Hyperband(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_epochs=50,factor=3) + else: + self.log.info(""The given alg is not implemented. 
Using default hyperparam tuning algorithm: RandomSearch.\\n"") + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + from keras.callbacks import EarlyStopping + stop_early = EarlyStopping(monitor='val_loss', patience=5) + except Exception as e: + import traceback + self.log.info(""The given alg have some issue, Using default hyperparam tuning algorithm: RandomSearch.\\n""+str(e)) + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=1,executions_per_trial=3) + self.log.info(""Started Exception default Random Search"") + #hpt search for best params + + try: + self.log.info(""First try: Tuner search started"") + tuner.search(X_train, y_train,validation_data=(X_test, y_test), callbacks=[stop_early]) + self.log.info(""First try: Tuner search ends"") + except Exception as e: + self.log.info(""Second try: Tuner search starts.\\n""+str(e)) + tuner.search(x=X_train,y=y_train,validation_split=0.2, callbacks=[stop_early]) + self.log.info(""Second try: Tuner search ends"") + + # best_model = tuner.get_best_models(num_models=1)[0] + #self.log.info(""best_model.summary(): \\n""+str(best_model.summary())) + best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] + self.log.info(""TS Multivariate LSTM best hyperparameter values:\\n""+str(best_hps.values)) + self.log.info(""Activation fn:\\n""+str(self.activation_fn)) + n_input=self.look_back + best_hmodel=tuner.hypermodel.build(best_hps) + optimizer=self.optimizer + learning_rate=float(best_hps.get('learning_rate')) + try: + ##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps. + try: + if optimizer.lower() == ""adam"": + optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) + elif(optimizer.lower() == ""adadelta""): + optimizer=tensorflow.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate) + elif(optimizer.lower() == ""nadam""): + optimizer=tensorflow.keras.optimizers.experimental.Nadam(learning_rate=learning_rate) + elif(optimizer.lower() == ""adagrad""): + optimizer=tensorflow.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate) + elif(optimizer.lower() == ""adamax""): + optimizer=tensorflow.keras.optimizers.experimental.Adamax(learning_rate=learning_rate) + elif(optimizer.lower() == ""rmsprop""): + optimizer=tensorflow.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate) + elif(optimizer.lower() == ""sgd""): + optimizer=tensorflow.keras.optimizers.experimental.SGD(learning_rate=learning_rate) + else: + optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) + except: + optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate) + pass + ##From best hyperparameter values, now creating multivariate time series model using time generator. 
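+ ## Note: TimeseriesGenerator normally windows a raw series into (batch, length, n_features)
+ ## inputs with the value at the next step as the target; here X_train/y_train are already
+ ## windowed by create_dataset, the generator is only used to peek at its first batch, and
+ ## the model further below is fit directly on X_train/y_train. Minimal sketch of the usual
+ ## call ('values' is an assumed 2-D array, not a variable in this method):
+ # gen = TimeseriesGenerator(values, values, length=n_past, batch_size=32)
+ # x0, y0 = gen[0]   # x0: (32, n_past, n_features), y0: (32, n_features)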
+ + generatorTrain=TimeseriesGenerator(X_train, y_train, length=n_past, batch_size=self.batch_size) + # generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size) + batch_0=generatorTrain[0] + x,y=batch_0 + epochs=int(self.epochs) + ##Multivariate LSTM model + try: + + encoder_l = {} + encoder_outputs = {} + encoder_states = {} + decoder_l = {} + decoder_outputs = {} + enc_lstm_dropout = {} + + enc_input_unit = best_hps.get('enc_input_unit') + enc_input_activation = best_hps.get('enc_input_activation') + dec_input_activation = best_hps.get('dec_input_activation') + dec_output_activation_1 = best_hps.get('dec_output_activation_1') + + enc_lstm_units = {} + enc_lstm_activation = {} + dec_lstm_activation = {} + for indx in range(self.hidden_layers): + lindx = indx + 1 + enc_lstm_units[lindx] = best_hps.get('enc_lstm_units_'+str(lindx)) + enc_lstm_activation[lindx] = best_hps.get('enc_lstm_activation_'+str(lindx)) + dec_lstm_activation[lindx] = best_hps.get('dec_lstm_activation_'+str(lindx)) + enc_lstm" +"_dropout[lindx] = best_hps.get('enc_lstm_dropout_'+str(lindx)) + + encoder_inputs = Input(shape=(n_past, n_features)) + if(self.hidden_layers > 0): + encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_sequences = True, return_state=True) + else: + encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_state=True) + + encoder_outputs[0] = encoder_l[0](encoder_inputs) + encoder_states[0] = encoder_outputs[0][1:] + + if(self.hidden_layers > 0): + for indx in range(self.hidden_layers): + lindx = indx + 1 + if lindx == self.hidden_layers: + encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_state=True) + else: + encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_sequences = True, return_state=True) + encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0]) + encoder_states[lindx] = encoder_outputs[lindx][1:] + decoder_inputs = RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0]) + else: + decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0]) + # + + if(self.hidden_layers > 0): + decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = dec_input_activation, return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) + else: + decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = dec_input_activation, return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) + if(self.hidden_layers > 0): + for indx in range(self.hidden_layers): + lindx = indx + 1 + decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = dec_lstm_activation[lindx], return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx]) + + decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[self.hidden_layers]) + decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) + else: + decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[0]) + decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) + + # + model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1]) + self.log.info(model.summary()) + self.log.info(""loss=""+self.loss_fn) + 
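+ ## Architecture note: the block above rebuilds the tuned encoder-decoder: stacked LSTM
+ ## encoders returning their (h, c) states, a RepeatVector(n_future) bridge fed from the
+ ## last encoder output, decoder LSTMs sized to match each encoder layer and initialised
+ ## with its states, and TimeDistributed Dense heads ending in Dense(1), so a multivariate
+ ## (n_past, n_features) input yields a univariate (n_future, 1) forecast.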
model.compile(optimizer=optimizer,loss=self.loss_fn,metrics=[self.loss_fn]) + + #model.fit_generator(generatorTrain, epochs=epochs,shuffle=False, verbose=0) + model.fit(X_train, y_train, batch_size=self.batch_size, epochs=epochs,shuffle=False, verbose=2) + except Exception as e: + import traceback + self.log.info(""multivariate model build error: error msg:: \\n""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + #predictions = model.predict_generator(generatorTest) + except Exception as e: + import traceback + self.log.info(""optimizer and timesereis generator build error: error msg:: \\n""+str(e)) + self.log.info(""optimizer and timesereis generator build error traceback: \\n""+str(traceback.print_exc())) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + try: + predictions=[] + X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx) + predictions = model.predict(X_test) + self.log.info(predictions) + #convert the x test(includes target) to 2d as inverse transform accepts only 2d values + xtestlen = len(X_test) + xtest_2d = X_test.ravel().reshape(xtestlen * n_past, n_features) + #inverse tranform of actual value + xtest_2d = scaler.inverse_transform(xtest_2d) + actual = xtest_2d[:, targetColIndx] + #inverse tranform of predicted value + prediction_1d = predictions.ravel() + prediction_1d = prediction_1d.reshape(len(prediction_1d),1) + self.log.info(prediction_1d) + xtest_2d[:, targetColIndx] = prediction_1d + xtest_2d = scaler.inverse_transform(xtest_2d) + predictions = xtest_2d[:, targetColIndx] + + mse=None + rmse=None + ## Creating dataframe for actual,predictions + try: + pred_cols=list() + actual_cols=list() + for i in range(len(self.targetFeature)): + pred_cols.append(self.targetFeature[i]+'_pred') + actual_cols.append(self.targetFeature[i]+'_actual') + + predictions = pd.DataFrame(predictions.ravel(), columns=pred_cols) + actual = pd.DataFrame(actual.ravel(), columns=actual_cols) + df_predicted=pd.concat([actual,predictions],axis=1) + self.log.info(""LSTM Multivariate prediction dataframe: \\n""+str(df_predicted)) + from math import sqrt + from sklearn.metrics import mean_squared_error + from sklearn.metrics import r2_score + from sklearn.metrics import mean_absolute_error + target=self.targetFeature + mse_dict={} + rmse_dict={} + mae_dict={} + mape_dict={} + r2_dict={} + lstm_var = 0 + self.log.info(actual.shape) + self.log.info(actual) + self.log.info(predictions.shape) + self.log.info(predictions) + mse = mean_squared_error(actual,predictions) + mse_dict[self.targetFeature[0]]=mse + rmse=sqrt(mse) + rmse_dict[self.targetFeature[0]]=rmse + lstm_var = lstm_var+rmse + self.log.info(""Name of the target feature: ""+str(self.targetFeature)) + self.log.info(""RMSE of the target feature: ""+str(rmse)) + r2 = r2_score(actual,predictions) + r2_dict[self.targetFeature[0]]=r2 + mae = mean_absolute_error(actual,predictions) + mae_dict[self.targetFeature[0]]=mae + mape = mean_absolute_percentage_error(actual,predictions) + mape_dict[self.targetFeature[0]]=mape + ## For VAR comparison, send last target mse and rmse from above dict + lstm_var = lstm_var/len(target) + select_msekey=list(mse_dict.keys())[-1] + l_mse=list(mse_dict.values())[-1] + select_rmsekey=list(rmse_dict.keys())[-1] + l_rmse=list(rmse_dict.values())[-1] + select_r2key=list(r2_dict.keys())[-1] + l_r2=list(r2_dict.values())[-1] + select_maekey=list(mae_dict.keys())[-1] + l_mae=list(mae_dict.values())[-1] + l_mape=list(mape_dict.values())[-1] + self.log.info(""Selected target feature of 
LSTM for best model selection: ""+str(select_rmsekey)) + self.log.info(""lstm rmse: ""+str(l_rmse)) + self.log.info(""lstm mse: ""+str(l_mse)) + self.log.info(""lstm r2: ""+str(l_r2)) + self.log.info(""lstm mae: ""+str(l_mae)) + self.log.info(""lstm mape: ""+str(l_mape)) + except Exception as e: + import traceback + self.log.info(""prediction error traceback: \\n""+str(traceback.print_exc())) + except Exception as e: + import traceback + self.log.info(""dataframe creation error. err.msg: ""+str(e)) + self.log.info(""dataframe creation error traceback: \\n""+str(traceback.print_exc())) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler + # import os + #predicted_file_name='lstm_prediction_df.csv' + #predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name) + #df_predicted.to_csv(predicted_file_path) + ##save model + #model_path = os.path.join(self.dataFolderLocation,self.model_name) + #self.log.info(""mlp model saved at: ""+str(model_path)) + #model.save(model_path) + except Exception as e: + import traceback + ## Just use below traceback print to get detailed error information. + # import traceback + # print("" traceback error 7:\\n"",traceback.print_exc()) + ## Enable traceback for debugging + self.log.info(""dataframe creation error. err.msg: ""+str(e)) + self.log.info(""Final exception traceback: \\n""+str(traceback.print_exc())) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import os +import numpy as np +import numpy +import pandas +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import LSTM +from sklearn.preprocessing import MinMaxScaler +import logging +import tensorflow as tf +from tensorflow.keras.layers import Dropout +import math +import tensorflow as tf +import keras_tuner +#from keras_tuner.engine.hyperparameters import HyperParameters +from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband +from sklearn.model_selection import train_test_split +from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator +import warnings +warnings.simplefilter(""ignore"", UserWarning) + + +class timeseriesDLMultivariate: + def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature): + self.look_back=None + + # self.df=df + + self.epochs=None + self.batch_size=None + self.hidden_layers=None + self.optimizer=None + self.activation_fn=""relu"" + self.loss_fn=None + self.first_layer=None + self.dropout=None + self.model_name=None + self.dl_params = configfile + # self.data=data + self.targetFeature=targetFeature + self.dateTimeFeature=dateTimeFeature + self.testpercentage = float(testpercentage) + self.log = logging.getLogger('eion') + ##Added for ts hpt (TFSTask:7033) + self.tuner_algorithm="""" + self.num_features=0 + + ##Get deep learning model hyperparameter from advanced config + def getdlparams(self): + val=self.dl_params + self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') + self.log.info("" ""+str(val)) + for k,v in val.items(): + try: + if (k == ""tuner_algorithm""): + self.tuner_algorithm=str(v) + elif (k == ""activation""): + self.activation_fn=str(v) + elif (k == ""optimizer""): + self.optimizer=str(v) + elif (k == ""loss""): + self.loss_fn=str(v) + elif (k == ""first_layer""): + if not isinstance(k,list): + self.first_layer=str(v).split(',') + else: + self.first_layer=k + elif (k == ""lag_order""): + if isinstance(k,list): + k = ''.join(v) + k=int(float(str(v))) + else: + self.look_back=int(float(str(v))) + elif (k == ""hidden_layers""): + self.hidden_layers=int(v) + elif (k ==" +"""dropout""): + if not isinstance(k,list): + self.dropout=str(v).split(',') + else: + self.dropout=k + elif (k == ""batch_size""): + self.batch_size=int(v) + elif (k == ""epochs""): + self.epochs=int(v) + elif (k == ""model_name""): + self.model_name=str(v) + + except Exception as e: + self.log.info('Exception occured in deeep learn param reading, setting up default params.') + self.activation_fn=""relu"" + self.optimizer=""adam"" + self.loss_fn=""mean_squared_error"" + self.first_layer=[8,512] + self.hidden_layers=1 + self.look_back=int(2) + self.dropout=[0.1,0.5] + self.batch_size=2 + self.epochs=50 + self.model_name=""lstmmodel.h5"" + continue + + # Reshape the data to the required input shape of the LSTM model + def create_dataset(self,X, y, n_steps): + Xs, ys = [], [] + for i in range(len(X) - n_steps): + v = X.iloc[i:(i + n_steps)].values + Xs.append(v) + ys.append(y.iloc[i + n_steps]) + return np.array(Xs), np.array(ys) + + + ## Added function for hyperparam tuning (TFSTask:7033) + def build_model(self,hp): + n_features = len(self.targetFeature) + try: + loss=self.loss_fn + optimizer=self.optimizer + # self.getdlparams() + try: + if optimizer.lower() == ""adam"": + optimizer=tensorflow.keras.optimizers.Adam + elif(optimizer.lower() == ""adadelta""): + 
optimizer=tensorflow.keras.optimizers.experimental.Adadelta + elif(optimizer.lower() == ""nadam""): + optimizer=tensorflow.keras.optimizers.experimental.Nadam + elif(optimizer.lower() == ""adagrad""): + optimizer=tensorflow.keras.optimizers.experimental.Adagrad + elif(optimizer.lower() == ""adamax""): + optimizer=tensorflow.keras.optimizers.experimental.Adamax + elif(optimizer.lower() == ""rmsprop""): + optimizer=tensorflow.keras.optimizers.experimental.RMSprop + elif(optimizer.lower() == ""sgd""): + optimizer=tensorflow.keras.optimizers.experimental.SGD + else: + optimizer=tensorflow.keras.optimizers.Adam + except: + optimizer=tf.keras.optimizers.Adam + pass + # look_back_min=int(self.look_back[0]) + # look_back_max=int(self.look_back[1]) + first_layer_min=round(int(self.first_layer[0])) + first_layer_max=round(int(self.first_layer[1])) + dropout_min=float(self.dropout[0]) + dropout_max=float(self.dropout[1]) + model=tf.keras.Sequential() + try: + model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.num_features))) + except Exception as e: + import traceback + self.log.info(""lstm build traceback: \\n""+str(traceback.print_exc())) + return model + model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) + model.add(Dense(units=n_features)) + model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn]) + except Exception as e: + self.log.info("",Hyperparam tuning build_model err msg: \\n""+ str(e)) + return model + ##Multivariate lstm prediction function (lstm model, train, prediction, metrics) + def lstm_multivariate(self,df): + try: + self.getdlparams() + n_features = len(self.targetFeature) + self.num_features=n_features + try: + if (type(self.targetFeature) is list): + pass + else: + self.targetFeature = list(self.targetFeature.split("","")) + except: + pass + + df_new = df[df.columns[df.columns.isin(self.targetFeature)]] + scaler=MinMaxScaler() + df_transformed=scaler.fit_transform(df_new) + ## For hyperparam tuning below part is added.only for getting best model and best hyperparameters + train_size = int(len(df) * 0.80) + train_data, test_data = train_test_split(df, test_size=0.2, shuffle=False) + self.hpt_train=train_data + time_steps=self.look_back ## Just for initialization before hyperparameter tuning. 
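+ ## Shape note: create_dataset below slides a window of length time_steps over the raw
+ ## (un-scaled) train/test frames, giving X of shape (samples, time_steps, n_columns) and
+ ## y as the row immediately after each window; the tuned model is later refit on the
+ ## MinMax-scaled target columns through TimeseriesGenerator further down.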
+ tuner_alg=self.tuner_algorithm + #The below create_dataset only for getting best model and best hyperparameters + X_train, y_train = self.create_dataset(train_data, train_data, time_steps) + X_test, y_test = self.create_dataset(test_data, test_data, time_steps) + self.log.info(""Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \\n""+str(tuner_alg)) + try: + ## Remove untitled_project dir in AION root folder created by previous tuner search run + import shutil + shutil.rmtree(r"".\\untitled_project"") + except: + pass + try: + if (tuner_alg.lower()==""randomsearch""): + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""bayesianoptimization""): + tuner=BayesianOptimization(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + elif (tuner_alg.lower()==""hyperband""): + tuner=Hyperband(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_epochs=50,factor=3) + else: + self.log.info(""The given alg is not implemented. Using default hyperparam tuning algorithm: RandomSearch.\\n"") + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + from keras.callbacks import EarlyStopping + stop_early = EarlyStopping(monitor='val_loss', patience=5) + except Exception as e: + self.log.info(""The given alg have some issue, Using default hyperparam tuning algorithm: RandomSearch.\\n"") + tuner=RandomSearch(self.build_model,keras_tuner.Objective(""val_loss"", direction=""min""),max_trials=5,executions_per_trial=3) + self.log.info(""tuner errmsg:\\n""+str(e)) + #hpt search for best params + try: + tuner.search(X_train, y_train,validation_data=(X_test, y_test),callbacks=[stop_early]) + except: + tuner.search(x=X_train,y=y_train,validation_split=0.2,callbacks=[stop_early]) + # best_model = tuner.get_best_models(num_models=1)[0] + # self.log.info(""best_model.summary(): \\n""+str(best_model.summary())) + best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] + self.log.info(""TS Multivariate LSTM best hyperparameter values:\\n""+str(best_hps.values)) + self.log.info(""Activation fn:\\n""+str(self.activation_fn)) + # time_steps_best=best_hps.get('time_steps') + n_input=self.look_back + best_hmodel=tuner.hypermodel.build(best_hps) + optimizer=self.optimizer + self.first_layer=best_hps.get('units') + self.dropout=best_hps.get('Dropout_rate') + learning_rate=float(best_hps.get('learning_rate')) + try: + ##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps. 
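+ ## Note: this module imports tensorflow only as tf, so the tensorflow.keras.optimizers
+ ## branches below raise NameError and the bare except always falls back to Adam with the
+ ## tuned learning rate, regardless of the configured optimizer. A mapping-based sketch
+ ## (the _OPTS name and the Adam fallback are assumptions, not part of this code):
+ # _OPTS = {'adam': tf.keras.optimizers.Adam, 'sgd': tf.keras.optimizers.SGD,
+ #          'rmsprop': tf.keras.optimizers.RMSprop, 'adagrad': tf.keras.optimizers.Adagrad,
+ #          'adadelta': tf.keras.optimizers.Adadelta, 'adamax': tf.keras.optimizers.Adamax,
+ #          'nadam': tf.keras.optimizers.Nadam}
+ # optimizer = _OPTS.get(str(optimizer).lower(), tf.keras.optimizers.Adam)(learning_rate=learning_rate)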
+ try: + if optimizer.lower() == ""adam"": + optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) + elif(optimizer.lower() == ""adadelta""): + optimizer=tensorflow.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate) + elif(optimizer.lower() == ""nadam""): + optimizer=tensorflow.keras.optimizers.experimental.Nadam(learning_rate=learning_rate) + elif(optimizer.lower() == ""adagrad""): + optimizer=tensorflow.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate) + elif(optimizer.lower() == ""adamax""): + optimizer=tensorflow.keras.optimizers.experimental.Adamax(learning_rate=learning_rate) + elif(optimizer.lower() == ""rmsprop""): + optimizer=tensorflow.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate) + elif(optimizer.lower() == ""sgd""): + optimizer=tensorflow.keras.optimizers.experimental.SGD(learning_rate=learning_rate) + else: + optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) + except: + optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate) + pass + ##From best hyperparameter values, now creating multivariate time series model using time generator. + t_lb=1 + test_size=t_lb+1 + train,test = train_test_split(df_transformed,test_size=0.2,shuffle=False) + generatorTrain=TimeseriesGenerator(df_transformed,df_transformed,length=n_input,batch_size=self.batch_size) + # generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size) + batch_0=generatorTrain[0] + x,y=batch_0 + epochs=int(self.epochs) + ##Multivariate LSTM model + try: + from tensorflow.keras.layers import Dropout + model=Sequential() + model.add(LSTM(self.first_layer,activation=self.activation_fn,input_shape=(n_input,n_features))) + model.add(Dropout(self.dropout)) + model.add(Dense(n_features)) + model.compile(optimizer=self.optimizer,loss=self.loss_fn) + #model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False) + model.fit_generator(generatorTrain, epochs=epochs,shuffle=False, verbose=0) + # lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0) + except Exception as e: + self.log.info(""multivariate model build error: error msg:: \\n""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + #predictions = model.predict_generator(generatorTest) + except Exception as e: + self.log.info(""multivariate model build error: error msg:: \\n""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + try: + predictions=[] + future_pred_len=n_input + #To get values for prediction,taking look_back steps of rows + first_batch=test[-future_pred_len:] + c_batch = first_batch.reshape((1,future_pred_len,n_features)) + current_pred=None + for i in range(len(test)): + #get pred for firstbatch + current_pred=model.predict_generator(c_batch)[0] + predictions.append(current_pred) + #remove first val + c_batch_rmv_first=c_batch[:,1:,:] + #update + c_batch=np.append(c_batch_rmv_first,[[current_pred]],axis=1) + prediction_actual=scaler.inverse_transform(predictions) + test_data_actual=scaler.inverse_transform(test) + mse=None + rmse=None + ## Creating dataframe for actual,predictions + try: + pred_cols=list() + for i in range(len(self.targetFeature)): + pred_cols.append(self.targetFeature[i]+'_pred') + + predictions = pd.DataFrame(prediction_actual, columns=pred_cols) + actual = pd.DataFrame(test_data_actual, columns=self.targetFeature) + actual.columns = [str(col) + '_actual' for col in df.columns] + df_predicted=pd.concat([actual,predictions],axis=1) + self.log.info(""LSTM Multivariate 
prediction dataframe: \\n""+str(df_predicted)) + from math import sqrt + from sklearn.metrics import mean_squared_error + from sklearn.metrics import r2_score + from sklearn.metrics import mean_absolute_error + target=self.targetFeature + mse_dict={} + rmse_dict={} + mae_dict={} + r2_dict={} + lstm_var = 0 + for name in target: + index = df.columns.get_loc(name) + mse = mean_squared_error(test_data_actual[:,index],prediction_actual[:,index]) + mse_dict[name]=mse + rmse=sqrt(mse) + rmse_dict[name]=rmse + lstm_var = lstm_var+rmse + self.log.info(""Name of the target feature: ""+str(name)) + self.log.info(""RMSE of the target feature: ""+str(rmse)) + r2 = r2_score(test_data_actual[:,index],prediction_actual[:,index]) + r2_dict[name]=r2 + mae = mean_absolute_error(test_data_actual[:,index],prediction_actual[:,index]) + " +"mae_dict[name]=mae + ## For VAR comparison, send last target mse and rmse from above dict + lstm_var = lstm_var/len(target) + select_msekey=list(mse_dict.keys())[-1] + l_mse=list(mse_dict.values())[-1] + select_rmsekey=list(rmse_dict.keys())[-1] + l_rmse=list(rmse_dict.values())[-1] + select_r2key=list(r2_dict.keys())[-1] + l_r2=list(r2_dict.values())[-1] + select_maekey=list(mae_dict.keys())[-1] + l_mae=list(mae_dict.values())[-1] + self.log.info(""Selected target feature of LSTM for best model selection: ""+str(select_rmsekey)) + self.log.info(""lstm rmse: ""+str(l_rmse)) + self.log.info(""lstm mse: ""+str(l_mse)) + self.log.info(""lstm r2: ""+str(l_r2)) + self.log.info(""lstm mae: ""+str(l_mae)) + except Exception as e: + import traceback + print("" traceback error:\\n"",traceback.print_exc()) + self.log.info(""prediction error traceback: \\n""+str(traceback.print_exc())) + except Exception as e: + self.log.info(""dataframe creation error. err.msg: ""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler + # import os + #predicted_file_name='lstm_prediction_df.csv' + #predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name) + #df_predicted.to_csv(predicted_file_path) + ##save model + #model_path = os.path.join(self.dataFolderLocation,self.model_name) + #self.log.info(""mlp model saved at: ""+str(model_path)) + #model.save(model_path) + except Exception as e: + ## Just use below traceback print to get detailed error information. + # import traceback + # print("" traceback error 7:\\n"",traceback.print_exc()) + ## Enable traceback for debugging + self.log.info(""dataframe creation error. err.msg: ""+str(e)) + return 'Error',0,0,0,0,None,pd.DataFrame(),0,None + + + + import pandas as pd +import numpy as np +from statsmodels.tsa.stattools import adfuller +from statsmodels.tsa.stattools import kpss +from statsmodels.tsa.seasonal import seasonal_decompose +import logging +import os +import warnings +warnings.filterwarnings('ignore') + +## Main class to find out seassonality and stationary in timeseries data. 
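+## Reading the two tests below: ADF (adfuller) has a unit-root null, so p < 0.05 suggests the
+## series is stationary; KPSS has a stationarity null, so p < 0.05 suggests it is not.
+## Minimal sketch of the wrapped calls ('series' is an assumed pandas Series):
+# from statsmodels.tsa.stattools import adfuller, kpss
+# adf_stat, adf_p = adfuller(series)[:2]
+# kpss_stat, kpss_p = kpss(series)[:2]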
+class tsStationarySeasonalityTest: + def __init__(self,df,deployLocation): + self.df=df + self.deployLocation=deployLocation + self.log = logging.getLogger('eion') + + + ## to get the timeseries data stationary information + def stationary_model(self,df,target_features,stationary_check_method): + self.log.info(""<------ Time Series stationary test started.....------------->\\n"") + self.log.info(""<------ Feature used:------------->\\t""+str(target_features)) + stationary_status=None + if (stationary_check_method.lower()=='adfuller'): + stats_model=adfuller(df[target_features]) + # p_val=adf_result[1] + statistic, p_value, n_lags, num_bservations,critical_values,info_criterion_best=stats_model[0],stats_model[1],stats_model[2],stats_model[3],stats_model[4],stats_model[5] + ##Uncomment below logs when required. + self.log.info(""Adfuller test (time series stationary test) p_value: \\t""+str(p_value)) + # self.log.info(""Adfuller test (time series stationary test) statistics: \\t""+str(statistic)) + # self.log.info(""Adfuller test (time series stationary test) number of lags (time steps): \\t""+str(n_lags)) + # self.log.info(""Adfuller test (time series stationary test) Critical values: \\n"") + ##To display critical values + # for key, value in stats_model[4].items(): + # self.log.info("" \\t""+str(key)+""\\t""+str(value)) + + if (p_value>0.05): + stationary_status=""feature is non-stationary"" + self.log.info('Status:- |... '+str(target_features)+' is non stationary') + elif(p_value<0.05): + stationary_status=""feature is stationary"" + self.log.info('Status:- |... '+str(target_features)+' is stationary') + + ##kpss is opposite to ADF in considering null hypothesis. In KPSS, if null hypothesis,then it is stationary as oppose to ADF. + elif (stationary_check_method.lower()=='kpss'): + from statsmodels.tsa.stattools import kpss + stats_model = kpss(df[target_features]) + statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] + self.log.info(""kpss test (time series stationary test) p_value: \\t""+str(p_value)) + self.log.info(""kpss test (time series stationary test) statistics: \\t""+str(statistic)) + self.log.info(""kpss test (time series stationary test) number of lags (time steps): \\t""+str(n_lags)) + self.log.info(""kpss test (time series stationary test) Critical values: \\n"") + + for key, value in stats_model[3].items(): + self.log.info("" \\t""+str(key)+""\\t""+str(value)) + ##In kpss, the stationary condition is opposite to Adafuller. + if (p_value>0.05): + self.log.info('Status:- |... '+str(target_features)+' is stationary') + else: + self.log.info('Status:- |... 
'+str(target_features)+' is non stationary') + + return stats_model,n_lags,p_value,stationary_status + + ## Get stationary details + def stationary_check(self,target_features,time_col,method): + df=self.df + try: + df[time_col]=pd.to_datetime(df[time_col]) + except Exception as e: + self.log.info(""issue in datetime conversion...\\n""+str(e)) + df=df.set_index(time_col) + try: + stationary_check_method=method + except: + stationary_check_method='adfuller' + if (len(target_features) == 1): + try: + if isinstance(target_features,list): + target_features=''.join(target_features) + elif isinstance(target_features,int): + target_features=str(target_features) + elif isinstance(target_features,str): + pass + except Exception as e: + self.log.info(""stationary check target feature error: \\t""+str(e)) + stationary_result={} + stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_features,stationary_check_method) + stationary_result[target_features]=stationary_status + elif(len(target_features) > 1): + stationary_result={} + for col in df.columns: + # self.log.info(""Multivariate feature for Stationary check:\\t""+str(col)) + stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method) + stationary_result[col]=stationary_status + else: + self.log.info(""TS Stationarity Test: Error in target feature, pls check.\\n."") + + # self.log.info(""Feature based stationarity_result:\\n""+str(stationary_result)) + # ## Stationary component for whole dataset + stationary_combined_res=dict() + # stats_model,n_lags,p_value,stationary_status=self.stationary_all_features(time_col,'adfuller') + c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v] + if (len(c_dict)>=1): + stationary_combined_res['dataframe_stationarity']='Non-Stationary' + self.log.info('Status:- |... Data is non stationarity') + else: + stationary_combined_res['dataframe_stationarity']='Stationary' + # self.log.info(""Stationarity information for whole dataset:\\n""+str(stationary_combined_res)) + self.log.info(""Time series Stationarity test completed.\\n"") + + return stats_model,n_lags,p_value,stationary_result,stationary_combined_res + + #Get seasonality by using seasonal_decompose lib. 
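+    ## seasonal_decompose splits the series into trend, seasonal and residual components; below
+    ## the additive model is tried first with the KPSS lag estimate as the period, falling back
+    ## to a multiplicative fit with period 1. Minimal sketch ('series' and period=12 are
+    ## assumptions for illustration):
+    # res = seasonal_decompose(series, model='additive', extrapolate_trend='freq', period=12)
+    # trend, seasonal, resid = res.trend, res.seasonal, res.resid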
+ def seasonality_model(self,target_features,df): + self.log.info(""<------ Time Series Seasonality test started.....------------->\\n"") + self.log.info(""<------ Feature used:------------->\\n""+str(target_features)) + seasonality_status=None + try: + try: + stats_model = kpss(df[target_features]) + statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] + except: + n_lags=1 + pass + try: + df_target=self.df[target_features] + decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=n_lags) + except Exception as e: + self.log.info(""Logging seasonality_model decompose_result_mult: \\t""+str(e)) + ##If additive model (type of seasonal component) failed, use multiplicative + decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1) + trend = decompose_result_mult.trend + observed=decompose_result_mult.observed + seasonal = decompose_result_mult.seasonal + residual = decompose_result_mult.resid + try: + if isinstance(df_target, pd.Series): + auto_correlation = df_target.autocorr(lag=n_lags) + # self.log.info(""seasonality test: auto_correlation value:\\n""+str(auto_correlation)) + elif isinstance(df_target, pd.DataFrame): + df_target = df_target.squeeze() + auto_correlation = df_target.autocorr(lag=n_lags) + # self.log.info(""seasonality test: auto_correlation value:\\n""+str(auto_correlation)) + except: + pass + self.log.info(""<------------------ Time series Seasonality test result:------------------>"") + if (seasonal.sum()==0): + seasonality_status=""feature don't have seasonality (non seasonality)."" + self.log.info('Status:- |... '+str(target_features)+' does not have seasonality') + self.log.info(""<----- The model feature: ""+str(target_features)+"" does not have significant seasonality.----->\\n"") + else: + seasonality_status=""feature has seasonality."" + self.log.info('Status:- |... '+str(target_features)+' have seasonality') + + ##Please use the below plot for GUI show (seasonality components) + # decompose_result_mult.plot() + df['observed'] = decompose_result_mult.observed + df['residual'] = decompose_result_mult.resid + df['seasonal'] = decompose_result_mult.seasonal + df['trend'] = decompose_result_mult.trend + df_name='timeseries_seasonality_check_'+f""{target_features}""+'.csv' + dir_n = os.path.join(self.deployLocation,'data','seasonality') + if not os.path.exists(dir_n): + os.makedirs(dir_n) + model_path=os.path.join(dir_n,df_name) + self.log.info(""Seasonality information saved as dataframe at:\\t ""+str(model_path)) + ## Seasonal component for whole dataset + df.to_csv(model_path) + except Exception as e: + self.log.info(""Seasonality function exception: \\t""+str(e)) + return df,decompose_result_mult,seasonality_status + + ##Main function to check seasonlity in data + def seasonal_check(self,target_features,time_col,seasonal_model): + df=self.df + # self.log.info(""seasonal check started... 
\\n"") + try: + df[time_col]=pd.to_datetime(df[time_col]) + except Exception as e: + self.log.info(""Issue in datetime conversion...\\n""+str(e)) + df=df.set_index(time_col) + + if (len(target_features)==1): + try: + if isinstance(target_features,list): + target_features=''.join(target_features) + elif isinstance(target_features,int): + target_features=str(target_features) + elif isinstance(target_features,str): + pass + except Exception as e: + self.log.info(""stationary check target feature error: \\t""+str(e)) + ## Seasonal component for individual feature based. + seasonality_result=dict() + df,decompose_result_mult,seasonality_status = self.seasonality_model(target_features,df) + seasonality_result[target_features]=seasonality_status + elif(len(target_features) > 1): + seasonality_result=dict() + self.log.info(""TS seasonality Test: The problem type is time series Multivariate."") + for col in df.columns: + df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df) + seasonality_result[col]=seasonality_status + else: + self.log.info(""TS seasonality Test: Error in target feature, pls check.\\n."") + + # self.log.info(""Feature based seasonality_result:\\n""+str(seasonality_result)) + # ## Seasonal component for whole dataset + seasonality_combined_res=dict() + c_dict=[k for k,v in seasonality_result.items() if 'non seasonality' in v] + if (len(c_dict)>=1): + seasonality_combined_res['dataframe_seasonality']='No Seasonal elements' + else: + seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.' + # self.log.info(""Seasonality information for whole dataset:\\n""+str(season" +"ality_combined_res)) + + + self.log.info(""Time series Seasonality test completed.\\n"") + + return df,decompose_result_mult,seasonality_result,seasonality_combined_res + + + + +#Main fn for standalone test purpose +if __name__=='__main__': + print(""Inside seasonality-stationary test main function..."") + print(""Below code used for standalone test purpose."") + # df=pd.read_csv(r""C:\\AION_Works\\Data\\order_forecast_ts.csv"") + # print(""df info: \\n"",df.info()) + # df=df.drop('index',axis=1) + # time_col=""DateTime"" + # target='order1' + # stationary_method='adfuller' + # seasonal_model=""additive"" ## two models are available: 1.multiplicative, 2.additive + # if (isinstance(target,list)): + # pass + # elif (isinstance(target,str)): + # target=list(target.split(',')) + # cls_ins=aion_ts_stationary_seassonality_test(df) + # stats_model,n_lags,p_value=cls_ins.stationary_check(target,time_col,stationary_method) + # df,decompose_result_mult=cls_ins.seasonal_check(target,time_col,seasonal_model) + # print("" Time series stationary and seasonality check completed."") """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 
2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from .imports import importModule + +supported_reader = ['sqlite', 'influx','s3'] + + + +functions_code = { + 'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""""" + +class dataReader(): + + def get_reader(self, reader_type, target_path=None, config=None): + if reader_type == 'sqlite': + return sqlite_writer(target_path=target_path) + elif reader_type == 'influx': + return Influx_writer(config=config) + elif reader_type == 'gcs': + return gcs(config=config) + elif reader_type == 'azure': + return azure(config=config) + elif reader_type == 's3': + return s3bucket(config=config) + else: + raise ValueError(reader_type) +"""""" + }, + 'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':""""""\\n\\ +class sqlite_writer(): + def __init__(self, target_path): + self.target_path = Path(target_path) + database_file = self.target_path.stem + '.db' + self.db = sqlite_db(self.target_path, database_file) + + def file_exists(self, file): + if file: + return self.db.table_exists(file) + else: + return False + + def read(self, file): + return self.db.read(file) + + def write(self, data, file): + self.db.write(data, file) + + def close(self): + self.db.close() + +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + '.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self, table_name): + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + + def write(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} 
VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + + def close(self): + self.conn.close() + + """""" + }, + 'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':""""""\\n\\ +class Influx_writer(): + + def __init__(self, config): + self.db = influx_db(config) + + def file_exists(self, file): + if file: + return self.db.table_exists(file) + else: + return False + + def read(self, file): + query = ""SELECT * FROM {}"".format(file) + if 'read_time' in self.db_config.keys() and self.db_config['read_time']: + query += f"" time > now() - {self.db_config['read_time']}"" + return self.db.read(query) + + def write(self, data, file): + self.db.write(data, file) + + def close(self): + pass + + +class influx_db(): + + def __init__(self, config): + self.host = config['host'] + self.port = config['port'] + self.user = config.get('user', None) + self.password = config.get('password', None) + self.token = config.get('token', None) + self.database = config['database'] + self.measurement = config['measurement'] + self.tags = config['tags'] + self.client = self.get_client() + + def table_exists(self, name): + query = f""SHOW MEASUREMENTS ON {self.database}"" + result = self.client(query) + for measurement in result['measurements']: + if measurement['name'] == name: + return True + return False + + def read(self, query)->pd.DataFrame: + cursor = self.client.query(query) + points = cursor.get_points() + my_list=list(points) + df=pd.DataFrame(my_list) + return df + + def get_client(self): + headers = None + if self.token: + headers={""Authorization"": self.token} + client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers) + databases = client.get_list_database() + databases = [x['name'] for x in databases] + if self.database not in databases: + client.create_database(self.database) + return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers) + + def write(self,data, measurement=None): + if isinstance(data, pd.DataFrame): + sorted_col = data.columns.tolist() + sorted_col.sort() + data = data[sorted_col] + data = data.to_dict(orient='records') + if not measurement: + measurement = self.measurement + for row in data: + if 'time' in row.keys(): + p = '%Y-%m-%dT%H:%M:%S.%fZ' + time_str = datetime.strptime(row['time'], p) + del row['time'] + else: + time_str = None + if 'model_ver' in row.keys(): + self.tags['model_ver']= row['model_ver'] + del row['model_ver'] + json_body = [{ + 'measurement': measurement, + 'time': time_str, + 'tags': self.tags, + 'fields': row + }] + self.client.write_points(json_body) + + def delete(self, name): + pass + + def close(self): + self.client.close() +"""""" +}, + 's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +class s3bucket(): + + def __init__(self, config={}): + if 's3' in config.keys(): + config = config['s3'] + aws_access_key_id = config.get('aws_access_key_id','') + aws_secret_access_key = config.get('aws_secret_access_key','') + bucket_name = config.get('bucket_name','') + if not aws_access_key_id: + raise ValueError('aws_access_key_id can not be empty') + if not 
aws_secret_access_key: + raise ValueError('aws_secret_access_key can not be empty') + self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key)) + self.bucket_name = bucket_name + + def read(self, file_name): + try: + response = self.client.get_object(Bucket=self.bucket_name, Key=file_name) + return pd.read_csv(response['Body']) + except ClientError as ex: + if ex.response['Error']['Code'] == 'NoSuchBucket': + raise ValueError(f""Bucket '{self.bucket_name}' not found in aws s3 storage"") + elif ex.response['Error']['Code'] == 'NoSuchKey': + raise ValueError(f""File '{file_name}' not found in s3 bucket '{self.bucket_name}'"") + else: + raise + + """""" +}, + 'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +def azure(): + + def __init__(self,config={}): + if 'azure' in config.keys(): + config = config['azure'] + account_name = config.get('account_name','') + account_key = config.get('account_key','') + container_name = config.get('container_name','') + if not account_name: + raise ValueError('Account name can not be empty') + if not account_key: + raise ValueError('Account key can not be empty') + if not container_name: + raise ValueError('Container name can not be empty') + service_client = DataLakeServiceClient(account_url=""{}://{}.dfs.core.windows.net"".format(""https"", account_name), credential=account_key) + self.file_system_client = service_client.get_file_system_client(container_name) + + def read(self, directory_name): + root_dir = str(directory_name) + file_paths = self.file_system_client.get_paths(path=root_dir) + main_df = pd.DataFrame() + for path in file_paths: + if not path.is_directory: + file_client = file_system_client.get_file_client(path.name) + file_ext = Path(path.name).suffix + if file_ext in ["".csv"", "".tsv""]: + with open(csv_local, ""wb"") as my_file: + file_client.download_file().readinto(my_file) + with open(csv_local, 'r') as file: + data = file.read() + row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t']) + processed_df = pd.read_csv(csv_local, sep=row_delimiter) + elif file_ext == "".parquet"": + stream = io.BytesIO() + file_client.download_file().readinto(stream) + processed_df = pd.read_parquet(stream, engine='pyarrow') + elif file_ext == "".avro"": + with open(avro_local, ""wb"") as my_file: + file_client.download_file().readinto(my_file) + processed_df = pdx.read_avro(avro_local) + if main_df.empty: + main_df = pd.DataFrame(processed_df) + else: + main_df = main_df.append(processed_df, ignore_index=True) + return main_df + + """""" + }, + 'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +class gcs(): + + def __" +"init__(self, config={}): + if 'gcs' in config.keys(): + config = config['gcs'] + account_key = config.get('account_key','') + bucket_name = config.get('bucket_name','') + if not account_key: + raise ValueError('Account key can not be empty') + if not bucket_name: + raise ValueError('bucket name can not be empty') + storage_client = storage.Client.from_service_account_json(account_key) + self.bucket = storage_client.get_bucket(bucket_name) + + def read(self, bucket_name, file_name): + data = 
self.bucket.blob(file_name).download_as_text() + return pd.read_csv(data, encoding = 'utf-8', sep = ',') + """""" + } +} + +class data_reader(): + + def __init__(self, reader_type=[]): + self.supported_readers = supported_reader + if isinstance(reader_type, str): + self.readers = [reader_type] + elif not reader_type: + self.readers = self.supported_readers + else: + self.readers = reader_type + unsupported_reader = [ x for x in self.readers if x not in self.supported_readers] + if unsupported_reader: + raise ValueError(f""reader type '{unsupported_reader}' is not supported\\nSupported readers are {self.supported_readers}"") + self.codeText = """" + self.importer = importModule() + + def get_reader_code(self, readers): + reader_code = { + 'sqlite': 'return sqlite_writer(target_path=target_path)', + 'influx': 'return Influx_writer(config=config)', + 'gcs': 'return gcs(config=config)', + 'azure': 'return azure(config=config)', + 's3': 'return s3bucket(config=config)' + } + code = ""\\n\\ndef dataReader(reader_type, target_path=None, config=None):\\n"" + for i, reader in enumerate(readers): + if not i: + code += f"" if reader_type == '{reader}':\\n"" + else: + code += f"" elif reader_type == '{reader}':\\n"" + code += f"" {reader_code[reader]}\\n"" + if readers: + code += "" else:\\n"" + code += f"""""" raise ValueError(""'{{reader_type}}' not added during code generation"")\\n"""""" + else: + code += f"""""" raise ValueError(""'{{reader_type}}' not added during code generation"")\\n"""""" + return code + + def get_code(self): + code = self.get_reader_code(self.readers) + functions = [] + for reader in self.readers: + functions.append(reader) + for function in functions: + code += self.get_function_code(function) + self.codeText += self.importer.getCode() + self.codeText += code + return self.codeText + + def get_function_code(self, name): + code = """" + if name in functions_code.keys(): + code += functions_code[name]['code'] + if self.importer: + if 'imports' in functions_code[name].keys(): + for module in functions_code[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + return code + + def get_importer(self): + return self.importer + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +class output_drift(): + + def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4): + self.tab = ' ' * tab_size + self.codeText = '' + self.missing = missing + self.word2num_features = word2num_features + self.cat_encoder = cat_encoder + self.target_encoder = target_encoder + self.normalizer = normalizer + self.text_profiler = text_profiler + self.feature_reducer = feature_reducer + self.score_smaller_is_better = score_smaller_is_better + self.problem_type = problem_type + + def addDatabaseClass(self, indent=0): + text = ""\\ + \\nclass database():\\ + \\n def __init__(self, config):\\ + \\n self.host = config['host']\\ + \\n self.port = config['port']\\ + \\n self.user = config['user']\\ + \\n self.password = config['password']\\ + \\n self.database = config['database']\\ + \\n self.measurement = config['measurement']\\ + \\n self.tags = config['tags']\\ + \\n self.client = self.get_client()\\ + \\n\\ + \\n def read_data(self, query)->pd.DataFrame:\\ + \\n cursor = self.client.query(query)\\ + \\n points = cursor.get_points()\\ + \\n my_list=list(points)\\ + \\n df=pd.DataFrame(my_list)\\ + \\n return df\\ + \\n\\ + \\n def get_client(self):\\ + \\n client = InfluxDBClient(self.host,self.port,self.user,self.password)\\ + \\n databases = client.get_list_database()\\ + \\n databases = [x['name'] for x in databases]\\ + \\n if self.database not in databases:\\ + \\n client.create_database(self.database)\\ + \\n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\\ + \\n\\ + \\n def write_data(self,data):\\ + \\n if isinstance(data, pd.DataFrame):\\ + \\n sorted_col = data.columns.tolist()\\ + \\n sorted_col.sort()\\ + \\n data = data[sorted_col]\\ + \\n data = data.to_dict(orient='records')\\ + \\n for row in data:\\ + \\n if 'time' in row.keys():\\ + \\n p = '%Y-%m-%dT%H:%M:%S.%fZ'\\ + \\n time_str = datetime.strptime(row['time'], p)\\ + \\n del row['time']\\ + \\n else:\\ + \\n time_str = None\\ + \\n if 'model_ver' in row.keys():\\ + \\n self.tags['model_ver']= row['model_ver']\\ + \\n del row['model_ver']\\ + \\n json_body = [{\\ + \\n 'measurement': self.measurement,\\ + \\n 'time': time_str,\\ + \\n 'tags': self.tags,\\ + \\n 'fields': row\\ + \\n }]\\ + \\n self.client.write_points(json_body)\\ + \\n\\ + \\n def close(self):\\ + \\n self.client.close()\\ + \\n"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def addPredictClass(self, indent=0): + text = ""\\ + \\nclass predict():\\ + \\n\\ + \\n def __init__(self, base_config):\\ + \\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\ + \\n self.dataLocation = base_config['dataLocation']\\ + \\n self.db_enabled = base_config.get('db_enabled', False)\\ + \\n if self.db_enabled:\\ + \\n self.db_config = base_config['db_config']\\ + \\n home = Path.home()\\ + \\n if platform.system() == 'Windows':\\ + \\n from pathlib import WindowsPath\\ + \\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\ + \\n else:\\ + \\n from pathlib import PosixPath\\ + \\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\ + \\n if not output_model_dir.exists():\\ 
+ \\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\ + \\n\\ + \\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\ + \\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\ + \\n mlflow.set_tracking_uri(tracking_uri)\\ + \\n mlflow.set_registry_uri(registry_uri)\\ + \\n client = mlflow.tracking.MlflowClient(\\ + \\n tracking_uri=tracking_uri,\\ + \\n registry_uri=registry_uri,\\ + \\n )\\ + \\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\\ + \\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\ + \\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\ + \\n run = client.get_run(self.model.metadata.run_id)\\ + \\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\ + \\n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\ + \\n else:\\ + \\n self.artifact_path = Path(run.info.artifact_uri)\\ + \\n with open(self.artifact_path/'deploy.json', 'r') as f:\\ + \\n deployment_dict = json.load(f)\\ + \\n with open(self.artifact_path/'features.txt', 'r') as f:\\ + \\n self.train_features = f.readline().rstrip().split(',')\\ + \\n\\ + \\n self.dataLocation = base_config['dataLocation']\\ + \\n self.selected_features = deployment_dict['load_data']['selected_features']\\ + \\n self.target_feature = deployment_dict['load_data']['target_feature']\\ + \\n self.output_model_dir = output_model_dir"" + if self.missing: + text += ""\\n self.missing_values = deployment_dict['transformation']['fillna']"" + if self.word2num_features: + text += ""\\n self.word2num_features = deployment_dict['transformation']['word2num_features']"" + if self.cat_encoder == 'labelencoding': + text += ""\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']"" + elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'): + text += ""\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"" + text += ""\\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"" + if self.target_encoder: + text += ""\\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"" + if self.normalizer: + text += ""\\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\\ +\\n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"" + if self.text_profiler: + text += ""\\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\\ +\\n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"" + if self.feature_reducer: + text += ""\\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\\ +\\n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"" + text += """""" + + def read_data_from_db(self): + if self.db_enabled: + try: + db = database(self.db_config) + query = ""SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''"".format(db.measurement, self.model_version, self.target_feature) + if 'read_time' in self.db_config.keys() and self.db_config['read_time']: + query += f"" time > now() - {self.db_config['read_time']}"" + data = db.read_data(query) + except: + raise ValueError('Unable to read from the database') + finally: + 
if db: + db.close() + return data + return None"""""" + text += ""\\ + \\n def predict(self, data):\\ + \\n df = pd.DataFrame()\\ + \\n if Path(data).exists():\\ + \\n if Path(data).suffix == '.tsv':\\ + \\n df=read_data(data,encoding='utf-8',sep='\\t')\\ + \\n elif Path(data).suffix == '.csv':\\ + \\n df=read_data(data,encoding='utf-8')\\ + \\n else:\\ + \\n if Path(data).suffix == '.json':\\ + \\n jsonData = read_json(data)\\ + \\n df = pd.json_normalize(jsonData)\\ + \\n elif is_file_name_url(data):\\ + \\n df = read_data(data,encoding='utf-8')\\ + \\n else:\\ + \\n jsonData = json.loads(data)\\ + \\n df = pd.json_normalize(jsonData)\\ + \\n if len(df) == 0:\\ + \\n raise ValueError('No data record found')\\ + \\n missing_features = [x for x in self.selected_features if x not in df.columns]\\ + \\n if missing_features:\\ + \\n raise ValueError(f'some feature/s is/are missing: {missing_features}')\\ + \\n if self.target_feature not in df.columns:\\ + \\n raise ValueError(f'Ground truth values/target column({self.target_feature})" +"not found in current data')\\ + \\n df_copy = df.copy()\\ + \\n df = df[self.selected_features]"" + if self.word2num_features: + text += ""\\n for feat in self.word2num_features:"" + text += ""\\n df[ feat ] = df[feat].apply(lambda x: s2n(x))"" + if self.missing: + text += ""\\n df.fillna(self.missing_values, inplace=True)"" + if self.cat_encoder == 'labelencoding': + text += ""\\n df.replace(self.cat_encoder, inplace=True)"" + elif self.cat_encoder == 'targetencoding': + text += ""\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"" + text += ""\\n df = cat_enc.transform(df)"" + elif self.cat_encoder == 'onehotencoding': + text += ""\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"" + text += ""\\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"" + text += ""\\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"" + if self.normalizer: + text += ""\\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"" + if self.text_profiler: + text += ""\\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\\ +\\n df_vect=self.text_profiler.transform(text_corpus)\\ +\\n if isinstance(df_vect, np.ndarray):\\ +\\n df1 = pd.DataFrame(df_vect)\\ +\\n else:\\ +\\n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\\ +\\n df1 = df1.add_suffix('_vect')\\ +\\n df = pd.concat([df, df1],axis=1)"" + if self.feature_reducer: + text += ""\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"" + else: + text += ""\\n df = df[self.train_features]"" + if self.target_encoder: + text += ""\\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\ + \\n df_copy['prediction'] = output.idxmax(axis=1)"" + else: + text += ""\\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\\ + \\n df_copy['prediction'] = output"" + text += ""\\n return df_copy"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def getClassificationMatrixCode(self, indent=0): + text = ""\\ + \\ndef get_classification_metrices(actual_values, predicted_values):\\ + \\n result = {}\\ + \\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\ + \\n avg_precision = sklearn.metrics.precision_score(actual_values, 
predicted_values,\\ + \\n average='macro')\\ + \\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n\\ + \\n result['accuracy'] = accuracy_score\\ + \\n result['precision'] = avg_precision\\ + \\n result['recall'] = avg_recall\\ + \\n result['f1'] = avg_f1\\ + \\n return result\\ + \\n\\ + "" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def getRegrssionMatrixCode(self, indent=0): + text = ""\\ + \\ndef get_regression_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n\\ + \\n me = np.mean(predicted_values - actual_values)\\ + \\n sde = np.std(predicted_values - actual_values, ddof = 1)\\ + \\n\\ + \\n abs_err = np.abs(predicted_values - actual_values)\\ + \\n mae = np.mean(abs_err)\\ + \\n sdae = np.std(abs_err, ddof = 1)\\ + \\n\\ + \\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\ + \\n mape = np.mean(abs_perc_err)\\ + \\n sdape = np.std(abs_perc_err, ddof = 1)\\ + \\n\\ + \\n result['mean_error'] = me\\ + \\n result['mean_abs_error'] = mae\\ + \\n result['mean_abs_perc_error'] = mape\\ + \\n result['error_std'] = sde\\ + \\n result['abs_error_std'] = sdae\\ + \\n result['abs_perc_error_std'] = sdape\\ + \\n return result\\ + \\n\\ + "" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def addSuffixCode(self, indent=1): + text =""\\n\\ + \\ndef check_drift( config):\\ + \\n prediction = predict(config)\\ + \\n usecase = config['modelName'] + '_' + config['modelVersion']\\ + \\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\\ + \\n if not train_data_path.exists():\\ + \\n raise ValueError(f'Training data not found at {train_data_path}')\\ + \\n curr_with_pred = prediction.read_data_from_db()\\ + \\n if prediction.target_feature not in curr_with_pred.columns:\\ + \\n raise ValueError('Ground truth not updated for corresponding data in database')\\ + \\n train_with_pred = prediction.predict(train_data_path)\\ + \\n performance = {}"" + if self.problem_type == 'classification': + text += ""\\n\\ + \\n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\ + \\n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"" + else: + text += ""\\n\\ + \\n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\ + \\n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"" + text += ""\\n return performance"" + text += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n try:\\ + \\n if len(sys.argv) < 2:\\ + \\n raise ValueError('config file not present')\\ + \\n config = sys.argv[1]\\ + \\n if Path(config).is_file() and Path(config).suffix == '.json':\\ + \\n with open(config, 'r') as f:\\ + \\n config = json.load(f)\\ + \\n else:\\ + \\n config = json.loads(config)\\ + \\n output = check_drift(config)\\ + \\n status = {'Status':'Success','Message':json.loads(output)}\\ + \\n print('output_drift:'+json.dumps(status))\\ + \\n except Exception as e:\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print('output_drift:'+json.dumps(status))"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') 
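+        # The text assembled above becomes the __main__ block of the generated
+        # drift-check script: it accepts a JSON config (either a .json file path
+        # or an inline JSON string), runs check_drift(config) and prints the
+        # status prefixed with 'output_drift:'. A hedged usage sketch (the script
+        # name below is illustrative, not fixed by this generator):
+        #   python output_drift.py drift_config.json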
+ return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def generateCode(self): + self.codeText += self.addDatabaseClass() + self.codeText += self.addPredictClass() + if self.problem_type == 'classification': + self.codeText += self.getClassificationMatrixCode() + elif self.problem_type == 'regression': + self.codeText += self.getRegrssionMatrixCode() + else: + raise ValueError(f""Unsupported problem type: {self.problem_type}"") + self.codeText += self.addSuffixCode() + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class transformer(): + + def __init__(self, indent=0, tab_size=4): + self.df_name = 'df' + self.tab = ' ' * tab_size + self.codeText = """" + self.transformers = [] + self.TxCols = [] + self.imputers = {} + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','trainData' : 'transformedData.dat','testData' : 'test.dat','preprocessor' : 'preprocessor.pkl'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config"" + return text + + def getPrefixModules(self): + modules = [ + {'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ,{'module':'numpy', 'mod_as':'np'} + ,{'module':'scipy'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += """""" +def transformation(log): + config = validateConfig() + targetPath = Path('aion')/config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + meta_data_file = targetPath/IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + log_file = targetPath/IOFiles['log'] + log 
= logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + dataLoc = targetPath/IOFiles['inputData'] + if not dataLoc.exists(): + return {'Status':'Failure','Message':'Data location does not exists.'} + status = dict() + df = read_data(dataLoc) + log.log_dataframe(df) + target_feature = config['target_feature'] + if config['test_ratio'] == 0.0: + train_data = df + test_data = pd.DataFrame() + else: + """""" + def getSuffixModules(self): + modules = [{'module':'pandas','mod_as':'pd'} + ,{'module':'json'} + ,{'module':'joblib'} + ] + return modules + + def addSuffixCode(self,encoder=False, indent=1): + self.codeText += """""" + train_data, preprocess_pipe, label_encoder = profilerObj.transform() + if not preprocess_pipe: + raise ValueError('Pipeline not created') + joblib.dump(preprocess_pipe, targetPath/IOFiles['preprocessor']) + test_data.reset_index(inplace=True) + + """""" + if encoder: + self.codeText += """""" + joblib.dump(label_encoder, targetPath/IOFiles['targetEncoder']) + if not test_data.empty: + ytest = label_encoder.transform(test_data[target_feature]) + """""" + else: + self.codeText += """""" + if not test_data.empty: + ytest = test_data[target_feature] + """""" + self.codeText += """""" + test_data.astype(profilerObj.train_features_type) + test_data = preprocess_pipe.transform(test_data) + if isinstance(test_data, scipy.sparse.spmatrix): + test_data = test_data.toarray() + preprocess_out_columns = train_data.columns.tolist() + preprocess_out_columns.remove(target_feature) + write_data(train_data,targetPath/IOFiles['trainData'],index=False) + if isinstance( test_data, np.ndarray): + test_data = pd.DataFrame(test_data, columns=preprocess_out_columns) + test_data[target_feature] = ytest + + write_data(test_data,targetPath/IOFiles['testData'],index=False) + + " +"log.log_dataframe(train_data) + status = {'Status':'Success','trainData':IOFiles['trainData'],'testData':IOFiles['testData']} + meta_data['transformation'] = {} + meta_data['transformation']['cat_features'] = train_data.select_dtypes('category').columns.tolist() + meta_data['transformation']['preprocessor'] = IOFiles['preprocessor'] + meta_data['transformation']['preprocess_out_columns'] = preprocess_out_columns + """""" + if encoder: + self.codeText += """""" + meta_data['transformation']['target_encoder'] = IOFiles['targetEncoder'] + """""" + self.codeText += """""" + meta_data['transformation']['Status'] = status + write_json(meta_data, str(targetPath/IOFiles['metaData'])) + log.info(f""Transformed data saved at {targetPath/IOFiles['trainData']}"") + log.info(f'output: {status}') + return json.dumps(status) + """""" + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'sys'} + ,{'module':'json'} + ,{'module':'logging'} + ,{'module':'argparse'} + ] + return modules + + def addMainCode(self, indent=1): + self.codeText += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n log = None\\ + \\n try:\\ + \\n print(transformation(log))\\ + \\n except Exception as e:\\ + \\n if log:\\ + \\n log.error(e, exc_info=True)\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print(json.dumps(status))"" + + def addValidateConfigCode(self, indent=1): + self.function_code += self.__addValidateConfigCode() + + def addLocalFunctionsCode(self): + self.addValidateConfigCode() + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self, indent=1): + return self.function_code + '\\n' + self.codeText + + def 
getDFName(self): + return self.df_name + +class data_profiler(): + + def __init__(self, importer, text_features=False): + self.importer = importer + self.codeText = """" + self.text_features = text_features + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def get_module_import_statement(self, mod): + text = """" + if not mod.get('module', None): + return text + if mod.get('mod_from', None): + text += f""from {mod['mod_from']} "" + text += f""import {mod['module']} "" + if mod.get('mod_as', None): + text += f""as {mod['mod_as']}"" + text += ""\\n"" + return text + + def get_import_modules(self): + profiler_importes = [ + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'w2n', 'mod_from': 'word2number', 'mod_as': None}, + {'module': 'LabelEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'OrdinalEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'OneHotEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'SimpleImputer', 'mod_from': 'sklearn.impute', 'mod_as': None }, + {'module': 'KNNImputer', 'mod_from': 'sklearn.impute', 'mod_as': None }, + {'module': 'Pipeline', 'mod_from': 'sklearn.pipeline', 'mod_as': None }, + {'module': 'FeatureUnion', 'mod_from': 'sklearn.pipeline', 'mod_as': None }, + {'module': 'MinMaxScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'StandardScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'PowerTransformer', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, + {'module': 'ColumnTransformer', 'mod_from': 'sklearn.compose', 'mod_as': None }, + {'module': 'TransformerMixin', 'mod_from': 'sklearn.base', 'mod_as': None }, + {'module': 'IsolationForest', 'mod_from': 'sklearn.ensemble', 'mod_as': None }, + {'module': 'TargetEncoder', 'mod_from': 'category_encoders', 'mod_as': None } + ] + if self.text_features: + profiler_importes.append({'module': 'textProfiler', 'mod_from': 'text.textProfiler', 'mod_as': None }) + profiler_importes.append({'module': 'textCombine', 'mod_from': 'text.textProfiler', 'mod_as': None }) + return profiler_importes + + def get_importer(self): + return self.importer + + def get_code(self): + common_importes = self.get_import_modules() + for module in common_importes: + mod_name = module['module'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + if module['module'] in ['textProfiler','textCombine']: + self.importer.addLocalModule(mod_name, mod_from=mod_from, mod_as=mod_as) + else: + self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + + self.codeText += """""" +STR_TO_CAT_CONVERSION_LEN_MAX = 10 +log_suffix = f'[{Path(__file__).stem}] ' + +target_encoding_method_change = {'targetencoding': 'labelencoding'} + +supported_method = { + 'fillNa': + { + 'categorical' : ['mode','zero','na'], + 'numeric' : ['median','mean','knnimputer','zero','drop','na'], + }, + 'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'], + 'normalization': ['standardscaler','minmax','lognormal', 'na','none'], + 'outlier_column_wise': ['iqr','zscore', 'disable'], + 'outlierOperation': ['dropdata', 'average', 'nochange'] + } + +def findiqrOutlier(df): + Q1 = df.quantile(0.25) + Q3 = df.quantile(0.75) + IQR = 
Q3 - Q1 + index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))) + return index + +def findzscoreOutlier(df): + z = np.abs(scipy.stats.zscore(df)) + index = (z < 3) + return index + +def findiforestOutlier(df): + isolation_forest = IsolationForest(n_estimators=100) + isolation_forest.fit(df) + y_pred_train = isolation_forest.predict(df) + return y_pred_train == 1 + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +class profiler(): + + def __init__(self, xtrain, ytrain=None, target=None, encode_target = True, config={}, keep_unprocessed=[], log=None): + if not isinstance(xtrain, pd.DataFrame): + raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type') + if xtrain.empty: + raise ValueError(f'{log_suffix}Data frame is empty') + if target and target in xtrain.columns: + self.target = xtrain[target] + xtrain.drop(target, axis=1, inplace=True) + self.target_name = target + elif ytrain: + self.target = ytrain + self.target_name = 'target' + else: + self.target = pd.Series() + self.target_name = None + self.encode_target = encode_target + self.label_encoder = None + keep_unprocessed = [x for x in keep_unprocessed if x in xtrain.columns] + if keep_unprocessed: + self.unprocessed = xtrain[keep_unprocessed] + self.data = xtrain.drop(keep_unprocessed, axis=1) + else: + self.data = xtrain + self.unprocessed = pd.DataFrame() + self.colm_type = {} + for colm, infer_type in zip(self.data.columns, self.data.dtypes): + self.colm_type[colm] = infer_type + self.numeric_feature = [] + self.cat_feature = [] + self.text_feature = [] + self.wordToNumericFeatures = [] + self.added_features = [] + self.pipeline = [] + self.dropped_features = {} + self.train_features_type={} + self.__update_type() + self.config = config + self.featureDict = config.get('featureDict', []) + self.output_columns = [] + self.feature_expender = [] + self.text_to_num = {} + if log: + self.log = log + else: + self.log = logging.getLogger('eion') + self.type_conversion = {} + + def log_dataframe(self, msg=None): + import io + buffer = io.StringIO() + self.data.info(buf=buffer) + if msg: + log_text = f'Data frame after {msg}:' + else: + log_text = 'Data frame:' + log_text += '\\\\n\\\\t'+str(self.data.head(2)).replace('\\\\n','\\\\n\\\\t') + log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t')) + self.log.info(log_text) + + def transform(self): + if self.is_target_available(): + if self.target_name: + self.log.info(f""Target feature name: '{self.target_name}'"") + self.log.info(f""Target feature size: {len(self.target)}"") + else: + self.log.info(f""Target feature not present"") + self.log_dataframe() + try: + self.process() + except Exception as e: + self.log.error(e, exc_info=True) + raise + pipe = FeatureUnion(self.pipeline) + self.log.info(pipe) + process_data = pipe.fit_transform(self.data, y=self.target) + self.update_output_features_names(pipe) + if isinstance(process_data, scipy.sparse.spmatrix): + process_data = process_data.toarray() + df = pd.DataFrame(process_data, columns=self.output_columns) + + if self.is_target_available() and self.target_name: + df[self.target_name] = self.target + if not 
self.unprocessed.empty: + df[self.unprocessed.columns] = self.unprocessed + self.log_numerical_fill() + self.log_categorical_fill() + self.log_normalization() + return df, pipe, self.label_encoder + + def log_type_conversion(self): + if self.log: + self.log.info('----------- Inspecting Features -----------') + self.log.info('----------- Type Conversion -----------') + count = 0 + for k, v in self.type_conversion.items(): + if v[0] != v[1]: + self.log.info(f'{k} -> from {v[0]} to {v[1]} : {v[2]}') + self.log.info('Status:- |... Feature inspection done') + + def check_config(self): + removeDuplicate = self.config.get('removeDuplicate', False) + self.config['removeDuplicate'] = get_boolean(removeDuplicate) + self.config['misValueRatio'] = float(self.config.get('misValueRatio', '1.0')) + self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', '1.0')) + self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', '20')) + featureDict = self.config.get('featureDict', []) + if isinstance(featureDict, dict): + self.config['featureDict'] = [] + if isinstance(featureDict, str): + self.config['featureDict'] = [] + + def process(self): + #remove duplicate not required at the time of prediction + self.check_config() + self.remove_constant_feature() + self.remove_empty_feature(self.config['misValueRatio']) + self.remove_index_features() + self.drop_na_target() + if self.config['removeDuplicate']: + self.drop_duplicate() + self.check_categorical_features() + self.string_to_numeric() + self.process_target() + self.train_features_type = dict(zip(self.data.columns, self.data.dtypes)) + self.parse_process_step_config() + self.process_drop_fillna() + #self.log_type_conversion() + self.update_num_fill_dict() + #print(self.num_fill_method_dict) + self.update_cat_fill_dict() + self.create_pipeline() + self.text_pipeline(self.config) + self.apply_outlier() + self.log.info(self.process_method) + self.log.info(self.train_features_type) + + def is_target_available(self): + return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target) + + def process_target(self, operation='encode', arg=None): + if self.encode_target: + if self.is_target_available(): + self.label_encoder = LabelEncoder() + self.target = self.label_encoder.fit_transform(self.target) + return self.label_encoder + return None + + def is_target_column(self," +"column): + return column == self.target_name + + def fill_default_steps(self): + + num_fill_method = get_one_true_option(self.config.get('numericalFillMethod',None)) + normalization_method = get_one_true_option(self.config.get('normalization',None)) + for colm in self.numeric_feature: + if num_fill_method: + self.fill_missing_value_method(colm, num_fill_method.lower()) + if normalization_method: + self.fill_normalizer_method(colm, normalization_method.lower()) + + cat_fill_method = get_one_true_option(self.config.get('categoricalFillMethod',None)) + cat_encode_method = get_one_true_option(self.config.get('categoryEncoding',None)) + for colm in self.cat_feature: + if cat_fill_method: + self.fill_missing_value_method(colm, cat_fill_method.lower()) + if cat_encode_method: + self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True) + + def parse_process_step_config(self): + self.process_method = {} + user_provided_data_type = {} + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + user_provided_data_type[colm] = 
feat_conf['type'] + if user_provided_data_type: + self.update_user_provided_type(user_provided_data_type) + + self.fill_default_steps() + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + if feat_conf.get('fillMethod', None): + self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower()) + if feat_conf.get('categoryEncoding', None): + self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower()) + if feat_conf.get('normalization', None): + self.fill_normalizer_method(colm, feat_conf['normalization'].lower()) + if feat_conf.get('outlier', None): + self.fill_outlier_method(colm, feat_conf['outlier'].lower()) + if feat_conf.get('outlierOperation', None): + self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower()) + + + def update_output_features_names(self, pipe): + columns = self.output_columns + start_index = {} + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names()} + if start_index: + index_shifter = 0 + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + v = [f'{x}_vect' for x in v] + key = key + index_shifter + self.output_columns[key:key] = v + index_shifter += len(v) + self.added_features = [*self.added_features, *v] + + + def text_pipeline(self, conf_json): + if self.text_feature: + pipeList = [] + max_features = 2000 + text_pipe = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", self.text_feature) + ], remainder=""drop"")), + (""text_fillNa"",SimpleImputer(strategy='constant', fill_value='')), + (""merge_text_feature"", textCombine())]) + obj = textProfiler() + pipeList = obj.textProfiler(conf_json, pipeList, max_features) + last_step = ""merge_text_feature"" + for pipe_elem in pipeList: + text_pipe.steps.append((pipe_elem[0], pipe_elem[1])) + last_step = pipe_elem[0] + text_transformer = ('text_process', text_pipe) + self.pipeline.append(text_transformer) + self.feature_expender.append({last_step:len(self.output_columns)}) + + def create_pipeline(self): + num_pipe = {} + for k,v in self.num_fill_method_dict.items(): + for k1,v1 in v.items(): + if k1 and k1 != 'none': + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)), + (k1, self.get_num_scaler(k1)) + ]) + else: + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)) + ]) + self.output_columns.extend(v1) + cat_pipe = {} + for k,v in self.cat_fill_method_dict.items(): + for k1,v1 in v.items(): + cat_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_cat_imputer(k)), + (k1, self.get_cat_encoder(k1)) + ]) + if k1 not in ['onehotencoding']: + self.output_columns.extend(v1) + else: + self.feature_expender.append({k1:len(self.output_columns)}) + for key, pipe in num_pipe.items(): + self.pipeline.append((key, pipe)) + for key, pipe in cat_pipe.items(): + self.pipeline.append((key, pipe)) + if not self.unprocessed.empty: 
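+            # Append a selector-only passthrough pipeline for the columns that were
+            # excluded from profiling (keep_unprocessed), so they are not lost by the
+            # remainder='drop' setting used in the other column transformers.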
+            self.pipeline.append(Pipeline([
+                ('selector', ColumnTransformer([
+                        (""selector"", ""passthrough"", self.unprocessed.columns)
+                    ], remainder=""drop""))]))
+
+    # Drop: feature during training but replace with zero during prediction
+    def process_drop_fillna(self):
+        drop_column = []
+        if 'numFill' in self.process_method.keys():
+            for col, method in self.process_method['numFill'].items():
+                if method == 'drop':
+                    self.process_method['numFill'][col] = 'zero'
+                    drop_column.append(col)
+        if 'catFill' in self.process_method.keys():
+            for col, method in self.process_method['catFill'].items():
+                if method == 'drop':
+                    self.process_method['catFill'][col] = 'zero'
+                    drop_column.append(col)
+        if drop_column:
+            self.data.dropna(subset=drop_column, inplace=True)
+
+    def update_num_fill_dict(self):
+        self.num_fill_method_dict = {}
+        if 'numFill' in self.process_method.keys():
+            for f in supported_method['fillNa']['numeric']:
+                self.num_fill_method_dict[f] = {}
+                for en in supported_method['normalization']:
+                    self.num_fill_method_dict[f][en] = []
+                    for col in self.numeric_feature:
+                        numFillDict = self.process_method.get('numFill',{})
+                        normalizationDict = self.process_method.get('normalization',{})
+                        if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
+                            self.num_fill_method_dict[f][en].append(col)
+                    if not self.num_fill_method_dict[f][en]:
+                        del self.num_fill_method_dict[f][en]
+                if not self.num_fill_method_dict[f]:
+                    del self.num_fill_method_dict[f]
+
+    def update_cat_fill_dict(self):
+        self.cat_fill_method_dict = {}
+        if 'catFill' in self.process_method.keys():
+            for f in supported_method['fillNa']['categorical']:
+                self.cat_fill_method_dict[f] = {}
+                for en in supported_method['categoryEncoding']:
+                    self.cat_fill_method_dict[f][en] = []
+                    for col in self.cat_feature:
+                        catFillDict = self.process_method.get('catFill',{})
+                        catEncoderDict = self.process_method.get('catEncoder',{})
+                        if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
+                            self.cat_fill_method_dict[f][en].append(col)
+                    if not self.cat_fill_method_dict[f][en]:
+                        del self.cat_fill_method_dict[f][en]
+                if not self.cat_fill_method_dict[f]:
+                    del self.cat_fill_method_dict[f]
+
+
+    def __update_type(self):
+        self.numeric_feature = self.data.select_dtypes(include='number').columns.tolist()
+        self.cat_feature = self.data.select_dtypes(include='category').columns.tolist()
+        self.date_time = self.data.select_dtypes(include='datetime').columns.tolist()
+        self.text_feature = self.data.select_dtypes(include='object').columns.tolist()
+
+    def update_user_provided_type(self, data_types):
+        allowed_types = ['numerical','categorical', 'text','date','index']
+        type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
+        mapped_type = {k:type_mapping[v] for k,v in data_types.items()}
+        #self.log.info(mapped_type)
+        self.update_type(mapped_type, 'user provided data type')
+
+    def get_type(self, as_list=False):
+        if as_list:
+            return [self.colm_type.values()]
+        else:
+            return self.colm_type
+
+    def update_type(self, data_types={}, reason=''):
+        invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
+        if invalid_features:
+            valid_feat = list(set(data_types.keys()) - set(invalid_features))
+            valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
+        else:
+            valid_feat_type = data_types
+        for k,v in valid_feat_type.items():
+            if v != self.colm_type[k].name:
+                try:
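+                    # Trial cast only: the result of the astype() call below is discarded;
+                    # it is used to verify that the conversion is possible before recording
+                    # it in type_conversion. The actual conversion of all valid features is
+                    # applied after this loop via self.data = self.data.astype(valid_feat_type).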
+ self.data.astype({k:v}) + self.colm_type.update({k:self.data[k].dtype}) + self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) + except: + self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason) + self.data = self.data.astype(valid_feat_type) + self.__update_type() + + def string_to_numeric(self): + def to_number(x): + try: + return w2n.word_to_num(x) + except: + return np.nan + for col in self.text_feature: + col_values = self.data[col].copy() + col_values = pd.to_numeric(col_values, errors='coerce') + if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): + self.text_to_num[col] = 'float64' + self.wordToNumericFeatures.append(col) + if self.text_to_num: + columns = list(self.text_to_num.keys()) + self.data[columns] = self.data[columns].apply(lambda x: to_number(x)) + self.update_type(self.text_to_num) + self.log.info('----------- Inspecting Features -----------') + for col in self.text_feature: + self.log.info(f'-------> Feature : {col}') + if col in self.text_to_num: + self.log.info('----------> Numeric Status :Yes') + self.log.info('----------> Data Type Converting to numeric :Yes') + else: + self.log.info('----------> Numeric Status :No') + self.log.info(f'\\\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric') + self.log.info(f'\\\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}') + self.log.info('----------- Inspecting Features End -----------') + + def check_categorical_features(self): + num_data = self.data.select_dtypes(include='number') + num_data_unique = num_data.nunique() + num_to_cat_col = {} + for i, value in enumerate(num_data_unique): + if value < self.config['categoryMaxLabel']: + num_to_cat_col[num_data_unique.index[i]] = 'category' + if num_to_cat_col: + self.update_type(num_to_cat_col, 'numerical to categorical') + str_to_cat_col = {} + str_data = self.data.select_dtypes(include='object') + str_data_unique = str_data.nunique() + for i, value in enumerate(str_data_unique): + if value < self.config['categoryMaxLabel']: + str_to_cat_col[str_data_unique.index[i]] = 'category' + for colm in str_data.columns: + if self.data[colm].str.len().max() < STR_TO_CAT_CONVERSION_LEN_MAX: + str_to_cat_col[colm] = 'category' + if str_to_cat_col: + self.update_type(str_to_cat_col, 'text to categorical') + + def drop_features(self, features=[], reason='unspecified'): + if isinstance(features, str): + features = [features] + feat_to_remove = [x for x in features if x in self.data.columns] + if feat_to_remove: +" +" self.data.drop(feat_to_remove, axis=1, inplace=True) + for feat in feat_to_remove: + self.dropped_features[feat] = reason + self.log_drop_feature(feat_to_remove, reason) + self.__update_type() + + def drop_duplicate(self): + index = self.data.duplicated(keep='first') + if index.sum(): + self.remove_rows(index, 'duplicate rows') + + def drop_na_target(self): + if self.is_target_available(): + self.remove_rows(self.target.isna(), 'null target values') + + def log_drop_feature(self, columns, reason): + self.log.info(f'---------- Dropping {reason} features ----------') + self.log.info(f'\\\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found') + self.log.info(f'-------> Drop Features: {columns}') + self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}') + + def log_normalization(self): + if self.process_method.get('normalization', None): + self.log.info(f'\\\\nStatus:- !... 
Normalization treatment done') + for method in supported_method['normalization']: + cols = [] + for col, m in self.process_method['normalization'].items(): + if m == method: + cols.append(col) + if cols and method != 'none': + self.log.info(f'Running {method} on features: {cols}') + + def log_numerical_fill(self): + if self.process_method.get('numFill', None): + self.log.info(f'\\\\nStatus:- !... Fillna for numeric feature done') + for method in supported_method['fillNa']['numeric']: + cols = [] + for col, m in self.process_method['numFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + def log_categorical_fill(self): + if self.process_method.get('catFill', None): + self.log.info(f'\\\\nStatus:-!... FillNa for categorical feature done') + for method in supported_method['fillNa']['categorical']: + cols = [] + for col, m in self.process_method['catFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + def remove_constant_feature(self): + unique_values = self.data.nunique() + constant_features = [] + for i, value in enumerate(unique_values): + if value == 1: + constant_features.append(unique_values.index[i]) + if constant_features: + self.drop_features(constant_features, ""constant"") + for i in constant_features: + try: + self.numeric_feature.remove(i) + except ValueError: + pass + try: + self.cat_feature.remove(i) + except ValueError: + pass + + def remove_empty_feature(self, misval_ratio=1.0): + missing_ratio = self.data.isnull().sum() / len(self.data) + missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)} + empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio] + if empty_features: + self.drop_features(empty_features, ""empty"") + for i in empty_features: + try: + self.numeric_feature.remove(i) + except ValueError: + pass + try: + self.cat_feature.remove(i) + except: + pass + + def remove_index_features(self): + index_feature = [] + + for feat in self.numeric_feature: + if self.data[feat].nunique() == len(self.data): + if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)): + index_feature.append(feat) + self.drop_features(index_feature, ""index"") + for i in index_feature: + try: + self.numeric_feature.remove(i) + except ValueError: + pass + try: + self.cat_feature.remove(i) + except: + pass + + def fill_missing_value_method(self, colm, method): + if colm in self.numeric_feature: + if method in supported_method['fillNa']['numeric']: + if 'numFill' not in self.process_method.keys(): + self.process_method['numFill'] = {} + if method == 'na' and self.process_method['numFill'].get(colm, None): + pass # don't overwrite + else: + self.process_method['numFill'][colm] = method + if colm in self.cat_feature: + if method in supported_method['fillNa']['categorical']: + if 'catFill' not in self.process_method.keys(): + self.process_method['catFill'] = {} + if method == 'na' and self.process_method['catFill'].get(colm, None): + pass + else: + self.process_method['catFill'][colm] = method + + def check_encoding_method(self, method, colm,default=False): + if not self.is_target_available() and (method.lower() == list(target_encoding_method_change.keys())[0]): + method = target_encoding_method_change[method.lower()] + if default: + self.log.info(f""Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present"") + 
return method + + def fill_encoder_value_method(self,colm, method, default=False): + if colm in self.cat_feature: + if method.lower() in supported_method['categoryEncoding']: + if 'catEncoder' not in self.process_method.keys(): + self.process_method['catEncoder'] = {} + if method == 'na' and self.process_method['catEncoder'].get(colm, None): + pass + else: + self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) + else: + self.log.info(f""-------> categorical encoding method '{method}' is not supported. supported methods are {supported_method['categoryEncoding']}"") + + def fill_normalizer_method(self,colm, method): + if colm in self.numeric_feature: + if method in supported_method['normalization']: + if 'normalization' not in self.process_method.keys(): + self.process_method['normalization'] = {} + if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): + pass + else: + self.process_method['normalization'][colm] = method + else: + self.log.info(f""-------> Normalization method '{method}' is not supported. supported methods are {supported_method['normalization']}"") + + def apply_outlier(self): + inlier_indices = np.array([True] * len(self.data)) + if self.process_method.get('outlier', None): + self.log.info('-------> Feature wise outlier detection:') + for k,v in self.process_method['outlier'].items(): + if k in self.numeric_feature: + if v == 'iqr': + index = findiqrOutlier(self.data[k]) + elif v == 'zscore': + index = findzscoreOutlier(self.data[k]) + elif v == 'disable': + index = None + if k in self.process_method['outlierOperation'].keys(): + if self.process_method['outlierOperation'][k] == 'dropdata': + inlier_indices = np.logical_and(inlier_indices, index) + elif self.process_method['outlierOperation'][k] == 'average': + mean = self.data[k].mean() + index = ~index + self.data.loc[index,[k]] = mean + self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') + elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': + self.log.info(f'-------> Total outliers in ""{k}"": {(~index).sum()}') + if self.config.get('outlierDetection',None): + if self.config['outlierDetection'].get('IsolationForest','False') == 'True': + index = findiforestOutlier(self.data[self.numeric_feature]) + inlier_indices = np.logical_and(inlier_indices, index) + self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') + if inlier_indices.sum() != len(self.data): + self.remove_rows( inlier_indices == False, 'outlier detection') + self.log.info('Status:- |... 
Outlier treatment done')
+            self.log.info(f'-------> Data Frame Shape After Outlier treatment (Rows,Columns): {self.data.shape}')
+
+    def remove_rows(self, indices, msg=''):
+        if indices.sum():
+            indices = ~indices
+            if len(indices) != len(self.data):
+                raise ValueError('Data Frame length mismatch')
+            self.data = self.data[indices]
+            self.data.reset_index(drop=True, inplace=True)
+            if self.is_target_available():
+                self.target = self.target[indices]
+                if isinstance(self.target, pd.Series):
+                    self.target.reset_index(drop=True, inplace=True)
+            if not self.unprocessed.empty:
+                self.unprocessed = self.unprocessed[indices]
+                self.unprocessed.reset_index(drop=True, inplace=True)
+            self.log.info(f'-------> {msg} dropped rows count: {(indices == False).sum()}')
+
+    def fill_outlier_method(self,colm, method):
+        if colm in self.numeric_feature:
+            if method in supported_method['outlier_column_wise']:
+                if 'outlier' not in self.process_method.keys():
+                    self.process_method['outlier'] = {}
+                if method != 'disable':
+                    self.process_method['outlier'][colm] = method
+            else:
+                self.log.info(f""-------> outlier detection method '{method}' is not supported for column wise. supported methods are {supported_method['outlier_column_wise']}"")
+
+    def fill_outlier_process(self,colm, method):
+        if colm in self.numeric_feature:
+            if method in supported_method['outlierOperation']:
+                if 'outlierOperation' not in self.process_method.keys():
+                    self.process_method['outlierOperation'] = {}
+                self.process_method['outlierOperation'][colm] = method
+            else:
+                self.log.info(f""-------> outlier process method '{method}' is not supported for column wise. supported methods are {supported_method['outlierOperation']}"")
+
+    def get_cat_imputer(self,method):
+        if method == 'mode':
+            return SimpleImputer(strategy='most_frequent')
+        elif method == 'zero':
+            return SimpleImputer(strategy='constant', fill_value=0)
+
+    def get_cat_encoder(self,method):
+        if method == 'labelencoding':
+            return OrdinalEncoder(handle_unknown=""error"")
+        elif method == 'onehotencoding':
+            return OneHotEncoder(sparse=False,handle_unknown=""error"")
+        elif method == 'targetencoding':
+            if not self.is_target_available():
+                raise ValueError('Can not apply Target Encoding when target feature is not present')
+            return TargetEncoder(handle_unknown='error')
+
+    def get_num_imputer(self,method):
+        if method == 'mode':
+            return SimpleImputer(strategy='most_frequent')
+        elif method == 'mean':
+            return SimpleImputer(strategy='mean')
+        elif method == 'median':
+            return SimpleImputer(strategy='median')
+        elif method == 'knnimputer':
+            return KNNImputer()
+        elif method == 'zero':
+            return SimpleImputer(strategy='constant', fill_value=0)
+
+    def get_num_scaler(self,method):
+        if method == 'minmax':
+            return MinMaxScaler()
+        elif method == 'standardscaler':
+            return StandardScaler()
+        elif method == 'lognormal':
+            return PowerTransformer(method='yeo-johnson', standardize=False)
+    """"""
+        return self.codeText
+
+    """"""
+/**
+* =============================================================================
+* COPYRIGHT NOTICE
+* =============================================================================
+* © Copyright HCL Technologies Ltd. 2021, 2022
+* Proprietary and confidential. All information contained herein is, and
+* remains the property of HCL Technologies Limited. Copying or reproducing the
+* contents of this file, via any medium is strictly prohibited unless prior
+* written permission is obtained from HCL Technologies Limited.
+*/ +"""""" +import json + +class register(): + + def __init__(self, importer, indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.codeText = """" + self.function_code = """" + self.importer = importer + self.input_files = {} + self.output_files = {} + self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','model' : 'model.pkl', 'performance': 'performance.json','production':'production.json','monitor':'monitoring.json'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def code_imports(self): + modules = [{'module':'sys'} + ,{'module':'json'} + " +",{'module':'time'} + ,{'module':'platform'} + ,{'module':'tempfile'} + ,{'module':'sqlite3'} + ,{'module':'mlflow'} + ,{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'ViewType', 'mod_from':'mlflow.entities'} + ,{'module':'MlflowClient', 'mod_from':'mlflow.tracking'} + ,{'module':'ModelVersionStatus', 'mod_from':'mlflow.entities.model_registry.model_version_status'} + ] + self.import_modules(modules) + + def import_module(self, module, mod_from=None, mod_as=None): + self.importer.addModule(module, mod_from=mod_from, mod_as=mod_as) + + def import_modules(self, modules): + if isinstance(modules, list): + for mod in modules: + if isinstance(mod, dict): + self.importer.addModule(mod['module'], mod_from= mod.get('mod_from', None), mod_as=mod.get('mod_as', None)) + + def getImportCode(self): + return self.importer.getCode() + + def __addValidateConfigCode(self, models=None): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config\\ + "" + return text + + def addLocalFunctionsCode(self, models): + self.function_code += self.__addValidateConfigCode(models) + + def addPrefixCode(self, indent=1): + self.code_imports() + self.codeText += ""\\n\\ + \\ndef __merge_logs(log_file_sequence,path, files):\\ + \\n if log_file_sequence['first'] in files:\\ + \\n with open(path/log_file_sequence['first'], 'r') as f:\\ + \\n main_log = f.read()\\ + \\n files.remove(log_file_sequence['first'])\\ + \\n for file in files:\\ + \\n with open(path/file, 'r') as f:\\ + \\n main_log = main_log + f.read()\\ + \\n (path/file).unlink()\\ + \\n with open(path/log_file_sequence['merged'], 'w') as f:\\ + \\n f.write(main_log)\\ + \\n\\ + \\ndef merge_log_files(folder, models):\\ + \\n log_file_sequence = {\\ + \\n 'first': 'aion.log',\\ + \\n 'merged': 'aion.log'\\ + \\n }\\ + \\n log_file_suffix = '_aion.log'\\ + \\n log_files = [x+log_file_suffix for x in models if 
(folder/(x+log_file_suffix)).exists()]\\ + \\n log_files.append(log_file_sequence['first'])\\ + \\n __merge_logs(log_file_sequence, folder, log_files)\\ + \\n\\ + \\ndef register_model(targetPath,models,usecasename, meta_data):\\ + \\n register = mlflow_register(targetPath, usecasename, meta_data)\\ + \\n register.setup_registration()\\ + \\n\\ + \\n runs_with_score = register.get_unprocessed_runs(models)\\ + \\n best_run = register.get_best_run(runs_with_score)\\ + \\n register.update_unprocessed(runs_with_score)\\ + \\n return register.register_model(models, best_run)\\ + \\n\\ + \\ndef register(log):\\ + \\n config = validateConfig()\\ + \\n targetPath = Path('aion')/config['targetPath']\\ + \\n models = config['models']\\ + \\n merge_log_files(targetPath, models)\\ + \\n meta_data_file = targetPath/IOFiles['metaData']\\ + \\n if meta_data_file.exists():\\ + \\n meta_data = read_json(meta_data_file)\\ + \\n else:\\ + \\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\ + \\n usecase = config['targetPath']\\ + \\n # enable logging\\ + \\n log_file = targetPath/IOFiles['log']\\ + \\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\ + \\n register_model_name = register_model(targetPath,models,usecase, meta_data)\\ + \\n status = {'Status':'Success','Message':f'Model Registered: {register_model_name}'}\\ + \\n log.info(f'output: {status}')\\ + \\n return json.dumps(status)"" + + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'sys'} + ,{'module':'os'} + ,{'module':'json'} + ,{'module':'logging'} + ,{'module':'shutil'} + ,{'module':'argparse'} + ] + return modules + + def addMainCode(self, models, indent=1): + self.codeText += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n log = None\\ + \\n try:\\ + \\n print(register(log))\\ + \\n except Exception as e:\\ + \\n if log:\\ + \\n log.error(e, exc_info=True)\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print(json.dumps(status))"" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def query_with_quetes_code(self, decs=True, indent=1): + return """"""\\n{first_indentation}def __get_unprocessed_runs_sorted(self): +{indentation}query = ""tags.processed = 'no'"" +{indentation}runs = self.client.search_runs( +{indentation} experiment_ids=self.experiment_id, +{indentation} filter_string=query, +{indentation} run_view_type=ViewType.ACTIVE_ONLY, +{indentation} order_by=['metrics.test_score {0}'] +{indentation}) +{indentation}return runs\\n"""""".format('DESC' if decs else 'ASC', first_indentation=indent*self.tab, indentation=(1+indent)*self.tab) + + + def addClassCode(self, smaller_is_better=False): + self.codeText += ""\\ + \\nclass mlflow_register():\\ + \\n\\ + \\n def __init__(self, input_path, model_name, meta_data):\\ + \\n self.input_path = Path(input_path).resolve()\\ + \\n self.model_name = model_name\\ + \\n self.meta_data = meta_data\\ + \\n self.logger = logging.getLogger('ModelRegister')\\ + \\n self.client = None\\ + \\n self.monitoring_data = read_json(self.input_path/IOFiles['monitor'])\\ + \\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\ + \\n if not self.monitoring_data.get('mlflow_config',False):\\ + \\n self.monitoring_data['mlflow_config'] = mlflow_default_config\\ + \\n\\ + \\n def setup_registration(self):\\ + \\n tracking_uri, artifact_uri, registry_uri = 
get_mlflow_uris(self.monitoring_data['mlflow_config'],self.input_path)\\ + \\n self.logger.info(f'MLflow tracking uri: {tracking_uri}')\\ + \\n self.logger.info(f'MLflow registry uri: {registry_uri}')\\ + \\n mlflow.set_tracking_uri(tracking_uri)\\ + \\n mlflow.set_registry_uri(registry_uri)\\ + \\n self.client = mlflow.tracking.MlflowClient(\\ + \\n tracking_uri=tracking_uri,\\ + \\n registry_uri=registry_uri,\\ + \\n )\\ + \\n self.experiment_id = self.client.get_experiment_by_name(self.model_name).experiment_id\\ + \\n"" + self.codeText += self.query_with_quetes_code(smaller_is_better == False) + self.codeText += ""\\ + \\n def __log_unprocessed_runs(self, runs):\\ + \\n self.logger.info('Unprocessed runs:')\\ + \\n for run in runs:\\ + \\n self.logger.info(' {}: {}'.format(run.info.run_id,run.data.metrics['test_score']))\\ + \\n\\ + \\n def get_unprocessed_runs(self, model_path):\\ + \\n unprocessed_runs = self.__get_unprocessed_runs_sorted()\\ + \\n if not unprocessed_runs:\\ + \\n raise ValueError('Registering fail: No new trained model')\\ + \\n self.__log_unprocessed_runs( unprocessed_runs)\\ + \\n return unprocessed_runs\\ + \\n\\ + \\n def __wait_until_ready(self, model_name, model_version):\\ + \\n client = MlflowClient()\\ + \\n for _ in range(10):\\ + \\n model_version_details = self.client.get_model_version(\\ + \\n name=model_name,\\ + \\n version=model_version,\\ + \\n )\\ + \\n status = ModelVersionStatus.from_string(model_version_details.status)\\ + \\n if status == ModelVersionStatus.READY:\\ + \\n break\\ + \\n time.sleep(1)\\ + \\n\\ + \\n def __create_model(self, run):\\ + \\n artifact_path = 'model'\\ + \\n model_uri = 'runs:/{run_id}/{artifact_path}'.format(run_id=run.info.run_id, artifact_path=artifact_path)\\ + \\n self.logger.info(f'Registering model (run id): {run.info.run_id}')\\ + \\n model_details = mlflow.register_model(model_uri=model_uri, name=self.model_name)\\ + \\n self.__wait_until_ready(model_details.name, model_details.version)\\ + \\n self.client.set_tag(run.info.run_id, 'registered', 'yes' )\\ + \\n state_transition = self.client.transition_model_version_stage(\\ + \\n name=model_details.name,\\ + \\n version=model_details.version,\\ + \\n stage='Production',\\ + \\n )\\ + \\n self.logger.info(state_transition)\\ + \\n return model_details\\ + \\n\\ + \\n def get_best_run(self, models):\\ + \\n return models[0]\\ + \\n\\ + \\n def __validate_config(self):\\ + \\n try:\\ + \\n load_data_loc = self.meta_data['load_data']['Status']['DataFilePath']\\ + \\n except KeyError:\\ + \\n raise ValueError('DataIngestion step output is corrupted')\\ + \\n\\ + \\n def __mlflow_log_transformer_steps(self, best_run):\\ + \\n run_id = best_run.info.run_id\\ + \\n meta_data = read_json(self.input_path/(best_run.data.tags['mlflow.runName']+'_'+IOFiles['metaData']))\\ + \\n self.__validate_config()\\ + \\n with mlflow.start_run(run_id):\\ + \\n if 'transformation' in meta_data.keys():\\ + \\n if 'target_encoder' in meta_data['transformation'].keys():\\ + \\n source_loc = meta_data['transformation']['target_encoder']\\ + \\n mlflow.log_artifact(str(self.input_path/source_loc))\\ + \\n meta_data['transformation']['target_encoder'] = Path(source_loc).name\\ + \\n if 'preprocessor' in meta_data['transformation'].keys():\\ + \\n source_loc = meta_data['transformation']['preprocessor']\\ + \\n mlflow.log_artifact(str(self.input_path/source_loc))\\ + \\n meta_data['transformation']['preprocessor'] = Path(source_loc).name\\ + \\n\\ + \\n write_json(meta_data, 
self.input_path/IOFiles['metaData'])\\ + \\n mlflow.log_artifact(str(self.input_path/IOFiles['metaData']))\\ + \\n\\ + \\n def __update_processing_tag(self, processed_runs):\\ + \\n self.logger.info('Changing status to processed:')\\ + \\n for run in processed_runs:\\ + \\n self.client.set_tag(run.info.run_id, 'processed', 'yes')\\ + \\n self.logger.info(f' run id: {run.info.run_id}')\\ + \\n\\ + \\n def update_unprocessed(self, runs):\\ + \\n return self.__update_processing_tag( runs)\\ + \\n\\ + \\n def __force_register(self, best_run):\\ + \\n self.__create_model( best_run)\\ + \\n self.__mlflow_log_transformer_steps( best_run)\\ + \\n production_json = self.input_path/IOFiles['production']\\ + \\n production_model = {'Model':best_run.data.tags['mlflow.runName'],'runNo':self.monitoring_data['runNo'],'score':best_run.data.metrics['test_score']}\\ + \\n write_json(production_model, production_json)\\ + \\n database_path = self.input_path/(self.input_path.stem + '.db')\\ + \\n if database_path.exists():\\ + \\n database_path.unlink()\\ + \\n return best_run.data.tags['mlflow.runName']\\ + \\n\\ + \\n def __get_register_model_score(self):\\ + \\n reg = self.client.list_registered_models()\\ + \\n if not reg:\\ + \\n return '', 0\\ + \\n run_id = reg[0].latest_versions[0].run_id\\ + \\n run = self.client.get_run(run_id)\\ + \\n score = run.data.metrics['test_score']\\ + \\n return run_id, score\\ + \\n\\ + \\n def register_model(self, models, best_run):\\ + \\n return self.__force_register(best_run)"" + + def local_functions_code(self, smaller_is_" +"better=True, indent=1): + if smaller_is_better: + min_max = 'min' + else: + min_max = 'max' + self.codeText += ""\\ndef validate_config(deploy_dict):\\ + \\n try:\\ + \\n load_data_loc = deploy_dict['load_data']['Status']['DataFilePath']\\ + \\n except KeyError:\\ + \\n raise ValueError('DataIngestion step output is corrupted')\\ + \\n\\ + \\ndef get_digest(fname):\\ + \\n import hashlib\\ + \\n hash_algo = hashlib.sha256()\\ + \\n with open(fname, 'rb') as f:\\ + \\n for chunk in iter(lambda: f.read(2 ** 20), b''):\\ + \\n hash_algo.update(chunk)\\ + \\n return hash_algo.hexdigest()\\ + \\n"" + + def getCode(self, indent=1): + return self.function_code + '\\n' + self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
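# --- Hedged usage sketch (not part of the generated file) ---
# The get_digest helper emitted above hashes an artifact in 1 MiB chunks so
# memory stays flat even for large model files. The file name below is a
# hypothetical example.
import hashlib
from pathlib import Path

def get_digest(fname):
    # stream the file; never load it fully into memory
    hash_algo = hashlib.sha256()
    with open(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(2 ** 20), b''):
            hash_algo.update(chunk)
    return hash_algo.hexdigest()

artifact = Path('model.pkl')          # hypothetical artifact path
if artifact.exists():
    print(get_digest(artifact))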
+*/ +"""""" +from .imports import importModule + +utility_functions = { +'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +} + +#TODO convert read and write functions in to class functions +functions_code = { + 'read_json':{'imports':[{'mod':'json'}],'code':""\\n\\ + \\ndef read_json(file_path):\\ + \\n data = None\\ + \\n with open(file_path,'r') as f:\\ + \\n data = json.load(f)\\ + \\n return data\\ + \\n""}, + 'write_json':{'imports':[{'mod':'json'}],'code':""\\n\\ + \\ndef write_json(data, file_path):\\ + \\n with open(file_path,'w') as f:\\ + \\n json.dump(data, f)\\ + \\n""}, + 'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef read_data(file_path, encoding='utf-8', sep=','):\\ + \\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\ + \\n""}, + 'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef write_data(data, file_path, index=False):\\ + \\n return data.to_csv(file_path, index=index)\\ + \\n\\ + \\n#Uncomment and change below code for google storage\\ + \\n#from google.cloud import storage\\ + \\n#def write_data(data, file_path, index=False):\\ + \\n# file_name= file_path.name\\ + \\n# data.to_csv('output_data.csv')\\ + \\n# storage_client = storage.Client()\\ + \\n# bucket = storage_client.bucket('aion_data')\\ + \\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\ + \\n# return data\\ + \\n""}, + 'is_file_name_url':{'imports':[],'code':""\\n\\ + \\ndef is_file_name_url(file_name):\\ + \\n supported_urls_starts_with = ('gs://','https://','http://')\\ + \\n return file_name.startswith(supported_urls_starts_with)\\ + \\n""}, + 'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':""\\n\\ + \\nclass logger():\\ + \\n #setup the logger\\ + \\n def __init__(self, log_file, mode='w', logger_name=None):\\ + \\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\ + \\n self.log = logging.getLogger(logger_name)\\ + \\n\\ + \\n #get logger\\ + \\n def getLogger(self):\\ + \\n return self.log\\ + \\n\\ + \\n def info(self, msg):\\ + \\n self.log.info(msg)\\ + \\n\\ + \\n def error(self, msg, exc_info=False):\\ + \\n self.log.error(msg,exc_info)\\ + \\n\\ + \\n # format and log dataframe\\ + \\n def log_dataframe(self, df, rows=2, msg=None):\\ + \\n buffer = io.StringIO()\\ + \\n df.info(buf=buffer)\\ + \\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\\ + \\n log_text += '\\\\n\\\\t'+str(df.head(rows)).replace('\\\\n','\\\\n\\\\t')\\ + \\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\ + \\n self.log.info(log_text)\\ + \\n""}, +} + +class utility_function(): + + def __init__(self, module): + if module in utility_functions.keys(): + self.module_name = module + else: 
+ self.module_name = None + self.importer = importModule() + self.codeText = """" + + def get_code(self): + code = """" + if self.module_name: + functions = utility_functions[self.module_name] + for function in functions: + self.codeText += self.get_function_code(function) + code = self.importer.getCode() + code += self.codeText + return code + + def get_function_code(self, name): + code = """" + if name in functions_code.keys(): + code += functions_code[name]['code'] + if self.importer: + if 'imports' in functions_code[name].keys(): + for module in functions_code[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + return code + + def get_importer(self): + return self.importer + +if __name__ == '__main__': + obj = utility_function('load_data') + p = obj.get_utility_code() + print(p) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from .imports import importModule +from .load_data import tabularDataReader +from .transformer import transformer as profiler +from .transformer import data_profiler +from .selector import selector +from .trainer import learner +from .register import register +from .deploy import deploy +from .drift_analysis import drift +from .functions import global_function +from .data_reader import data_reader +from .utility import utility_function + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
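# --- Minimal, self-contained sketch of the snippet-registry pattern used by
# utility_function above: every helper carries its code string plus the
# imports it needs, and rendering de-duplicates the imports before
# concatenating the bodies. Names below are illustrative, not the generator's
# own API. ---
snippets = {
    'read_json': {'imports': ['import json'],
                  'code': 'def read_json(p):\n    with open(p) as f:\n        return json.load(f)\n'},
    'write_json': {'imports': ['import json'],
                   'code': 'def write_json(d, p):\n    with open(p, \'w\') as f:\n        json.dump(d, f)\n'},
}

def render(names):
    imports, bodies = [], []
    for name in names:
        for imp in snippets[name]['imports']:
            if imp not in imports:          # keep each import only once
                imports.append(imp)
        bodies.append(snippets[name]['code'])
    return '\n'.join(imports) + '\n\n' + '\n'.join(bodies)

print(render(['read_json', 'write_json']))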
+*/ +"""""" +import json + +class deploy(): + + def __init__(self, target_encoder=False, feature_reducer=False, score_smaller_is_better=True, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = ""\\n\\n\\ +\\nclass deploy():\\ +\\n\\ +\\n def __init__(self, base_config, log=None):\\ + \\n self.targetPath = (Path('aion')/base_config['targetPath']).resolve()\\ + \\n if log:\\ + \\n self.logger = log\\ + \\n else:\\ + \\n log_file = self.targetPath/IOFiles['log']\\ + \\n self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\ + \\n try:\\ + \\n self.initialize(base_config)\\ + \\n except Exception as e:\\ + \\n self.logger.error(e, exc_info=True)\\ + \\n\\ +\\n def initialize(self, base_config):\\ + \\n self.usecase = base_config['targetPath']\\ + \\n monitoring_data = read_json(self.targetPath/IOFiles['monitor'])\\ + \\n self.prod_db_type = monitoring_data['prod_db_type']\\ + \\n self.db_config = monitoring_data['db_config']\\ + \\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\ + \\n tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(monitoring_data.get('mlflow_config',mlflow_default_config), self.targetPath)\\ + \\n mlflow.tracking.set_tracking_uri(tracking_uri)\\ + \\n mlflow.tracking.set_registry_uri(registry_uri)\\ + \\n client = mlflow.tracking.MlflowClient()\\ + \\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )\\ + \\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\ + \\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\ + \\n run = client.get_run(self.model.metadata.run_id)\\ + \\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\ + \\n skip_name = 'file:'\\ + \\n if run.info.artifact_uri.startswith('file:///'):\\ + \\n skip_name = 'file:///'\\ + \\n self.artifact_path = Path(run.info.artifact_uri[len(skip_name) : ])\\ + \\n self.artifact_path_type = 'file'\\ + \\n meta_data = read_json(self.artifact_path/IOFiles['metaData'])\\ + \\n else:\\ + \\n self.artifact_path = run.info.artifact_uri\\ + \\n self.artifact_path_type = 'url'\\ + \\n meta_data_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+IOFiles['metaData'])\\ + \\n meta_data = read_json(meta_data_file)\\ + \\n self.selected_features = meta_data['load_data']['selected_features']\\ + \\n self.train_features = meta_data['training']['features']"" + if target_encoder: + self.codeText += ""\\ + \\n if self.artifact_path_type == 'url':\\ + \\n preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\\ + \\n target_encoder_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['target_encoder'])\\ + \\n else:\\ + \\n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']\\ + \\n target_encoder_file = self.artifact_path/meta_data['transformation']['target_encoder']\\ + \\n self.target_encoder = joblib.load(target_encoder_file)"" + else: + self.codeText += ""\\ + \\n if self.artifact_path_type == 'url':\\ + \\n preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\\ + \\n else:\\ + \\n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']"" + self.codeText += ""\\ + \\n self.preprocessor = joblib.load(preprocessor_file)\\ + \\n self.preprocess_out_columns = 
meta_data['transformation']['preprocess_out_columns']\\ + "" + if feature_reducer: + self.codeText += ""\\ + \\n if self.artifact_path_type == 'url':\\ + \\n feature_reducer_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['featureengineering']['feature_reducer']['file'])\\ + \\n else:\\ + \\n feature_reducer_file = self.artifact_path/meta_data['featureengineering']['feature_reducer']['file']\\ + \\n self.feature_reducer = joblib.load(feature_reducer_file)\\ + \\n self.feature_reducer_cols = meta_data['featureengineering']['feature_reducer']['features']"" + self.codeText +=""\\n\\ +\\n def write_to_db(self, data):\\ +\\n prod_file = IOFiles['prodData']\\ +\\n writer = dataReader(reader_type=self.prod_db_type,target_path=self.targetPath, config=self.db_config )\\ +\\n writer.write(data, prod_file)\\ +\\n writer.close()\\ +\\n\\ +\\n def predict(self, data=None):\\ +\\n try:\\ +\\n return self.__predict(data)\\ +\\n except Exception as e:\\ +\\n if self.logger:\\ +\\n self.logger.error(e, exc_info=True)\\" +" +\\n raise ValueError(json.dumps({'Status':'Failure', 'Message': str(e)}))\\ +\\n\\ +\\n def __predict(self, data=None):\\ +\\n df = pd.DataFrame()\\ +\\n jsonData = json.loads(data)\\ +\\n df = pd.json_normalize(jsonData)\\ +\\n if len(df) == 0:\\ +\\n raise ValueError('No data record found')\\ +\\n missing_features = [x for x in self.selected_features if x not in df.columns]\\ +\\n if missing_features:\\ +\\n raise ValueError(f'some feature/s is/are missing: {missing_features}')\\ +\\n df_copy = df.copy()\\ +\\n df = df[self.selected_features]\\ +\\n df = self.preprocessor.transform(df)\\ +\\n if isinstance(df, scipy.sparse.spmatrix):\\ +\\n df = df.toarray()\\ +\\n df = pd.DataFrame(df, columns=self.preprocess_out_columns)"" + if feature_reducer: + self.codeText += ""\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"" + else: + self.codeText += ""\\n df = df[self.train_features]"" + if target_encoder: + self.codeText += ""\\n df = df.astype(np.float32)\\ + \\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\ + \\n df_copy['prediction'] = output.idxmax(axis=1)\\ + \\n self.write_to_db(df_copy)\\ + \\n df_copy['probability'] = output.max(axis=1).round(2)\\ + \\n df_copy['remarks'] = output.apply(lambda x: x.to_json(), axis=1)\\ + \\n output = df_copy.to_json(orient='records')"" + else: + self.codeText += ""\\n output = self.model._model_impl.predict(df).reshape(1, -1)[0].round(2)\\ + \\n df_copy['prediction'] = output\\ + \\n self.write_to_db(df_copy)\\ + \\n output = df_copy.to_json(orient='records')"" + self.codeText += ""\\n return output"" + self.input_files = {} + self.output_files = {} + self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json', 'performance' : 'performance.json','monitor':'monitoring.json','log':'predict.log','prodData':'prodData'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not 
self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + text += '\\n' + text += self.getOutputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def addStatement(self, statement, indent=1): + pass + + def getCode(self): + return self.codeText + + def getGroundtruthCode(self): + return """""" +import sys +import math +import json +import sqlite3 +import pandas as pd +from datetime import datetime +from pathlib import Path +import platform +from utility import * +from data_reader import dataReader + +IOFiles = { + ""monitoring"":""monitoring.json"", + ""prodDataGT"":""prodDataGT"" + } + +class groundtruth(): + + def __init__(self, base_config): + self.targetPath = Path('aion')/base_config['targetPath'] + data = read_json(self.targetPath/IOFiles['monitoring']) + self.prod_db_type = data['prod_db_type'] + self.db_config = data['db_config'] + + def actual(self, data=None): + df = pd.DataFrame() + jsonData = json.loads(data) + df = pd.json_normalize(jsonData) + if len(df) == 0: + raise ValueError('No data record found') + self.write_to_db(df) + status = {'Status':'Success','Message':'uploaded'} + return json.dumps(status) + + def write_to_db(self, data): + prod_file = IOFiles['prodDataGT'] + writer = dataReader(reader_type=self.prod_db_type, target_path=self.targetPath, config=self.db_config ) + writer.write(data, prod_file) + writer.close() + +"""""" + def getServiceCode(self): + return """""" + +from http.server import BaseHTTPRequestHandler,HTTPServer +from socketserver import ThreadingMixIn +import os +from os.path import expanduser +import platform +import threading +import subprocess +import argparse +import re +import cgi +import json +import shutil +import logging +import sys +import time +import seaborn as sns +from pathlib import Path +from predict import deploy +from groundtruth import groundtruth +import pandas as pd +import scipy.stats as st +import numpy as np +import warnings +from utility import * +from data_reader import dataReader + +warnings.filterwarnings(""ignore"") +config_input = None + +IOFiles = { + ""inputData"": ""rawData.dat"", + ""metaData"": ""modelMetaData.json"", + ""production"": ""production.json"", + ""log"": ""aion.log"", + ""monitoring"":""monitoring.json"", + ""prodData"": ""prodData"", + ""prodDataGT"":""prodDataGT"" +} + +def DistributionFinder(data): + try: + distributionName = """" + sse = 0.0 + KStestStatic = 0.0 + dataType = """" + if (data.dtype == ""float64"" or data.dtype == ""float32""): + dataType = ""Continuous"" + elif (data.dtype == ""int""): + dataType = ""Discrete"" + elif (data.dtype == ""int64""): + dataType = ""Discrete"" + if (dataType == ""Discrete""): + distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] + index, counts = np.unique(data.astype(int), return_counts=True) + + if (len(index) >= 2): + best_sse = np.inf + y1 = [] + total = sum(counts) + mean = float(sum(index * counts)) / total + variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) + dispersion = mean / float(variance) + theta = 1 / float(dispersion) + r = mean * (float(theta) / 1 - theta) + + for j in counts: + y1.append(float(j) / total) + + pmf1 = st.bernoulli.pmf(index, mean) + pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) + pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) + pmf4 = st.nbinom.pmf(index, mean, r) + pmf5 = 
st.poisson.pmf(index, mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1 - pmf5, 2.0)) + + sselist = [sse1, sse2, sse3, sse4, sse5] + best_distribution = 'NA' + for i in range(0, len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName = best_distribution + sse = best_sse + + elif (dataType == ""Continuous""): + + distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, + st.gamma, st.beta] + best_distribution = st.norm.name + best_sse = np.inf + datamin = data.min() + datamax = data.max() + nrange = datamax - datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + params = distribution.fit(data.astype(float)) + arg = params[:-2] + loc = params[-2] + scale = params[-1] + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if (best_sse > sse > 0): + best_distribution = distribution.name + best_sse = sse + distributionName = best_distribution + sse = best_sse + except: + response = str(sys.exc_info()[0]) + message = 'Job has Failed' + response + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + print(message) + return distributionName, sse + +def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): + import matplotlib.pyplot as plt + import math + import io, base64, urllib + np.seterr(divide='ignore', invalid='ignore') + try: + plt.clf() + except: + pass + plt.rcParams.update({'figure.max_open_warning': 0}) + sns.set(color_codes=True) + pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + if len(feature) > 4: + numneroffeatures = len(feature) + plt.figure(figsize=(10, numneroffeatures*2)) + else: + plt.figure(figsize=(10,5)) + + for i in enumerate(feature): + + dataType = dataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) + dataframe[i[1]] = dataframe[i[1]].cat.codes + dataframe[i[1]] = dataframe[i[1]].astype(int) + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) + else: + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) + + plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) + plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) + distname, sse = DistributionFinder(dataframe[i[1]]) + print(distname) + ax = sns.distplot(dataframe[i[1]], label=distname) + ax.legend(loc='best') + if newdataframe.empty == False: + dataType = newdataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) + newdataframe[i[1]] = newdataframe[i[1]].cat.codes + newdataframe[i[1]] = newdataframe[i[1]].astype(int) + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) + else: + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) + distname, sse = DistributionFinder(newdataframe[i[1]]) + print(distname) + ax = sns.distplot(newdataframe[i[1]],label=distname) + ax.legend(loc='best') + buf = io.BytesIO() + plt.savefig(buf, 
format='png') + buf.seek(0) + string = base64.b64encode(buf.read()) + uri = urllib.parse.quote(string) + return uri + +def read_json(file_path): + data = None + with open(file_path,'r') as f: + data = json.load(f) + return data + +class HTTPRequestHandler(BaseHTTPRequestHandler): + + def do_POST(self): + print('PYTHON ######## REQUEST ####### STARTED') + if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + usecase = self.path.split('/')[-2] + if usecase.lower() == config_input['targetPath'].lower(): + operation = self.path.split('/')[-1] + data = json.loads(data) + dataStr = json.dumps(data) + if operation.lower() == 'predict': + output=deployobj.predict(dataStr) + resp = output + elif operation" +".lower() == 'groundtruth': + gtObj = groundtruth(config_input) + output = gtObj.actual(dataStr) + resp = output + elif operation.lower() == 'delete': + targetPath = Path('aion')/config_input['targetPath'] + for file in data: + x = targetPath/file + if x.exists(): + os.remove(x) + resp = json.dumps({'Status':'Success'}) + else: + outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'}) + resp = outputStr + else: + outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'}) + resp = outputStr + + else: + outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'}) + resp = outputStr + resp=resp+'\\\\n' + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print('python ==> else1') + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print('PYTHON ######## REQUEST ####### ENDED') + return + + def do_GET(self): + print('PYTHON ######## REQUEST ####### STARTED') + if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): + usecase = self.path.split('/')[-2] + self.send_response(200) + self.targetPath = Path('aion')/config_input['targetPath'] + meta_data_file = self.targetPath/IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + production_file = self.targetPath/IOFiles['production'] + if production_file.exists(): + production_data = read_json(production_file) + else: + raise ValueError(f'Production Details not found: {production_file}') + operation = self.path.split('/')[-1] + if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'): + self.send_header('Content-Type', 'text/html') + self.end_headers() + ModelString = production_data['Model'] + ModelPerformance = ModelString+'_performance.json' + performance_file = self.targetPath/ModelPerformance + if performance_file.exists(): + performance_data = read_json(performance_file) + else: + raise ValueError(f'Production Details not found: {performance_data}') + Scoring_Creteria = performance_data['scoring_criteria'] + train_score = round(performance_data['metrices']['train_score'],2) + test_score = round(performance_data['metrices']['test_score'],2) + current_score = 'NA' + monitoring = read_json(self.targetPath/IOFiles['monitoring']) + reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config']) + inputDatafile = 
self.targetPath/IOFiles['inputData'] + NoOfPrediction = 0 + NoOfGroundTruth = 0 + inputdistribution = '' + if reader.file_exists(IOFiles['prodData']): + dfPredict = reader.read(IOFiles['prodData']) + dfinput = pd.read_csv(inputDatafile) + features = meta_data['training']['features'] + inputdistribution = getDriftDistribution(features,dfinput,dfPredict) + NoOfPrediction = len(dfPredict) + if reader.file_exists(IOFiles['prodDataGT']): + dfGroundTruth = reader.read(IOFiles['prodDataGT']) + NoOfGroundTruth = len(dfGroundTruth) + common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()] + proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner') + if Scoring_Creteria.lower() == 'accuracy': + from sklearn.metrics import accuracy_score + current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction']) + current_score = round((current_score*100),2) + elif Scoring_Creteria.lower() == 'recall': + from sklearn.metrics import accuracy_score + current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro') + current_score = round((current_score*100),2) + msg = \\"""""" + +Performance Details + + + +

<style>
table, th, td {border}
</style>
<body>
<h2>Deployed Model: {ModelString}</h2>
<table>
<tr><td>No of Prediction</td><td>{NoOfPrediction}</td></tr>
<tr><td>No of GroundTruth</td><td>{NoOfGroundTruth}</td></tr>
</table>
<br>
<table>
<tr><th>Score Type</th><th>Train Score</th><th>Test Score</th><th>Production Score</th></tr>
<tr><td>{Scoring_Creteria}</td><td>{train_score}</td><td>{test_score}</td><td>{current_score}</td></tr>
</table>
<br>
<img src='data:image/png;base64,{newDataDrift}' alt='Input data drift distribution'>
</body>
</html>
+ + + +\\"""""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution) + elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'): + self.send_header('Content-Type', 'text/plain') + self.end_headers() + log_file = self.targetPath/IOFiles['log'] + if log_file.exists(): + with open(log_file) as f: + msg = f.read() + f.close() + else: + raise ValueError(f'Log Details not found: {log_file}') + else: + self.send_header('Content-Type', 'application/json') + self.end_headers() + features = meta_data['load_data']['selected_features'] + bodydes='[' + for x in features: + if bodydes != '[': + bodydes = bodydes+',' + bodydes = bodydes+'{""'+x+'"":""value""}' + bodydes+=']' + urltext = '/AION/'+config_input['targetPath']+'/predict' + urltextgth='/AION/'+config_input['targetPath']+'/groundtruth' + urltextproduction='/AION/'+config_input['targetPath']+'/metrices' + msg=\\"""""" +Version:{modelversion} +RunNo: {runNo} +URL for Prediction +================== +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: prediction,probability(if Applicable),remarks corresponding to each row. + +URL for GroundTruth +=================== +URL:{urltextgth} +RequestType: POST +Content-Type=application/json +Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. Otherwise outputdrift will not work + +URL for Model In Production Analysis +==================================== +URL:{urltextproduction} +RequestType: GET +Content-Type=application/json + +\\"""""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes) + self.wfile.write(msg.encode()) + else: + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + return + +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class file_status(): + + def __init__(self, reload_function, params, file, logger): + self.files_status = {} + self.initializeFileStatus(file) + self.reload_function = reload_function + self.params = params + self.logger = logger + + def initializeFileStatus(self, file): + self.files_status = {'path': file, 'time':file.stat().st_mtime} + + def is_file_changed(self): + if self.files_status['path'].stat().st_mtime > self.files_status['time']: + self.files_status['time'] = self.files_status['path'].stat().st_mtime + return True + return False + + def run(self): + global config_input + while( True): + time.sleep(30) + if self.is_file_changed(): + production_details = targetPath/IOFiles['production'] + if not production_details.exists(): + raise ValueError(f'Model in production details does not exist') + productionmodel = read_json(production_details) + config_file = Path(__file__).parent/'config.json' + if not Path(config_file).exists(): + raise ValueError(f'Config file is missing: {config_file}') + config_input = read_json(config_file) + config_input['deployedModel'] = productionmodel['Model'] + config_input['deployedRunNo'] = productionmodel['runNo'] + self.logger.info('Model changed Reloading.....') + self.logger.info(f'Model: {config_input[""deployedModel""]}') + 
self.logger.info(f'Version: {str(config_input[""modelVersion""])}') + self.logger.info(f'runNo: {str(config_input[""deployedRunNo""])}') + self.reload_function(config_input) + +class SimpleHttpServer(): + def __init__(self, ip, port, model_file_path,reload_function,params, logger): + self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) + self.status_checker = file_status( reload_function, params, model_file_path, logger) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + self.status_thread = threading.Thread(target=self.status_checker.run) + self.status_thread.start() + + def waitForThread(self): + self.server_thread.join() + self.status_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +if __name__=='__main__': + parser = argparse.ArgumentParser(description='HTTP Server') + parser.add_argument('-ip','--ipAddress', help='HTTP Server IP') + parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server') + args = parser.parse_args() + config_file = Path(__file__).parent/'config.json' + if not Path(config_file).exists(): + raise ValueError(f'Config file is missing: {config_file}') + config = read_json(config_file) + if args.ipAddress: + config['ipAddress'] = args.ipAddress + if args.portNo: + config['portNo'] = args.portNo + targetPath = Path('aion')/config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + production_details = targetPath/IOFiles['production'] + if not production_details.exists(): + raise ValueError(f'Model in production details does not exist') + productionmodel = read_json(production_details) + config['deployedModel'] = productionmodel['Model'] + config['deployedRunNo'] = productionmodel['runNo'] + #server = SimpleHttpServer(config['ipAddress'],int(config['portNo'])) + config_input = config + logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S') " +" + logger = logging.getLogger(Path(__file__).parent.name) + deployobj = deploy(config_input, logger) + server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger) + logger.info('HTTP Server Running...........') + logger.info(f""IP Address: {config['ipAddress']}"") + logger.info(f""Port No.: {config['portNo']}"") + print('HTTP Server Running...........') + print('For Prediction') + print('================') + print('Request Type: Post') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/predict') + print('\\\\nFor GroundTruth') + print('================') + print('Request Type: Post') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/groundtruth') + print('\\\\nFor Help') + print('================') + print('Request Type: Get') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/help') + print('\\\\nFor Model In Production Analysis') + print('================') + print('Request Type: Get') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/metrices') + server.start() + server.waitForThread() +"""""" """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* 
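# --- Hedged client-side sketch for the generated prediction service above:
# the /AION/<usecase>/predict endpoint expects a JSON array of records posted
# with Content-Type application/json. Host, port, usecase and feature names
# below are placeholder values. ---
import json
from urllib import request

usecase = 'my_usecase'                                    # hypothetical target path
url = f'http://127.0.0.1:8091/AION/{usecase}/predict'     # assumed host and port
records = json.dumps([{'feature1': 1.0, 'feature2': 'A'}]).encode()
req = request.Request(url, data=records,
                      headers={'Content-Type': 'application/json'})
try:
    with request.urlopen(req, timeout=5) as resp:         # needs the service running
        print(resp.read().decode())
except Exception as exc:
    print(f'service not reachable: {exc}')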
============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +class global_function(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = """" + self.available_functions = { + 'iqr':{'name':'iqrOutlier','code':f""\\n\\ndef iqrOutlier(df):\\ + \\n{self.tab}Q1 = df.quantile(0.25)\\ + \\n{self.tab}Q3 = df.quantile(0.75)\\ + \\n{self.tab}IQR = Q3 - Q1\\ + \\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\\ + \\n{self.tab}return index""}, + 'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f""\\n\\ndef zscoreOutlier(df):\\ + \\n{self.tab}z = numpy.abs(stats.zscore(df))\\ + \\n{self.tab}index = (z < 3).all(axis=1)\\ + \\n{self.tab}return index""}, + 'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f""\\n\\ndef iforestOutlier(df):\\ + \\n{self.tab}from sklearn.ensemble import IsolationForest\\ + \\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\\ + \\n{self.tab}isolation_forest.fit(df)\\ + \\n{self.tab}y_pred_train = isolation_forest.predict(df)\\ + \\n{self.tab}return y_pred_train == 1""}, + 'minMaxImputer':{'name':'minMaxImputer','code':f""\\n\\nclass minMaxImputer(TransformerMixin):\\ + \\n{self.tab}def __init__(self, strategy='max'):\\ + \\n{self.tab}{self.tab}self.strategy = strategy\\ + \\n{self.tab}def fit(self, X, y=None):\\ + \\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\\ + \\n{self.tab}{self.tab}if self.strategy == 'min':\\ + \\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\\ + \\n{self.tab}{self.tab}else:\\ + \\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\\ + \\n{self.tab}{self.tab}return self\\ + \\n{self.tab}def transform(self, X):\\ + \\n{self.tab}{self.tab}import numpy\\ + \\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)""}, + 'DummyEstimator':{'name':'DummyEstimator','code':f""\\n\\nclass DummyEstimator(BaseEstimator):\\ + \\n{self.tab}def fit(self): pass\\ + \\n{self.tab}def score(self): pass""}, + 'start_reducer':{'name':'start_reducer','imports':[{'mod':'itertools'},{'mod':'numpy','mod_as':'np'},{'mod':'pandas','mod_as':'pd'},{'mod':'VarianceThreshold','mod_from':'sklearn.feature_selection'}], 'code':"""""" +def start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05): + + qconstantColumns = [] + train_features = df.columns.tolist() + train_features.remove(target_feature) + df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature + numeric_features = df.select_dtypes(include='number').columns.tolist() + non_numeric_features = df.select_dtypes(exclude='number').columns.tolist() + if numeric_features and var_threshold: + qconstantFilter = VarianceThreshold(threshold=var_threshold) + tempDf=df[numeric_features] + qconstantFilter.fit(tempDf) + qconstantColumns = [column for column in numeric_features if column not in tempDf.columns[qconstantFilter.get_support()]] + if target_feature in qconstantColumns: + qconstantColumns.remove(target_feature) + numeric_features = list(set(numeric_features) - set(qconstantColumns)) + if numeric_features: 
+ numColPairs = list(itertools.product(numeric_features, numeric_features)) + for item in numColPairs: + if(item[0] == item[1]): + numColPairs.remove(item) + tempArray = [] + for item in numColPairs: + tempCorr = np.abs(df[item[0]].corr(df[item[1]])) + if(tempCorr > corr_threshold): + tempArray.append(item[0]) + tempArray = np.unique(tempArray).tolist() + nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray)) + groupedFeatures = [] + if tempArray: + corrDic = {} + for feature in tempArray: + temp = [] + for col in tempArray: + tempCorr = np.abs(df[feature].corr(df[col])) + temp.append(tempCorr) + corrDic[feature] = temp + #Similar correlation df + corrDF = pd.DataFrame(corrDic,index = tempArray) + corrDF.loc[:,:] = np.tril(corrDF, k=-1) + alreadyIn = set() + similarFeatures = [] + for col in corrDF: + perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist() + if perfectCorr and col not in alreadyIn: + alreadyIn.update(set(perfectCorr)) + perfectCorr.append(col) + similarFeatures.append(perfectCorr) + updatedSimFeatures = [] + for items in similarFeatures: + if(target_feature != '' and target_feature in items): + for p in items: + updatedSimFeatures.append(p) + else: + updatedSimFeatures.append(items[0]) + newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols)) + updatedFeatures = list(set(newTempFeatures + non_numeric_features)) + else: + updatedFeatures = list(set(df.columns) -set(qconstantColumns)) + else: + updatedFeatures = list(set(df.columns) -set(qconstantColumns)) + return updatedFeatures + """"""}, + 'feature_importance_class':{'name':'feature_importance_class','code':""\\n\\ + \\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\\ + \\n import pandas as pd\\ + \\n from sklearn.feature_selection import chi2\\ + \\n from sklearn.feature_selection import f_classif\\ + \\n from sklearn.feature_selection import mutual_info_classif\\ + \\n \\ + \\n impFeatures = []\\ + \\n if cat_features:\\ + \\n categoricalData=df[cat_features]\\ + \\n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\\ + \\n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\\ + \\n impFeatures.append(corrSeries[corrSeriescorrTh].index.tolist())\\ + \\n pearsonScore=df.corr() \\ + \\n targetPScore=abs(pearsonScore[target_feature])\\ + \\n impFeatures.append(targetPScore[targetPScorecorrTh].index.tolist())\\ + \\n pearsonScore=df.corr()\\ + \\n targetPScore=abs(pearsonScore[target_feature])\\ + \\n impFeatures.append(targetPScore[targetPScore 2):\\ + \\n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\\ + \\n else:\\ + \\n class_type = 'binary_class' if class_count == 2 else 'multi_class'\\ + \\n if score_param in scorer_mapping.keys():\\ + \\n score_param = scorer_mapping[score_param][class_type]\\ + \\n else:\\ + \\n score_param = 'accuracy'\\ + \\n return score_param""}, + 'log_dataframe':{'name':'log_dataframe','code':f""\\n\\ + \\ndef log_dataframe(df, msg=None):\\ + \\n import io\\ + \\n buffer = io.StringIO()\\ + \\n df.info(buf=buffer)\\ + \\n if msg:\\ + \\n log_text = f'Data frame after {{msg}}:'\\ + \\n else:\\ + \\n log_text = 'Data frame:'\\ + \\n log_text += '\\\\n\\\\t'+str(df.head(2)).replace('\\\\n','\\\\n\\\\t')\\ + \\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\ + \\n get_logger().info(log_text)""}, + 
'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':""\\n\\ + \\nclass BayesSearchCV():\\ + \\n\\ + \\n def __init__(self, estimator, params" +", scoring, n_iter, cv):\\ + \\n self.estimator = estimator\\ + \\n self.params = params\\ + \\n self.scoring = scoring\\ + \\n self.iteration = n_iter\\ + \\n self.cv = cv\\ + \\n self.best_estimator_ = None\\ + \\n self.best_score_ = None\\ + \\n self.best_params_ = None\\ + \\n\\ + \\n def __min_fun(self, params):\\ + \\n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\\ + \\n acc = score.mean()\\ + \\n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\\ + \\n\\ + \\n def fit(self, X, y):\\ + \\n trials = Trials()\\ + \\n self.X = X\\ + \\n self.y = y\\ + \\n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\\ + \\n result = sorted(trials.results, key = lambda x: x['loss'])[0]\\ + \\n self.best_estimator_ = result['model']\\ + \\n self.best_score_ = result['score']\\ + \\n self.best_params_ = result['params']\\ + \\n self.best_estimator_.fit(X, y)\\ + \\n\\ + \\n def hyperOptParamConversion( paramSpace):\\ + \\n paramDict = {}\\ + \\n for j in list(paramSpace.keys()):\\ + \\n inp = paramSpace[j]\\ + \\n isLog = False\\ + \\n isLin = False\\ + \\n isRan = False\\ + \\n isList = False\\ + \\n isString = False\\ + \\n try:\\ + \\n # check if functions are given as input and reassign paramspace\\ + \\n v = paramSpace[j]\\ + \\n if 'logspace' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isLog = True\\ + \\n elif 'linspace' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isLin = True\\ + \\n elif 'range' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isRan = True\\ + \\n elif 'list' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isList = True\\ + \\n elif '[' and ']' in paramSpace[j]:\\ + \\n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\\ + \\n isList = True\\ + \\n x = paramSpace[j].split(',')\\ + \\n except:\\ + \\n x = paramSpace[j]\\ + \\n str_arg = paramSpace[j]\\ + \\n\\ + \\n # check if arguments are string\\ + \\n try:\\ + \\n test = eval(x[0])\\ + \\n except:\\ + \\n isString = True\\ + \\n\\ + \\n if isString:\\ + \\n paramDict.update({j: hp.choice(j, x)})\\ + \\n else:\\ + \\n res = eval(str_arg)\\ + \\n if isLin:\\ + \\n y = eval('np.linspace' + str(res))\\ + \\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\ + \\n elif isLog:\\ + \\n y = eval('np.logspace' + str(res))\\ + \\n paramDict.update(\\ + \\n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\\ + \\n elif isRan:\\ + \\n y = eval('np.arange' + str(res))\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n # check datatype of argument\\ + \\n elif isinstance(eval(x[0]), bool):\\ + \\n y = list(map(lambda i: eval(i), x))\\ + \\n paramDict.update({j: hp.choice(j, eval(str(y)))})\\ + \\n elif isinstance(eval(x[0]), float):\\ + \\n res = eval(str_arg)\\ + \\n if len(str_arg.split(',')) == 3 and not isList:\\ + \\n y = 
eval('np.linspace' + str(res))\\ + \\n #print(y)\\ + \\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\ + \\n else:\\ + \\n y = list(res) if isinstance(res, tuple) else [res]\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n else:\\ + \\n res = eval(str_arg)\\ + \\n if len(str_arg.split(',')) == 3 and not isList:\\ + \\n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\\ + \\n else:\\ + \\n y = list(res) if isinstance(res, tuple) else [res]\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n return paramDict""}, + 's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':""\\n\\ + \\ndef s2n(value):\\ + \\n try:\\ + \\n x=eval(value)\\ + \\n return x\\ + \\n except:\\ + \\n try:\\ + \\n return w2n.word_to_num(value)\\ + \\n except:\\ + \\n return np.nan""}, + 'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef read_json(file_path):\\ + \\n data = None\\ + \\n with open(file_path,'r') as f:\\ + \\n data = json.load(f)\\ + \\n return data\\ + \\n\\ + \\ndef write_json(data, file_path):\\ + \\n with open(file_path,'w') as f:\\ + \\n json.dump(data, f)\\ + \\n\\ + \\ndef read_data(file_path, encoding='utf-8', sep=','):\\ + \\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\ + \\n\\ + \\ndef write_data(data, file_path, index=False):\\ + \\n return data.to_csv(file_path, index=index)\\ + \\n\\ + \\n#Uncomment and change below code for google storage\\ + \\n#def write_data(data, file_path, index=False):\\ + \\n# file_name= file_path.name\\ + \\n# data.to_csv('output_data.csv')\\ + \\n# storage_client = storage.Client()\\ + \\n# bucket = storage_client.bucket('aion_data')\\ + \\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\ + \\n# return data\\ + \\n\\ + \\ndef is_file_name_url(file_name):\\ + \\n supported_urls_starts_with = ('gs://','https://','http://')\\ + \\n return file_name.startswith(supported_urls_starts_with)\\ + \\n""}, + 'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f""\\n\\ + \\nlog = None\\ + \\ndef set_logger(log_file, mode='a'):\\ + \\n global log\\ + \\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\ + \\n log = logging.getLogger(Path(__file__).parent.name)\\ + \\n return log\\ + \\n\\ + \\ndef get_logger():\\ + \\n return log\\n""}, + 'mlflowSetPath':{'name':'mlflowSetPath','code':f""\\n\\ndef mlflowSetPath(path, name):\\ + \\n{self.tab}db_name = str(Path(path)/'mlruns')\\ + \\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\\ + \\n{self.tab}mlflow.set_experiment(str(Path(path).name))\\ + \\n""}, + 'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f""\\n\\ndef mlflow_create_experiment(config, path, name):\\ + \\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\\ + \\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\\ + \\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\\ + \\n{self.tab}client = mlflow.tracking.MlflowClient()\\ + \\n{self.tab}experiment = client.get_experiment_by_name(name)\\ + \\n{self.tab}if experiment:\\ + \\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\\ + \\n{self.tab}else:\\ + \\n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\\ + \\n{self.tab}return client, 
experiment_id\\ + \\n""}, + 'get_mlflow_uris':{'name':'get_mlflow_uris','code':f""\\n\\ndef get_mlflow_uris(config, path):\\ + \\n artifact_uri = None\\ + \\n tracking_uri_type = config.get('tracking_uri_type',None)\\ + \\n if tracking_uri_type == 'localDB':\\ + \\n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\\ + \\n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\\ + \\n tracking_uri = config['tracking_uri']\\ + \\n if config.get('artifacts_uri', None):\\ + \\n if Path(config['artifacts_uri']).exists():\\ + \\n artifact_uri = 'file:' + config['artifacts_uri']\\ + \\n else:\\ + \\n artifact_uri = config['artifacts_uri']\\ + \\n else:\\ + \\n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\\ + \\n else:\\ + \\n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\\ + \\n artifact_uri = None\\ + \\n if config.get('registry_uri', None):\\ + \\n registry_uri = config['registry_uri']\\ + \\n else:\\ + \\n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\\ + \\n return tracking_uri, artifact_uri, registry_uri\\ + \\n""}, + 'logMlflow':{'name':'logMlflow','code':f""\\n\\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\\ + \\n{self.tab}run_id = None\\ + \\n{self.tab}for k,v in params.items():\\ + \\n{self.tab}{self.tab}mlflow.log_param(k, v)\\ + \\n{self.tab}for k,v in metrices.items():\\ + \\n{self.tab}{self.tab}mlflow.log_metric(k, v)\\ + \\n{self.tab}if 'CatBoost' in algoName:\\ + \\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\\ + \\n{self.tab}else:\\ + \\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\\ + \\n{self.tab}tags['processed'] = 'no'\\ + \\n{self.tab}tags['registered'] = 'no'\\ + \\n{self.tab}mlflow.set_tags(tags)\\ + \\n{self.tab}if model_info:\\ + \\n{self.tab}{self.tab}run_id = model_info.run_id\\ + \\n{self.tab}return run_id\\ + \\n""}, + 'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':""\\ndef get_classification_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\ + \\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n\\ + \\n result['accuracy'] = math.floor(accuracy_score*10000" +")/100\\ + \\n result['precision'] = math.floor(avg_precision*10000)/100\\ + \\n result['recall'] = math.floor(avg_recall*10000)/100\\ + \\n result['f1'] = math.floor(avg_f1*10000)/100\\ + \\n return result\\ + \\n""}, + 'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':""\\ndef get_regression_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n\\ + \\n me = np.mean(predicted_values - actual_values)\\ + \\n sde = np.std(predicted_values - actual_values, ddof = 1)\\ + \\n\\ + \\n abs_err = np.abs(predicted_values - actual_values)\\ + \\n mae = np.mean(abs_err)\\ + \\n sdae = np.std(abs_err, ddof = 1)\\ + \\n\\ + \\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\ + \\n mape = np.mean(abs_perc_err)\\ + \\n sdape = np.std(abs_perc_err, ddof = 1)\\ + \\n\\ + \\n result['mean_error'] = me\\ + \\n 
result['mean_abs_error'] = mae\\ + \\n result['mean_abs_perc_error'] = mape\\ + \\n result['error_std'] = sde\\ + \\n result['abs_error_std'] = sdae\\ + \\n result['abs_perc_error_std'] = sdape\\ + \\n return result\\ + \\n""} + } + + def add_function(self, name, importer=None): + if name in self.available_functions.keys(): + self.codeText += self.available_functions[name]['code'] + if importer: + if 'imports' in self.available_functions[name].keys(): + for module in self.available_functions[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + + def get_function_name(self, name): + if name in self.available_functions.keys(): + return self.available_functions[name]['name'] + return None + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from importlib.metadata import version +import sys + + +class importModule(): + + def __init__(self): + self.importModule = {} + self.stdlibModule = [] + self.localModule = {} + + def addLocalModule(self,module, mod_from=None, mod_as=None): + if module == '*': + if module not in self.localModule.keys(): + self.localModule[module]= [mod_from] + else: + self.localModule[module].append(mod_from) + elif module not in self.localModule.keys(): + self.localModule[module] = {'from':mod_from, 'as':mod_as} + + def addModule(self, module, mod_from=None, mod_as=None): + if module not in self.importModule.keys(): + self.importModule[module] = {'from':mod_from, 'as':mod_as} + if module in sys.stdlib_module_names: + self.stdlibModule.append(module) + elif isinstance(self.importModule[module], list): + if mod_as not in [x['as'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as not in [x['from'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as != self.importModule[module]['as']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + elif mod_from != self.importModule[module]['from']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + + def getModules(self): + return (self.importModule, self.stdlibModule) + + def getBaseModule(self, extra_importers=[]): + modules_alias = { 'sklearn':'scikit-learn', + 'genetic_selection':'sklearn-genetic', + 'google': 'google-cloud-storage', + 'azure':'azure-storage-file-datalake'} + local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} + modules = [] + require = """" + if extra_importers: + extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] + importers_module = [self.importModule] + extra_importers + for importer_module in importers_module: + for k,v in importer_module.items(): + if v['from']: + mod = 
v['from'].split('.')[0] + else: + mod = k + if mod in modules_alias.keys(): + mod = modules_alias[mod] + modules.append(mod) + modules = list(set(modules)) + for mod in modules: + try: + if mod in local_modules.keys(): + require += f""{local_modules[mod]}\\n"" + else: + require += f""{mod}=={version(mod)}\\n"" + except : + if mod not in sys.stdlib_module_names: + raise + return require + + def getCode(self): + def to_string(k, v): + mod = '' + if v['from']: + mod += 'from {} '.format(v['from']) + mod += 'import {}'.format(k) + if v['as']: + mod += ' as {} '.format(v['as']) + return mod + + modules = """" + local_modules = """" + std_lib_modules = """" + third_party_modules = """" + for k,v in self.importModule.items(): + if k in self.stdlibModule: + std_lib_modules = std_lib_modules + '\\n' + to_string(k, v) + elif isinstance(v, dict): + third_party_modules = third_party_modules + '\\n' + to_string(k, v) + elif isinstance(v, list): + for alias in v: + third_party_modules = third_party_modules + '\\n' + to_string(k, alias) + for k,v in self.localModule.items(): + if k != '*': + local_modules = local_modules + '\\n' + to_string(k, v) + else: + for mod_from in v: + local_modules = local_modules + '\\n' + f'from {mod_from} import {k}' + if std_lib_modules: + modules = modules + ""\\n#Standard Library modules"" + std_lib_modules + if third_party_modules: + modules = modules + ""\\n\\n#Third Party modules"" + third_party_modules + if local_modules: + modules = modules + ""\\n\\n#local modules"" + local_modules + '\\n' + return modules + + def copyCode(self, importer): + self.importModule, self.stdlibModule = importer.getModules() + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
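# --- Hedged usage sketch (added annotation, not part of the original AION source) ---
# Shows how the importModule helper defined above is typically driven by the MLaC code
# generators; the module names below are illustrative only.
importer = importModule()
importer.addModule('json')                        # stdlib, grouped under "#Standard Library modules"
importer.addModule('pandas', mod_as='pd')         # third party -> "import pandas as pd"
importer.addModule('Path', mod_from='pathlib')    # -> "from pathlib import Path"
importer.addLocalModule('*', mod_from='utility')  # -> "from utility import *" in the local modules section
print(importer.getCode())        # grouped import block for the generated aionCode.py
print(importer.getBaseModule())  # pinned requirements, e.g. "pandas==<installed version>"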
+*/ +"""""" + +import json + +class learner(): + + def __init__(self, problem_type=""classification"", target_feature="""", sample_method=None,indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.df_name = 'df' + self.problem_type = problem_type + self.target_feature = target_feature + self.search_space = [] + self.codeText = f""\\ndef train(log):"" + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'featureEngineeredData.dat','testData' : 'test.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json','log' : 'aion.log'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config"" + return text + + def __addSaveModelCode(self): + text = ""\\n\\ + \\ndef save_model( experiment_id, estimator, features, metrices, params,tags, scoring):\\ + \\n # mlflow log model, metrices and parameters\\ + \\n with mlflow.start_run(experiment_id = experiment_id, run_name = model_name):\\ + \\n return logMlflow(params, metrices, estimator, tags, model_name.split('_')[0])"" + return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.function_code + '\\n' + self.codeText + + def addLocalFunctionsCode(self): + self.function_code += self.__addValidateConfigCode() + self.function_code += self.__addSaveModelCode() + + def getPrefixModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += ""\\ + \\n config = validateConfig()\\ + \\n targetPath = Path('aion')/config['targetPath']\\ + \\n if not targetPath.exists():\\ + \\n raise ValueError(f'targetPath does not exist')\\ + \\n meta_data_file = targetPath/IOFiles['metaData']\\ + \\n if meta_data_file.exists():\\ + \\n meta_data = read_json(meta_data_file)\\ + \\n else:\\ + \\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\ + \\n log_file = targetPath/IOFiles['log']\\ + \\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\ + \\n dataLoc = targetPath/IOFiles['inputData']\\ + \\n if not dataLoc.exists():\\ + \\n return {'Status':'Failure','Message':'Data location does not exists.'}\\ + \\n\\ + \\n status = dict()\\ + \\n usecase = config['targetPath']\\ + \\n df = pd.read_csv(dataLoc)\\ + \\n 
prev_step_output = meta_data['featureengineering']['Status']"" + + def getSuffixModules(self): + modules = [{'module':'platform'} + ,{'module':'time'} + ,{'module':'mlflow'} + ] + return modules + def add_100_trainsize_code(self): + self.codeText +=""\\n\\ + \\n else:\\ + \\n test_score = train_score\\ + \\n metrices = {}"" + def addSuffixCode(self, indent=1): + self.codeText += ""\\n\\ + \\n meta_data['training'] = {}\\ + \\n meta_data['training']['features'] = features\\ + \\n scoring = config['scoring_criteria']\\ + \\n tags = {'estimator_name': model_name}\\ + \\n monitoring_data = read_json(targetPath/IOFiles['monitor'])\\ + \\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\ + \\n mlflow_client, experiment_id = mlflow_create_experiment(monitoring_data.get('mlflow_config',mlflow_default_config), targetPath, usecase)\\ + \\n run_id = save_model(experiment_id, estimator,features, metrices,best_params,tags,scoring)\\ + \\n write_json(meta_data, targetPath/IOFiles['metaDataOutput'])\\ + \\n write_json({'scoring_criteria': scoring, 'metrices':metrices, 'param':best_params}, targetPath/IOFiles['performance'])\\ + \\n\\ + \\n # return status\\ + \\n status = {'Status':'Success','mlflow_run_id':run_id,'FeaturesUsed':features,'test_score':metrices['test_score'],'train" +"_score':metrices['train_score']}\\ + \\n log.info(f'Test score: {test_score}')\\ + \\n log.info(f'Train score: {train_score}')\\ + \\n log.info(f'MLflow run id: {run_id}')\\ + \\n log.info(f'output: {status}')\\ + \\n return json.dumps(status)"" + + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'sys'} + ,{'module':'json'} + ,{'module':'logging'} + ] + return modules + + def addMainCode(self, indent=1): + self.codeText += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n log = None\\ + \\n try:\\ + \\n print(train(log))\\ + \\n except Exception as e:\\ + \\n if log:\\ + \\n log.error(e, exc_info=True)\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print(json.dumps(status))\\ + "" + + def add_variable(self, name, value, indent=1): + if isinstance(value, str): + self.codeText += f""\\n{self.tab * indent}{name} = '{value}'"" + else: + self.codeText += f""\\n{self.tab * indent}{name} = {value}"" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def add_search_space_w(self, algoritms): + for model, params in algoritms.items(): + d = {'clf': f""[{model}()]""} + for k,v in params.items(): + if isinstance(v, str): + d[f'clf__{k}']=f""'{v}'"" + else: + d[f'clf__{k}']= f""{v}"" + self.search_space.append(d) + + def add_search_space(self, indent=1): + self.codeText += f""\\n{self.tab}search_space = config['search_space']"" + + def add_train_test_split(self, train_feature, target_feature,test_ratio, indent=1): + self.codeText += ""\\n\\n # split the data for training\\ + \\n selected_features = prev_step_output['selected_features']\\ + \\n target_feature = config['target_feature']\\ + \\n train_features = prev_step_output['total_features'].copy()\\ + \\n train_features.remove(target_feature)\\ + \\n X_train = df[train_features]\\ + \\n y_train = df[target_feature]\\ + \\n if config['test_ratio'] > 0.0:\\ + \\n test_data = read_data(targetPath/IOFiles['testData'])\\ + \\n X_test = test_data[train_features]\\ + \\n y_test = test_data[target_feature]\\ + \\n else:\\ + \\n X_test = pd.DataFrame()\\ + \\n y_test = pd.DataFrame()"" + + def add_model_fit(self, 
estimator, optimizer, selector_method, importer, indent=1): + # need to adjust the indent + importer.addModule('importlib') + importer.addModule('operator') + text = f""\\n features = selected_features['{selector_method}']\\ + \\n estimator = {estimator}()\\ + \\n param = config['algorithms']['{estimator}']"" + if optimizer == 'GridSearchCV': + text += ""\\n grid = GridSearchCV(estimator, param,cv=config['optimization_param']['trainTestCVSplit'])\\ + \\n grid.fit(X_train[features], y_train)\\ + \\n train_score = grid.best_score_ * 100\\ + \\n best_params = grid.best_params_\\ + \\n estimator = grid.best_estimator_"" + elif optimizer == 'GeneticSelectionCV': + text += ""\\n grid = GeneticSelectionCV(estimator, scoring=scorer, n_generations=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'],n_population=config['optimization_param']['geneticparams']['n_population'],crossover_proba=config['optimization_param']['geneticparams']['crossover_proba'],mutation_proba=config['optimization_param']['geneticparams']['mutation_proba'],crossover_independent_proba=config['optimization_param']['geneticparams']['crossover_independent_proba'],mutation_independent_proba=config['optimization_param']['geneticparams']['mutation_independent_proba'],tournament_size=config['optimization_param']['geneticparams']['tournament_size'],n_gen_no_change=config['optimization_param']['geneticparams']['n_gen_no_change'])\\ + \\n grid.fit(X_train[features], y_train)\\ + \\n train_score = grid.score(X_train[features], y_train)\\ + \\n best_params = grid.estimator_.get_params()\\ + \\n estimator = grid.estimator_"" + else: + text += f""\\n grid = {optimizer}(estimator, param, scoring=scorer, n_iter=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'])\\ + \\n grid.fit(X_train[features], y_train)\\ + \\n train_score = grid.best_score_ * 100\\ + \\n best_params = grid.best_params_\\ + \\n estimator = grid.best_estimator_"" + self.codeText += text + + def addLearner(self, model_name, params, importer, indent=1): + importer.addModule('Pipeline', mod_from='sklearn.pipeline') + importer.addModule('ColumnTransformer', mod_from='sklearn.compose') + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') + model_params = [] + for k,v in params.items(): + if isinstance(v, str): + model_params.append(f""{k}='{v}'"") + else: + model_params.append(f""{k}={v}"") + model_params = "","".join(model_params) + self.codeText += self.getTransformer() + text = f""\\n{self.tab * indent}pipeline = Pipeline(steps = [('preprocessor', preprocessor),('learner',{model_name}({model_params}))])"" + self.codeText += text + self.codeText += self.splitTargetFeature(importer) + if self.balancing: + self.codeText += self.balancingCode(importer) + self.codeText += self.fitModelCode(importer) + + def splitTargetFeature(self, importer, indent=1): + importer.addModule('train_test_split', mod_from='sklearn.model_selection') + return f""\\n{self.tab * indent}target = df['{self.target_feature}']\\ + \\n{self.tab * indent}df = df.drop(['{self.target_feature}'], axis=1)\\ + \\n{self.tab * indent}X_train, X_test, y_train, y_test = train_test_split(df,target, train_size = percentage/100.0)"" + + def getCode_remove(self, model_name=None, indent=1): + return self.codeText + + def getDFName(self): + return self.df_name + + def copyCode(self, learner): + self.codeText = learner.getCode() + """""" +/** +* ============================================================================= +* 
COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +class input_drift(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = '' + + def addInputDriftClass(self): + text = ""\\ + \\nclass inputdrift():\\ + \\n\\ + \\n def __init__(self,base_config):\\ + \\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\ + \\n self.currentDataLocation = base_config['currentDataLocation']\\ + \\n home = Path.home()\\ + \\n if platform.system() == 'Windows':\\ + \\n from pathlib import WindowsPath\\ + \\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\ + \\n else:\\ + \\n from pathlib import PosixPath\\ + \\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\ + \\n if not output_model_dir.exists():\\ + \\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\ + \\n\\ + \\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\ + \\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\ + \\n mlflow.set_tracking_uri(tracking_uri)\\ + \\n mlflow.set_registry_uri(registry_uri)\\ + \\n client = mlflow.tracking.MlflowClient(\\ + \\n tracking_uri=tracking_uri,\\ + \\n registry_uri=registry_uri,\\ + \\n )\\ + \\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\ + \\n model = mlflow.pyfunc.load_model(model_version_uri)\\ + \\n run = client.get_run(model.metadata.run_id)\\ + \\n if run.info.artifact_uri.startswith('file:'):\\ + \\n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\ + \\n else:\\ + \\n artifact_path = Path(run.info.artifact_uri)\\ + \\n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\\ + \\n\\ + \\n def get_input_drift(self,current_data, historical_data):\\ + \\n curr_num_feat = current_data.select_dtypes(include='number')\\ + \\n hist_num_feat = historical_data.select_dtypes(include='number')\\ + \\n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\\ + \\n alert_count = 0\\ + \\n data = {\\ + \\n 'current':{'data':current_data},\\ + \\n 'hist': {'data': historical_data}\\ + \\n }\\ + \\n dist_changed_columns = []\\ + \\n dist_change_message = []\\ + \\n for feature in num_features:\\ + \\n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\\ + \\n if (curr_static_value < 0.05):\\ + \\n distribution = {}\\ + \\n distribution['hist'] = self.DistributionFinder( historical_data[feature])\\ + \\n distribution['curr'] = self.DistributionFinder( current_data[feature])\\ + \\n if(distribution['hist']['name'] == distribution['curr']['name']):\\ + \\n pass\\ + \\n else:\\ + \\n alert_count = alert_count + 1\\ + \\n dist_changed_columns.append(feature)\\ + \\n changed_column = {}\\ + \\n changed_column['Feature'] = feature\\ + \\n changed_column['KS_Training'] = curr_static_value\\ + \\n changed_column['Training_Distribution'] = 
distribution['hist']['name']\\ + \\n changed_column['New_Distribution'] = distribution['curr']['name']\\ + \\n dist_change_message.append(changed_column)\\ + \\n if alert_count:\\ + \\n resultStatus = dist_change_message\\ + \\n else :\\ + \\n resultStatus='Model is working as expected'\\ + \\n return(alert_count, resultStatus)\\ + \\n\\ + \\n def DistributionFinder(self,data):\\ + \\n best_distribution =''\\ + \\n best_sse =0.0\\ + \\n if(data.dtype in ['int','int64']):\\ + \\n distributions= {'bernoulli':{'algo':st.bernoulli},\\ + \\n 'binom':{'algo':st.binom},\\ + \\n 'geom':{'algo':st.geom},\\ + \\n 'nbinom':{'algo':st.nbinom},\\ + \\n 'poisson':{'algo':st.poisson}\\ + \\n }\\ + \\n index, counts = np.unique(data.astype(int),return_counts=True)\\ + \\n if(len(index)>=2):\\ + \\n best_sse = np.inf\\ + \\n y1=[]\\ + \\n total=sum(counts)\\ + \\n mean=float(sum(index*counts))/total\\ + \\n variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\\ + \\n dispersion=mean/float(variance)\\ + \\n theta=1/float(dispersion)\\ + \\n r=mean*(float(theta)/1-theta)\\ + \\n\\ + \\n for j in counts:\\ + \\n y1.append(float(j)/total)\\ + \\n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\\ + \\n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\\ + \\n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\\ + \\n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\\ + \\n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\\ + \\n\\ + \\n sselist = []\\ + \\n for dist in distributions.keys():\\ + \\n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\\ + \\n if np.isnan(distributions[dist]['s" +"ess']):\\ + \\n distributions[dist]['sess'] = float('inf')\\ + \\n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\\ + \\n best_distribution = best_dist\\ + \\n best_sse = distributions[best_dist]['sess']\\ + \\n\\ + \\n elif (len(index) == 1):\\ + \\n best_distribution = 'Constant Data-No Distribution'\\ + \\n best_sse = 0.0\\ + \\n elif(data.dtype in ['float64','float32']):\\ + \\n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\\ + \\n best_distribution = st.norm.name\\ + \\n best_sse = np.inf\\ + \\n nrange = data.max() - data.min()\\ + \\n\\ + \\n y, x = np.histogram(data.astype(float), bins='auto', density=True)\\ + \\n x = (x + np.roll(x, -1))[:-1] / 2.0\\ + \\n\\ + \\n for distribution in distributions:\\ + \\n with warnings.catch_warnings():\\ + \\n warnings.filterwarnings('ignore')\\ + \\n params = distribution.fit(data.astype(float))\\ + \\n arg = params[:-2]\\ + \\n loc = params[-2]\\ + \\n scale = params[-1]\\ + \\n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\\ + \\n sse = np.sum(np.power(y - pdf, 2.0))\\ + \\n if( sse < best_sse):\\ + \\n best_distribution = distribution.name\\ + \\n best_sse = sse\\ + \\n\\ + \\n return {'name':best_distribution, 'sse': best_sse}\\ + \\n\\ + "" + return text + + def addSuffixCode(self, indent=1): + text =""\\n\\ + \\ndef check_drift( config):\\ + \\n inputdriftObj = inputdrift(config)\\ + \\n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\\ + \\n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\\ + \\n dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\\ + \\n if 
message == 'Model is working as expected':\\ + \\n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\\ + \\n else:\\ + \\n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\\ + \\n return(output_json)\\ + \\n\\ + \\nif __name__ == '__main__':\\ + \\n try:\\ + \\n if len(sys.argv) < 2:\\ + \\n raise ValueError('config file not present')\\ + \\n config = sys.argv[1]\\ + \\n if Path(config).is_file() and Path(config).suffix == '.json':\\ + \\n with open(config, 'r') as f:\\ + \\n config = json.load(f)\\ + \\n else:\\ + \\n config = json.loads(config)\\ + \\n output = check_drift(config)\\ + \\n status = {'Status':'Success','Message':output}\\ + \\n print('input_drift:'+json.dumps(status))\\ + \\n except Exception as e:\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print('input_drift:'+json.dumps(status))"" + return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def generateCode(self): + self.codeText += self.addInputDriftClass() + self.codeText += self.addSuffixCode() + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class tabularDataReader(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.function_code = '' + self.codeText = '' + self.code_generated = False + + def getInputFiles(self): + IOFiles = { + ""rawData"": ""rawData.dat"", + ""metaData"" : ""modelMetaData.json"", + ""log"" : ""aion.log"", + ""outputData"" : ""rawData.dat"", + ""monitoring"":""monitoring.json"", + ""prodData"": ""prodData"", + ""prodDataGT"":""prodDataGT"" + } + text = 'IOFiles = ' + if not IOFiles: + text += '{ }' + else: + text += json.dumps(IOFiles, indent=4) + return text + + def getOutputFiles(self): + output_files = { + 'metaData' : 'modelMetaData.json', + 'log' : 'aion.log', + 'outputData' : 'rawData.dat' + } + text = 'output_file = ' + if not output_files: + text += '{ }' + else: + text += json.dumps(output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n if not config['targetPath']:\\ + \\n raise ValueError(f'Target Path is not configured')\\ + \\n return config"" + return text + + def addMainCode(self): + self.codeText += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n log = None\\ + \\n try:\\ + \\n print(load_data(log))\\ + \\n except Exception as e:\\ + \\n if log:\\ + \\n log.getLogger().error(e, exc_info=True)\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print(json.dumps(status))\\ + \\n raise Exception(str(e))\\ + "" + def 
addLoadDataCode(self): + self.codeText += """""" +#This function will read the data and save the data on persistent storage +def load_data(log): + config = validateConfig() + targetPath = Path('aion')/config['targetPath'] + targetPath.mkdir(parents=True, exist_ok=True) + log_file = targetPath/IOFiles['log'] + log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + monitoring = targetPath/IOFiles['monitoring'] + if monitoring.exists(): + monitoringStatus = read_json(monitoring) + if monitoringStatus['dataLocation'] == '' and monitoringStatus['driftStatus'] != 'No Drift': + reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None)) + + raw_data_location = targetPath/IOFiles['rawData'] + if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']): + predicted_data = reader.read(IOFiles['prodData']) + actual_data = reader.read(IOFiles['prodDataGT']) + common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()] + mergedRes = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner') + raw_data_path = pd.read_csv(raw_data_location) + df = pd.concat([raw_data_path,mergedRes]) + else: + raise ValueError(f'Prod Data not found') + elif monitoringStatus['dataLocation'] == '': + raise ValueError(f'Data Location does not exist') + else: + if 's3' in monitoringStatus.keys(): + input_reader = dataReader(reader_type='s3',target_path=None, config=monitoringStatus['s3']) + log.info(f""Downloading '{monitoringStatus['s3']['file_name']}' from s3 bucket '{monitoringStatus['s3']['bucket_name']}'"") + df = input_reader.read(monitoringStatus['s3']['file_name']) + else: + location = monitoringStatus['dataLocation'] + log.info(f'Dataset path: {location}') + df = read_data(location) + else: + raise ValueError(f'Monitoring.json does not exist') + + status = {} + output_data_path = targetPath/IOFiles['outputData'] + log.log_dataframe(df) + required_features = list(set(config['selected_features'] + [config['target_feature']])) + log.info('Dataset features required: ' + ','.join(required_features)) + missing_features = [x for x in required_features if x not in df.columns.tolist()] + if missing_features: + raise ValueError(f'Some feature/s is/are missing: {missing_features}') + log.info('Removing unused features: '+','.join(list(set(df.columns) - set(required_features)))) + df = df[required_features] + log.info(f'Required features: {required_features}') + try: + log.info(f'Saving Dataset: {str(output_data_path)}') + write_data(df, output_data_path, index=False) + status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'Records':len(df)} + except: + raise ValueError('Unable to create data file') + + meta_data_file = targetPath/IOFiles['metaData'] + meta_data = dict() + meta_data['load_data'] = {} + meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if x != config['target_feature']] + meta_data['load_data']['Status'] = status + write_json(meta_data, meta_data_file) + output = json.dumps(status) + log.info(output) + return output +"""""" + def addValidateConfigCode(self, indent=1): + self.function_code += self.__addValidateConfigCode() + + def generateCode(self): + self.addValidateConfigCode() + self.addLoadDataCode() + self.addMainCode() + self.code_generated = True + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + if not 
self.code_generated: + self.generateCode() + return self.function_code + '\\n' + self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class drift(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = '' + + def getInputFiles(self): + IOFiles = { + ""log"": ""aion.log"", + ""trainingData"":""rawData.dat"", + ""production"": ""production.json"", + ""monitoring"":""monitoring.json"", + ""prodData"": ""prodData"", + ""prodDataGT"":""prodDataGT"" + } + text = 'IOFiles = ' + if not IOFiles: + text += '{ }' + else: + text += json.dumps(IOFiles, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.codeText + +# temporary code + + def get_input_drift_import_modules(self): + return [ + {'module': 'sys', 'mod_from': None, 'mod_as': None}, + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, + {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'warnings', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + + def get_input_drift_code(self): + return """""" + +class inputdrift(): + + def __init__(self,base_config): + if 'mlflowURL' in base_config: + self.usecase = base_config['modelName'] + '_' + base_config['modelVersion'] + self.currentDataLocation = base_config['currentDataLocation'] + home = Path.home() + if platform.system() == 'Windows': + from pathlib import WindowsPath + output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data' + output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase + else: + from pathlib import PosixPath + output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data' + output_model_dir = Posix" +"Path(home)/'HCLT'/'AION'/'target'/self.usecase + if not output_model_dir.exists(): + raise ValueError(f'Configuration file not found at {output_model_dir}') + + tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns') + registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db') + mlflow.set_tracking_uri(tracking_uri) + mlflow.set_registry_uri(registry_uri) + client = mlflow.tracking.MlflowClient( + tracking_uri=tracking_uri, + registry_uri=registry_uri, + ) + model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase) + model = mlflow.pyfunc.load_model(model_version_uri) + run = client.get_run(model.metadata.run_id) + if run.info.artifact_uri.startswith('file:'): + artifact_path = Path(run.info.artifact_uri[len('file:///') : ]) + else: + artifact_path = 
Path(run.info.artifact_uri) + self.trainingDataPath = artifact_path/(self.usecase + '_data.csv') + + def get_input_drift(self,current_data, historical_data): + curr_num_feat = current_data.select_dtypes(include='number') + hist_num_feat = historical_data.select_dtypes(include='number') + num_features = [feat for feat in historical_data.columns if feat in curr_num_feat] + alert_count = 0 + data = { + 'current':{'data':current_data}, + 'hist': {'data': historical_data} + } + dist_changed_columns = [] + dist_change_message = [] + for feature in num_features: + curr_static_value = round(st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue,3) + if (curr_static_value < 0.05): + try: + distribution = {} + distribution['hist'] = self.DistributionFinder( historical_data[feature]) + distribution['curr'] = self.DistributionFinder( current_data[feature]) + if(distribution['hist']['name'] == distribution['curr']['name']): + pass + else: + alert_count = alert_count + 1 + dist_changed_columns.append(feature) + changed_column = {} + changed_column['Feature'] = feature + changed_column['KS_Training'] = curr_static_value + changed_column['Training_Distribution'] = distribution['hist']['name'] + changed_column['New_Distribution'] = distribution['curr']['name'] + dist_change_message.append(changed_column) + except: + pass + if alert_count: + resultStatus = dist_change_message + else : + resultStatus='Model is working as expected' + return(alert_count, resultStatus) + + def DistributionFinder(self,data): + best_distribution ='' + best_sse =0.0 + if(data.dtype in ['int','int64']): + distributions= {'bernoulli':{'algo':st.bernoulli}, + 'binom':{'algo':st.binom}, + 'geom':{'algo':st.geom}, + 'nbinom':{'algo':st.nbinom}, + 'poisson':{'algo':st.poisson} + } + index, counts = np.unique(data.astype(int),return_counts=True) + if(len(index)>=2): + best_sse = np.inf + y1=[] + total=sum(counts) + mean=float(sum(index*counts))/total + variance=float((sum(index**2*counts) -total*mean**2))/(total-1) + dispersion=mean/float(variance) + theta=1/float(dispersion) + r=mean*(float(theta)/1-theta) + + for j in counts: + y1.append(float(j)/total) + distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean) + distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index)) + distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean)) + distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r) + distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean) + + sselist = [] + for dist in distributions.keys(): + distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0)) + if np.isnan(distributions[dist]['sess']): + distributions[dist]['sess'] = float('inf') + best_dist = min(distributions, key=lambda v: distributions[v]['sess']) + best_distribution = best_dist + best_sse = distributions[best_dist]['sess'] + + elif (len(index) == 1): + best_distribution = 'Constant Data-No Distribution' + best_sse = 0.0 + elif(data.dtype in ['float64','float32']): + distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] + best_distribution = st.norm.name + best_sse = np.inf + nrange = data.max() - data.min() + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + with warnings.catch_warnings(): + 
warnings.filterwarnings('ignore') + params = distribution.fit(data.astype(float)) + arg = params[:-2] + loc = params[-2] + scale = params[-1] + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if( sse < best_sse): + best_distribution = distribution.name + best_sse = sse + + return {'name':best_distribution, 'sse': best_sse} + + +def check_drift( config): + inputdriftObj = inputdrift(config) + historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath,skipinitialspace = True,na_values=['-','?']) + currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation,skipinitialspace = True,na_values=['-','?']) + historicaldataFrame.columns = historicaldataFrame.columns.str.strip() + currentdataFrame.columns = currentdataFrame.columns.str.strip() + dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame) + if message == 'Model is working as expected': + output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}} + else: + output_json = {'status':'SUCCESS','data':{'Affected Columns':message}} + return(output_json) +"""""" + + def get_main_drift_code(self, problem_type, smaller_is_better=True): + text = '' + if problem_type == 'classification': + text += """""" +def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5): + testscore = production['score'] + current_score = current_matrices[scoring_criteria] + threshold_value = testscore * threshold / 100.0 + if current_score > (testscore - threshold_value) : + return True + else: + return False + +def get_metrices(actual_values, predicted_values): + from sklearn.metrics import accuracy_score + from sklearn.metrics import precision_score + from sklearn.metrics import recall_score + from sklearn.metrics import f1_score + result = {} + accuracy_score = accuracy_score(actual_values, predicted_values) + avg_precision = precision_score(actual_values, predicted_values, + average='macro') + avg_recall = recall_score(actual_values, predicted_values, + average='macro') + avg_f1 = f1_score(actual_values, predicted_values, + average='macro') + + result['accuracy'] = round((accuracy_score*100),2) + result['precision'] = round((avg_precision*100),2) + result['recall'] = round((avg_recall*100),2) + result['f1'] = round((avg_f1*100),2) + return result + """""" + else: + text += """""" +def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5): + testscore = production['score'] + current_score = current_matrices[scoring_criteria] + threshold_value = testscore * threshold / 100.0 +"""""" + if smaller_is_better: + text += """""" + if current_score < (testscore + threshold_value) :"""""" + else: + text += """""" + if current_score > (testscore - threshold_value) :"""""" + text += """""" + return True + else: + return False + +def get_metrices(actual_values, predicted_values): + import numpy as np + result = {} + me = np.mean(predicted_values - actual_values) + sde = np.std(predicted_values - actual_values, ddof = 1) + + abs_err = np.abs(predicted_values - actual_values) + mae = np.mean(abs_err) + sdae = np.std(abs_err, ddof = 1) + + abs_perc_err = 100.0 * np.abs(predicted_values - actual_values) / actual_values + mape = np.mean(abs_perc_err) + sdape = np.std(abs_perc_err, ddof = 1) + + result['mean_error'] = me + result['mean_abs_error'] = mae + result['mean_abs_perc_error'] = mape + result['error_std'] = sde + result['abs_error_std'] = sdae + result['abs_perc_error_std'] = sdape + return result + """""" 
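        # Added annotation: in the is_drift_within_limits() helper emitted above, `threshold`
        # is a percentage of the stored production score. For example, with a production
        # score of 80 and the default threshold of 5, the band is 80 * 5 / 100.0 = 4.0, so a
        # "bigger is better" metric stays within limits while the current score is above 76.0;
        # when smaller_is_better is True the generated check allows scores below 84.0 instead.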
+ text += """""" +def monitoring(config, log=None): + targetPath = Path('aion')/config['targetPath'] + targetPath.mkdir(parents=True, exist_ok=True) + log_file = targetPath/IOFiles['log'] + log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + output_json = {} + trainingDataLocation = targetPath/IOFiles['trainingData'] + monitoring = targetPath/IOFiles['monitoring'] + log.info(f'Input Location External: {config[""inputUriExternal""]}') + trainingStatus = 'False' + dataFileLocation = '' + driftStatus = 'No Drift' + if monitoring.exists(): + monitoring_data = read_json(monitoring) + if monitoring_data.get('runNo', False): + reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None)) + production= targetPath/IOFiles['production'] + proddataDF = pd.DataFrame() + predicted_data = pd.DataFrame() + if production.exists(): + production = read_json(production) + if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']): + predicted_data = reader.read(IOFiles['prodData']) + actual_data = reader.read(IOFiles['prodDataGT']) + common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()] + proddataDF = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner') + currentPerformance = {} + currentPerformance = get_metrices(proddataDF[config['target_feature']], proddataDF['prediction']) + if is_drift_within_limits(production, currentPerformance,config['scoring_criteria']): + log.info(f'OutputDrift: No output drift found') + output_json.update({'outputDrift':'Model score is with in limits'}) + else: + log.info(f'OutputDrift: Found Output Drift') + log.info(f'Original Test Score: {production[""score""]}') + log.info(f'Current Score: {currentPerformance[config[""scoring_criteria""]]}') + output_json.update({'outputDrift':{'Meassage': 'Model output is drifted','trainedScore':production[""score""], 'currentScore':currentPerformance[config[""scoring_criteria""]]}}) + trainingStatus = 'True' + driftStatus = 'Output Drift' + else: + if reader.file_exists(IOFiles['prodData']): + predicted_data = reader.read(IOFiles['prodData']) + log.info(f'OutputDrift: Prod Data not found') + output_json.update({'outputDrift':'Prod Data not found'}) + else: + log.info(f'Last Time pipeline not executed completely') + output_json.update({'Msg':'Pipeline is not executed completely'}) + trainingStatus = 'True' + if config['inputUriExternal']: + dataFileLocation = config['inputUriExternal'] + elif 's3' in config.keys(): + dataFileLocation = 'cloud' + else: + dataFileLocation = config['inputUri'] + + + if trainingStatus == 'False': + historicaldataFrame=pd.read_csv(trainingDataLocation) + if config['inputUriExternal']: + currentdataFrame=pd.read_csv(config['inputUriExternal']) + elif not predicted_data.empty: + currentdataFrame = predicted_data.copy() + elif 's3' in config.keys(): + reader = dataReader(reader_type='s3',target_path=config['targetPath'], config=config['s3']) + currentdataFrame = reader.read(config['s3']['file_name']) + else" +": + currentdataFrame=pd.read_csv(config['inputUri']) + inputdriftObj = inputdrift(config) + dataalertcount,inputdrift_message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame) + + if inputdrift_message == 'Model is working as expected': + log.info(f'InputDrift: No input drift found') + output_json.update({'Status':'SUCCESS','inputDrift':'Model is working as expected'}) + else: + log.info(f'InputDrift: Input 
drift found') + log.info(f'Affected Columns {inputdrift_message}') + output_json.update({'inputDrift':{'Affected Columns':inputdrift_message}}) + trainingStatus = 'True' + driftStatus = 'Input Drift' + if config['inputUriExternal']: + dataFileLocation = config['inputUriExternal'] + elif actual_data_path.exists() and predict_data_path.exists(): + dataFileLocation = '' + elif 's3' in config.keys(): + dataFileLocation = 'cloud' + else: + dataFileLocation = config['inputUri'] + else: + log.info(f'Pipeline Executing first Time') + output_json.update({'Msg':'Pipeline executing first time'}) + trainingStatus = 'True' + if config['inputUriExternal']: + dataFileLocation = config['inputUriExternal'] + elif 's3' in config.keys(): + dataFileLocation = 'cloud' + else: + dataFileLocation = config['inputUri'] + else: + log.info(f'Pipeline Executing first Time') + output_json.update({'Msg':'Pipeline executing first time'}) + trainingStatus = 'True' + if config['inputUriExternal']: + dataFileLocation = config['inputUriExternal'] + elif 's3' in config.keys(): + dataFileLocation = 'cloud' + else: + dataFileLocation = config['inputUri'] + if monitoring.exists(): + monitoring_data['runNo'] = int(monitoring_data.get('runNo', '0')) + 1 + else: + monitoring_data = {} + monitoring_data['runNo'] = 1 + monitoring_data['prod_db_type'] = config.get('prod_db_type', 'sqlite') + monitoring_data['db_config'] = config.get('db_config', {}) + monitoring_data['mlflow_config'] = config.get('mlflow_config', None) + if 's3' in config.keys(): + monitoring_data['s3'] = config['s3'] + monitoring_data['dataLocation'] = dataFileLocation + monitoring_data['driftStatus'] = driftStatus + write_json(monitoring_data,targetPath/IOFiles['monitoring']) + output = {'Status':'SUCCESS'} + output.update(output_json) + return(json.dumps(output)) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--inputUri', help='Training Data Location') + + args = parser.parse_args() + config_file = Path(__file__).parent/'config.json' + if not Path(config_file).exists(): + raise ValueError(f'Config file is missing: {config_file}') + config = read_json(config_file) + config['inputUriExternal'] = None + if args.inputUri: + if args.inputUri != '': + config['inputUriExternal'] = args.inputUri + log = None + try: + print(monitoring(config, log)) + except Exception as e: + if log: + log.error(e, exc_info=True) + status = {'Status':'Failure','Message':str(e)} + print(json.dumps(status)) + raise Exception(str(e)) +"""""" + return text """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
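# --- Illustrative sketch (added annotation; values are placeholders, not from the source) ---
# Shape of the monitoring.json record that the generated monitoring() function above
# persists at the end of every run.
example_monitoring_record = {
    'runNo': 2,                        # incremented on each invocation
    'prod_db_type': 'sqlite',          # default when no prod_db_type is configured
    'db_config': {},
    'mlflow_config': None,
    'dataLocation': 'data/input.csv',  # hypothetical path; becomes 'cloud' for s3 sources
    'driftStatus': 'No Drift',         # or 'Input Drift' / 'Output Drift'
}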
+*/ +"""""" +import json + +class selector(): + + def __init__(self, indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.codeText = f""\\n\\ndef featureSelector(log):"" + self.pipe = 'pipe' + self.code_generated = False + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config"" + return text + + def addMainCode(self): + self.codeText += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n log = None\\ + \\n try:\\ + \\n print(featureSelector(log))\\ + \\n except Exception as e:\\ + \\n if log:\\ + \\n log.error(e, exc_info=True)\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print(json.dumps(status))\\ + "" + def addValidateConfigCode(self, indent=1): + self.function_code += self.__addValidateConfigCode() + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.function_code + '\\n' + self.codeText + + def addLocalFunctionsCode(self): + self.addValidateConfigCode() + + + def getPrefixModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += ""\\ + \\n config = validateConfig()\\ + \\n targetPath = Path('aion')/config['targetPath']\\ + \\n if not targetPath.exists():\\ + \\n raise ValueError(f'targetPath does not exist')\\ + \\n meta_data_file = targetPath/IOFiles['metaData']\\ + \\n if meta_data_file.exists():\\ + \\n meta_data = read_json(meta_data_file)\\ + \\n else:\\ + \\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\ + \\n log_file = targetPath/IOFiles['log']\\ + \\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\ + \\n dataLoc = targetPath/IOFiles['inputData']\\ + \\n if not dataLoc.exists():\\ + \\n return {'Status':'Failure','Message':'Data location does not exists.'}\\ + \\n\\ + \\n status = dict()\\ + \\n df = pd.read_csv(dataLoc)\\ + \\n prev_step_output = meta_data['transformation']"" + + def getSuffixModules(self): + modules = [{'module':'platform'} + ,{'module':'time'} + ] + return modules + + def addSuffixCode(self, indent=1): + self.codeText += 
""\\n\\ + \\n csv_path = str(targetPath/IOFiles['outputData'])\\ + \\n write_data(df, csv_path,index=False)\\ + \\n status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'total_features':total_features, 'selected_features':selected_features}\\ + \\n log.info(f'Selected data saved at {csv_path}')\\ + \\n meta_data['featureengineering']['Status'] = status\\ + \\n write_json(meta_data, str(targetPath/IOFiles['metaData']))\\ + \\n log.info(f'output: {status}')\\ + \\n return json.dumps(status)"" + + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'sys'} + ,{'module':'json'} + ,{'module':'logging'} + ,{'module':'argparse'} + ] + return modules + + def add_variable(self, name, value, indent=1): + if isinstance(value, str): + self.codeText += f""\\n{self.tab * indent}{name} = '{value}'"" + else: + self.codeText += f""\\n{self.tab * indent}{name} = {value}"" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def modelBased(self, problem_type, indent=1): + if problem_type == 'classification': + self.codeText += f""\\n{self.tab * indent}selector = SelectFromModel(ExtraTreesClassifier())"" + self.codeText += f""\\n{self.tab * indent}selector()"" + if problem_type == 'regression': + self.codeText += f""\\n{self.tab * indent}pipe = Pipeline([('selector', SelectFromModel(Lasso()))])"" + self.codeText += f""\\n{self.tab * indent}selector.fit(df[train_features],df[target_feature])"" + self.codeText += f""\\n{self.tab * indent}selected_features = [x for x,y in zip(train_features, selector.get_support()) if y]"" + self.codeText += f""\\n{self.tab * indent}df = df[selected_features + [target_feature]]"" + + def featureReductionBased(self, reducer, n_components, indent=1): + if reducer == 'pca': + if n_components == 0: + self.codeText += f""\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components='mle',svd_solver = 'full'))])"" + elif n_components < 1: + self.codeText += f""\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components={n_components},svd_solver = 'full'))])"" + else: + self.codeText += f""\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components=int({n_components})))])"" + self.codeText += ""pipe.fit_transform(df)"" + + def getPipe(self): + return self.pipe + + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * + +def run_output_drift(config): + importer = importModule() + drifter = output_drift(missing = get_variable('fillna', False), word2num_features= get_variable('word2num_features', False), cat_encoder = get_variable('cat_encoder', False),target_encoder = get_variable('target_encoder', False),normalizer = get_variable('normalizer', False),text_profiler = get_variable('text_features', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False),problem_type=config['problem_type']) + function = global_function() + importer.addModule('sys') + importer.addModule('math') + importer.addModule('json') + importer.addModule('platform') + importer.addModule('joblib') + importer.addModule('mlflow') + importer.addModule('sklearn') + importer.addModule('numpy', mod_as='np') + importer.addModule('pandas', mod_as='pd') + importer.addModule('Path', mod_from='pathlib') + importer.addModule('InfluxDBClient', mod_from='influxdb') + function.add_function('readWrite') + code = file_header(config['modelName']+'_'+config['modelVersion']) + code += importer.getCode() + code += function.getCode() + drifter.generateCode() + code += drifter.getCode() + deploy_path = Path(config[""deploy_path""])/'MLaC'/'OutputDrift' + deploy_path.mkdir(parents=True, exist_ok=True) + py_file = deploy_path/""output_drift.py"" + with open(py_file, ""w"") as f: + f.write(code) + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + f.write(importer.getBaseModule()) + create_docker_file('output_drift', deploy_path) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
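# --- Illustrative sketch (added annotation; keys inferred from run_output_drift() above,
# values are placeholders) ---
example_config = {
    'modelName': 'usecase1',
    'modelVersion': '1',
    'problem_type': 'classification',
    'deploy_path': '/tmp/aion_deploy',   # MLaC/OutputDrift is created beneath this path
}
run_output_drift(example_config)  # writes output_drift.py and requirements.txt, then calls create_docker_file()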
+*/ +"""""" + +import shutil +from pathlib import Path +import json +from mlac.ml" +".core import * +from .utility import * +import tarfile + +output_file_map = { + 'text' : {'text' : 'text_profiler.pkl'}, + 'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'}, + 'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'}, + 'normalizer' : {'normalizer' : 'normalizer.pkl'} +} + +def add_common_imports(importer): + common_importes = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + +def add_text_dependency(): + return """"""nltk==3.6.3 +textblob==0.15.3 +spacy==3.1.3 +demoji==1.1.0 +bs4==0.0.1 +text_unidecode==1.3 +contractions==0.1.73 +"""""" + +def get_transformer_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""text_features"",""profiler"",""test_ratio""] #Bugid 13217 + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_transformer(config): + transformer = profiler() + importer = importModule() + function = global_function() + importModules(importer, transformer.getPrefixModules()) + importer.addModule('warnings') + transformer.addPrefixCode() + importer.addModule('train_test_split', mod_from='sklearn.model_selection') + if config[""problem_type""] == 'classification': + importer.addModule('LabelEncoder', mod_from='sklearn.preprocessing') + transformer.addInputFiles({'targetEncoder':'targetEncoder.pkl'}) + update_variable('target_encoder', True) + transformer.addStatement(""train_data, test_data = train_test_split(df,stratify=df[target_feature],test_size=config['test_ratio'])"",indent=2) #Bugid 13217 + transformer.addStatement(""profilerObj = profiler(xtrain=train_data, target=target_feature, encode_target=True, config=config['profiler'],log=log)"") #Bugid 13217 + else: + transformer.addStatement(""train_data, test_data = train_test_split(df,test_size=config['test_ratio'])"",indent=2) + transformer.addStatement(""profilerObj = profiler(xtrain=train_data, target=target_feature, config=config['profiler'],log=log)"") + + importModules(importer, transformer.getSuffixModules()) + importModules(importer, transformer.getMainCodeModules()) + transformer.addSuffixCode( config[""problem_type""] == 'classification') + transformer.addMainCode() + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'DataTransformation' + deploy_path.mkdir(parents=True, exist_ok=True) + generated_files = [] + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('transformer') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + # create the dataProfiler file + profiler_importer = importModule() + importer.addLocalModule('profiler', mod_from='dataProfiler') + profiler_obj = data_profiler(profiler_importer, True if 
config[""text_features""] else False) + code_text = profiler_obj.get_code() # import statement will be generated when profiler_obj.get_code is called. + # need to copy data profiler from AION code as code is splitted and merging code amnnually + # can add bugs. need a better way to find the imported module + #aion_transformer = Path(__file__).parent.parent.parent.parent/'transformations' + aion_utilities = Path(__file__).parent.parent.parent.parent/'utilities' #added for non encryption --Usnish + (deploy_path/'transformations').mkdir(parents=True, exist_ok=True) + if not (aion_utilities/'transformations'/'dataProfiler.py').exists(): + raise ValueError('Data profiler file removed from AION') + shutil.copy(aion_utilities/'transformations'/'dataProfiler.py',deploy_path/""dataProfiler.py"") + shutil.copy(aion_utilities/'transformations'/'data_profiler_functions.py',deploy_path/""transformations""/""data_profiler_functions.py"") + + + if (deploy_path/'text').exists(): + shutil.rmtree(deploy_path/'text') + + with tarfile.open(aion_utilities/'text.tar') as file: + file.extractall(deploy_path) + if (deploy_path/'utils').exists(): + shutil.rmtree(deploy_path/'utils') + with tarfile.open(aion_utilities / 'utils.tar') as file: + file.extractall(deploy_path) + + generated_files.append(""dataProfiler.py"") + generated_files.append(""transformations"") + generated_files.append(""text"") + generated_files.append(""utils"") + + code = file_header(usecase) + code += ""\\nimport os\\nos.path.abspath(os.path.join(__file__, os.pardir))\\n"" #chdir to import from current dir + code += importer.getCode() + code += '\\nwarnings.filterwarnings(""ignore"")\\n' + code += transformer.getInputOutputFiles() + code += function.getCode() + transformer.addLocalFunctionsCode() + code += transformer.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), profiler_importer]) + if config[""text_features""]: + req += add_text_dependency() + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_transformer_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('transformer', deploy_path,config['modelName'], generated_files,True if config[""text_features""] else False) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * + +def get_register_params(config, models): + param_keys = [""modelVersion"",""problem_type""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + data['models'] = models + return data + +def run_register(config): + importer = importModule() + function = global_function() + registration = register(importer) + function.add_function('get_mlflow_uris') + models = get_variable('models_name') + smaller_is_better = get_variable('smaller_is_better', False) + registration.addClassCode(smaller_is_better) + registration.addLocalFunctionsCode(models) + registration.addPrefixCode() + registration.addMainCode(models) + importModules(importer, registration.getMainCodeModules()) + importer.addModule('warnings') + + generated_files = [] + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelRegistry' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('register') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file required for creating a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = registration.getImportCode() + code += '\\nwarnings.filterwarnings(""ignore"")\\n' + code += registration.getInputOutputFiles() + code += function.getCode() + code += registration.getCode() + # create serving file + with open(deploy_path/""aionCode.py"", 'w') as f: + f.write(file_header(usecase) + code) + generated_files.append(""aionCode.py"") + + # create requirements file + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + # create config file + with open (deploy_path/""config.json"", ""w"") as f: + json.dump(get_register_params(config, models), f, indent=4) + generated_files.append(""config.json"") + + # create docker file + create_docker_file('register', deploy_path,config['modelName'], generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +import datetime +from pathlib import Path + +variables = {} + +def init_variables(): + global variables + variables = {} +def update_variable(name, value): + variables[name] = value + +def get_variable(name, default=None): + return variables.get(name, default) + +def append_variable(name, value): + data = get_variable(name) + if not data: + update_variable(name, [value]) + elif not isinstance(data, list): + update_variable(name, [data, value]) + else: + data.append(value) + update_variable(name, data) + +def addDropFeature(feature, features_list, coder, indent=1): + coder.addStatement(f'if {feature} in {features_list}:', indent=indent) + coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1) + +def importModules(importer, modules_list): + for module in modules_list: + mod_from = module.get('mod_from',None) + mod_as = module.get('mod_as',None) + importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) + +def file_header(use_case, module_name=None): + time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ') + text = ""#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n"" + return text + f""'''\\nThis file is automatically generated by AION for {use_case} usecase.\\nFile generation time: {time_str}\\n'''"" + +def get_module_mapping(module): + mapping = { + ""LogisticRegression"": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'} + ,""GaussianNB"": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'} + ,""DecisionTreeClassifier"": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'} + ,""SVC"": {'module':'SVC', 'mod_from':'sklearn.svm'} + ,""KNeighborsClassifier"": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'} + ,""GradientBoostingClassifier"": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'} + ,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'} + ,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'} + ,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'} + ,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'} + + ,""LinearRegression"": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'} + ,""Lasso"": {'module':'Lasso', 'mod_from':'sklearn.linear_model'} + ,""Ridge"": {'module':'Ridge', 'mod_from':'sklearn.linear_model'} + ,""DecisionTreeRegressor"": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'} + ,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'} + ,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'} + ,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'} + ,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'} + } + return mapping.get(module, None) + +def create_docker_file(name, path,usecasename,files=[],text_feature=False): + text = """" + if name == 'load_data': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL """ +"usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'transformer': + text='FROM python:3.8-slim-buster\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + 
text+='''RUN \\ +''' + text+=''' pip install --no-cache-dir -r requirements.txt\\ +''' + if text_feature: + text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\ +''' + text+='\\n' + elif name == 'selector': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'train': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + text+='COPY requirements.txt requirements.txt' + text+='\\n' + text+='COPY config.json config.json' + text+='\\n' + text+='COPY aionCode.py aionCode.py' + text+='\\n' + text+='COPY utility.py utility.py' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'register': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'Prediction': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='''RUN \\ +''' + text+='''pip install --no-cache-dir -r requirements.txt\\ +''' + if text_feature: + text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\ +''' + text+='\\n' + text+='ENTRYPOINT [""python"", ""aionCode.py"",""-ip"",""0.0.0.0"",""-pn"",""8094""]\\n' + elif name == 'input_drift': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + file_name = Path(path)/'Dockerfile' + with open(file_name, 'w') as f: + f.write(text) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +from .load_data import run_loader +from .transformer import run_transformer +from .selector import run_selector +from .trainer import run_trainer +from .register import run_register +from .deploy import run_deploy +from .drift_analysis import run_drift_analysis + + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +import shutil +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * +import tarfile +def add_text_dependency(): + return """"""nltk==3.6.3 +textblob==0.15.3 +spacy==3.1.3 +demoji==1.1.0 +bs4==0.0.1 +text_unidecode==1.3 +contractions==0.1.73 +"""""" + +def get_deploy_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + data['ipAddress'] = '127.0.0.1' + data['portNo'] = '8094' + return data + +def import_trainer_module(importer): + non_sklearn_modules = get_variable('non_sklearn_modules') + if non_sklearn_modules: + for mod in non_sklearn_modules: + module = get_module_mapping(mod) + mod_from = module.get('mod_from',None) + mod_as = module.get('mod_as',None) + importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) + +imported_modules = [ + {'module': 'sys', 'mod_from': None, 'mod_as': None}, + {'module': 'math', 'mod_from': None, 'mod_as': None}, + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'shutil', 'mod_from': None, 'mod_as': None}, + {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, + {'module': 'sklearn', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None} + ] + +def run_deploy(config): + generated_files = [] + importer = importModule() + deployer = deploy(target_encoder = get_variable('target_encoder', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False)) + + function = global_function() + importModules(importer, imported_modules) + + if get_variable('cat_encoder', False): + importer.addModule('category_encoders') + import_trainer_module(importer) + if get_variable('word2num_features'): + function.add_function('s2n', importer) + if get_variable('text_features'): + importer.addLocalModule('textProfiler', mod_from='text.textProfiler') + + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelServing' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('Prediction') + with open(deploy_path/""utility.py"", 
'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create the production data reader file + importer.addLocalModule('*', mod_from='data_reader') + reader_obj = data_reader(['sqlite','influx']) + with open(deploy_path/""data_reader.py"", 'w') as f: + f.write(file_header(usecase) + reader_obj.get_code()) + generated_files.append(""data_reader.py"") + + # need to copy data profiler from AION code as code is split and merging code manually + # can add bugs + aion_utilities = Path(__file__).parent.parent.parent.parent / 'utilities' + + with tarfile.open(aion_utilities / 'text.tar') as file: + file.extractall(deploy_path) + if (deploy_path / 'utils').exists(): + shutil.rmtree(deploy_path / 'utils') + with tarfile.open(aion_utilities / 'utils.tar') as file: + file.extractall(deploy_path ) + generated_files.append(""text"") + generated_files.append(""utils"") + + # create empty init file required for creating a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + function.add_function('get_mlflow_uris') + code = file_header(usecase) + code += importer.getCode() + code += deployer.getInputOutputFiles() + code += function.getCode() + code += deployer.getCode() + + # create prediction file + with open(deploy_path/""predict.py"", 'w') as f: + f.write(code) + generated_files.append(""predict.py"") + + # create groundtruth file + with open(deploy_path/""groundtruth.py"", 'w') as f: + f.write(file_header(usecase) + deployer.getGroundtruthCode()) + generated_files.append(""groundtruth.py"") + + # create the service file + with open(deploy_path/""aionCode.py"", 'w') as f: + f.write(file_header(usecase) + deployer.getServiceCode()) + generated_files.append(""aionCode.py"") + importer.addModule('seaborn') + # create requirements file + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) + if config[""text_features""]: + req += add_text_dependency() + f.write(req) + generated_files.append(""requirements.txt"") + + # create config file + config_file = deploy_path/""config.json"" + config_data = get_deploy_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + # create docker file + create_docker_file('Prediction', deploy_path,config['modelName'], generated_files, True if config[""text_features""] else False) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited.
+*/ +"""""" + +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * + +def get_model_name(algo, method): + if method == 'modelBased': + return algo + '_' + 'MLBased' + if method == 'statisticalBased': + return algo + '_' + 'StatisticsBased' + else: + return algo + + +def get_training_params(config, algo): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""scoring_criteria"",""test_ratio"",""optimization_param""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['algorithms'] = {algo: config['algorithms'][algo]} + data['targetPath'] = config['modelName'] + return data + +def addImporterLearner(model, importer): + module = get_module_mapping(model) + mod_from = module.get('mod_from',None) + mod_as = module.get('mod_as',None) + importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) + if not get_variable('non_sklearn_modules'): + update_variable('non_sklearn_modules', []) + if 'sklearn' not in mod_from: + modules = get_variable('non_sklearn_modules') + modules.append(model) + update_variable('non_sklearn_modules', modules) + +def addEvaluator(scorer_type, optimizer,trainer, importer): + trainer.addStatement(""if not X_test.empty:"") + if optimizer == 'genetic': + trainer.addStatement('features = [x for i,x in enumerate(features) if grid.support_[" +"i]]',indent=2) + trainer.addStatement('y_pred = estimator.predict(X_test[features])',indent=2) + if scorer_type == 'accuracy': + importer.addModule('accuracy_score', mod_from='sklearn.metrics') + trainer.addStatement(f""test_score = round(accuracy_score(y_test,y_pred),2) * 100"",indent=2) + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') + trainer.addStatement(""log.info('Confusion Matrix:')"",indent=2) + trainer.addStatement(""log.info('\\\\n' + pd.DataFrame(confusion_matrix(y_test,y_pred)).to_string())"",indent=2) + elif scorer_type == 'recall': + importer.addModule('recall_score', mod_from='sklearn.metrics') + trainer.addStatement(f""test_score = round(recall_score(y_test,y_pred,average='macro'),2) * 100"",indent=2) + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') + trainer.addStatement(f""log.info('Confusion Matrix:\\\\n')"",indent=2) + trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) + elif scorer_type == 'precision': + importer.addModule('precision_score', mod_from='sklearn.metrics') + trainer.addStatement(f""test_score = round(precision_score(y_test,y_pred,average='macro'),2) * 100"",indent=2) + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') + trainer.addStatement(f""log.info('Confusion Matrix:\\\\n')"",indent=2) + trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) + elif scorer_type == 'f1_score': + importer.addModule('f1_score', mod_from='sklearn.metrics') + trainer.addStatement(f""test_score = round(f1_score(y_test,y_pred,average='macro'),2) * 100"",indent=2) + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') + trainer.addStatement(f""log.info('Confusion Matrix:\\\\n')"",indent=2) + trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) + elif scorer_type == 'roc_auc': + importer.addModule('roc_auc_score', mod_from='sklearn.metrics') + trainer.addStatement(""try:"") + trainer.addStatement(f""test_score = round(roc_auc_score(y_test,y_pred),2) * 100"", indent=3) + importer.addModule('confusion_matrix', mod_from='sklearn.metrics') 
+ trainer.addStatement(f""log.info('Confusion Matrix:\\\\n')"",indent=3) + trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=3) + trainer.addStatement(""except:"") + trainer.addStatement(""try:"",indent=3) + trainer.addStatement(""actual = pd.get_dummies(y_test)"",indent=4) + trainer.addStatement(""y_pred = pd.get_dummies(y_pred)"",indent=4) + trainer.addStatement(f""test_score = round(roc_auc_score(y_test,y_pred,average='weighted', multi_class='ovr'),2) * 100"", indent=3) + trainer.addStatement(f""log.info('Confusion Matrix:\\\\n')"",indent=4) + trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=4) + trainer.addStatement(""except:"",indent=3) + trainer.addStatement(f""test_score = 0.0"", indent=4) + elif scorer_type == 'neg_mean_squared_error' or scorer_type == 'mse': + importer.addModule('mean_squared_error', mod_from='sklearn.metrics') + trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred),2)',indent=2) + update_variable('smaller_is_better', True) + elif scorer_type == 'neg_root_mean_squared_error' or scorer_type == 'rmse': + importer.addModule('mean_squared_error', mod_from='sklearn.metrics') + trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred,squared=False),2)',indent=2) + update_variable('smaller_is_better', True) + elif scorer_type == 'neg_mean_absolute_error' or scorer_type == 'mae': + importer.addModule('mean_absolute_error', mod_from='sklearn.metrics') + trainer.addStatement(f'test_score = round(mean_absolute_error(y_test,y_pred),2)',indent=2) + update_variable('smaller_is_better', True) + elif scorer_type == 'r2': + importer.addModule('r2_score', mod_from='sklearn.metrics') + trainer.addStatement(f'test_score = round(r2_score(y_test,y_pred),2)',indent=2) +def update_search_space(algo, config): + search_space = [] + algoritms = config[""algorithms""] + model = algo + params = algoritms[model] + model_dict = {model:get_module_mapping(model)['mod_from']} + d = {'algo': model_dict} + d['param'] = params + search_space.append(d) + config['search_space'] = search_space + +def get_optimization(optimization, importer, function=None): + if optimization == 'grid': + importer.addModule('GridSearchCV', mod_from='sklearn.model_selection') + optimization = 'GridSearchCV' + elif optimization == 'random': + importer.addModule('RandomizedSearchCV', mod_from='sklearn.model_selection') + optimization = 'RandomizedSearchCV' + elif optimization == 'genetic': + importer.addModule('GeneticSelectionCV', mod_from='genetic_selection') + optimization = 'GeneticSelectionCV' + elif optimization == 'bayesopt': + optimization = 'BayesSearchCV' + function.add_function(optimization,importer) + return optimization + +def scoring_criteria_reg(score_param): + scorer_mapping = { + 'mse':'neg_mean_squared_error', + 'rmse':'neg_root_mean_squared_error', + 'mae':'neg_mean_absolute_error', + 'r2':'r2' + } + return scorer_mapping.get(score_param, 'neg_mean_squared_error') + +def addBalancing(balancingMethod, importer, code): + if balancingMethod == 'oversample': + importer.addModule('SMOTE', mod_from='imblearn.over_sampling') + code.addStatement(""\\n # data balancing"") + code.addStatement(""X_train, y_train = SMOTE(sampling_strategy='auto', k_neighbors=1, random_state=100).fit_resample(X_train, y_train)"") + if balancingMethod == 'undersample': + importer.addModule('TomekLinks', mod_from='imblearn.under_sampling') + code.addStatement(""\\n # data balancing"") + code.addStatement(""X_train, y_train = 
TomekLinks().fit_resample(X_train, y_train)"") + +def run_trainer(base_config): + base_trainer = learner() + base_importer = importModule() + function = global_function() + base_importer.addModule('joblib') + base_importer.addModule('warnings') + base_importer.addModule('argparse') + base_importer.addModule('pandas', mod_as='pd') + base_importer.addModule('Path', mod_from='pathlib') + function.add_function('get_mlflow_uris') + function.add_function('mlflow_create_experiment') + importModules(base_importer,base_trainer.getPrefixModules()) + base_trainer.addPrefixCode() + if base_config[""algorithms""]: + base_trainer.add_train_test_split('train_features', 'target_feature', ""config['test_ratio']"") + if base_config[""problem_type""] == 'classification': + if base_config[""balancingMethod""]: + addBalancing(base_config[""balancingMethod""],base_importer,base_trainer) + base_trainer.addStatement(f""log.info('Data balancing done')"") + base_trainer.addStatement(""\\n #select scorer"") + if base_config[""problem_type""] == 'classification': + function.add_function('scoring_criteria', base_importer) + base_trainer.addStatement(""scorer = scoring_criteria(config['scoring_criteria'],config['problem_type'], df[target_feature].nunique())"") + else: + base_config['scoring_criteria'] = scoring_criteria_reg(base_config['scoring_criteria']) + base_trainer.addStatement(f""scorer = config['scoring_criteria']"") + base_trainer.addStatement(f""log.info('Scoring criteria: {base_config['scoring_criteria']}')"") + feature_selector = [] + if base_config['feature_reducer']: + feature_selector.append(base_config['feature_reducer']) + elif base_config['feature_selector']: + feature_selector = base_config['feature_selector'] + for algo in base_config[""algorithms""].keys(): + for method in feature_selector: + trainer = learner() + importer = importModule() + trainer.copyCode(base_trainer) + importer.copyCode(base_importer) + config = base_config + usecase = config['modelName']+'_'+config['modelVersion'] + addImporterLearner(algo, importer) + trainer.addStatement(""\\n #Training model"") + trainer.addStatement(f""log.info('Training {algo} for {method}')"") + trainer.add_model_fit(algo, get_optimization(config[""optimization""], importer, function), method, importer) + trainer.addStatement(""\\n #model evaluation"") + addEvaluator(config['scoring_criteria'],config[""optimization""], trainer, importer) + function.add_function('mlflowSetPath') + function.add_function('logMlflow') + importModules(importer, trainer.getSuffixModules()) + importModules(importer, trainer.getMainCodeModules()) + if base_config[""problem_type""] == 'classification': + function.add_function('classification_metrices', importer) + trainer.addStatement(""metrices = get_classification_metrices(y_test,y_pred)"",indent=2) + trainer.add_100_trainsize_code() + trainer.addStatement(""metrices.update({'train_score': train_score, 'test_score':test_score})"") + else: + function.add_function('regression_metrices', importer) + trainer.addStatement(""metrices = get_regression_metrices(y_test,y_pred)"",indent=2) + trainer.add_100_trainsize_code() + trainer.addStatement(""metrices.update({'train_score': train_score, 'test_score':test_score})"") + trainer.addSuffixCode() + trainer.addMainCode() + + model_name = get_model_name(algo,method) + deploy_path = Path(config[""deploy_path""])/'MLaC'/('ModelTraining'+'_' + model_name) + deploy_path.mkdir(parents=True, exist_ok=True) + generated_files = [] + + # create the utility file + importer.addLocalModule('*', 
mod_from='utility') + utility_obj = utility_function('train') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = importer.getCode() + code += 'warnings.filterwarnings(""ignore"")\\n' + code += f""\\nmodel_name = '{model_name}'\\n"" + append_variable('models_name',model_name) + out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','performance':f'{model_name}_performance.json','metaDataOutput':f'{model_name}_modelMetaData.json'} + trainer.addOutputFiles(out_files) + code += trainer.getInputOutputFiles() + code += function.getCode() + trainer.addLocalFunctionsCode() + code += trainer.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + with open (deploy_path/""config.json"", ""w"") as f: + json.dump(get_training_params(config, algo), f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('train', deploy_path,config['modelName'], generated_files) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * + + +def run_input_drift(config): + importer = importModule() + drifter = input_drift() + importer.addModule('sys') + importer.addModule('json') + importer.addModule('mlflow') + importer.addModule('platform') + importer.addModule('warnings') + importer.addModule('numpy', mod_as='np') + importer.addModule('pandas', mod_as='pd') + " +"importer.addModule('stats', mod_from='scipy', mod_as='st') + importer.addModule('Path', mod_from='pathlib') + code = file_header(config['modelName']+'_'+config['modelVersion']) + code += importer.getCode() + drifter.generateCode() + code += drifter.getCode() + deploy_path = Path(config[""deploy_path""])/'MLaC'/'InputDrift' + deploy_path.mkdir(parents=True, exist_ok=True) + py_file = deploy_path/""input_drift.py"" + with open(py_file, ""w"") as f: + f.write(code) + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + f.write(importer.getBaseModule()) + create_docker_file('input_drift', deploy_path) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +import platform +from mlac.ml.core import * +from .utility import * + +imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + +def get_load_data_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""selected_features""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_loader(config): + generated_files = [] + importer = importModule() + loader = tabularDataReader() + importModules(importer, imported_modules) + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'DataIngestion' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('load_data') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + # create the production data reader file + importer.addLocalModule('dataReader', mod_from='data_reader') + readers = ['sqlite','influx'] + if 's3' in config.keys(): + readers.append('s3') + reader_obj = data_reader(readers) + with open(deploy_path/""data_reader.py"", 'w') as f: + f.write(file_header(usecase) + reader_obj.get_code()) + generated_files.append(""data_reader.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = file_header(usecase) + code += importer.getCode() + code += loader.getInputOutputFiles() + code += loader.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_load_data_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('load_data', deploy_path,config['modelName'],generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +from pathlib import Path +import json +from mlac.ml.core import * +from .utility import * + +imported_modules = [ + {'module': 'sys', 'mod_from': None, 'mod_as': None}, + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'math', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, + {'module': 'sklearn', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + +def get_drift_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""selected_features"",""scoring_criteria"",""s3""] + data = {key:value for (key,value) in config.items() if key in param_keys} + usecase = config['modelName'] + data['targetPath'] = usecase + if config['dataLocation'] != '': + data['inputUri'] = config['dataLocation'] + else: + data['inputUri'] = '' + data['prod_db_type'] = config.get('prod_db_type', 'sqlite') + data['db_config'] = config.get('db_config', {}) + data['mlflow_config'] = config.get('mlflow_config', {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}) + return data + +def run_drift_analysis(config): + init_variables() + importer = importModule() + function = global_function() + drifter = drift() + importModules(importer, imported_modules) + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelMonitoring' + deploy_path.mkdir(parents=True, exist_ok=True) + + generated_files = [] + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('drift') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + # create the production data reader file + importer.addLocalModule('dataReader', mod_from='data_reader') + readers = ['sqlite','influx'] + if 's3' in config.keys(): + readers.append('s3') + reader_obj = data_reader(readers) + with open(deploy_path/""data_reader.py"", 'w') as f: + f.write(file_header(usecase) + reader_obj.get_code()) + generated_files.append(""data_reader.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + importer.addLocalModule('inputdrift', mod_from='input_drift') + + code = file_header(usecase) + code += importer.getCode() + code += drifter.getInputOutputFiles() + code += function.getCode() + code += drifter.get_main_drift_code(config['problem_type'], get_variable('smaller_is_better', False)) + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + input_drift_importer = importModule() + importModules(input_drift_importer, drifter.get_input_drift_import_modules()) + code = file_header(usecase) + code += input_drift_importer.getCode() + code += drifter.get_input_drift_code() + with open(deploy_path/""input_drift.py"", ""w"") as f: + f.write(code) + generated_files.append(""input_drift.py"") + + with open (deploy_path/""config.json"", ""w"") as f: + 
json.dump(get_drift_params(config), f, indent=4) + generated_files.append(""config.json"") + + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + f.write(importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer(), input_drift_importer])) + generated_files.append(""requirements.txt"") + create_docker_file('input_drift', deploy_path,config['modelName'], generated_files) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +import platform +from mlac.ml.core import * +from .utility import * + +output_file_map = { + 'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'} +} + +def get_selector_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""cat_features"",""n_components""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_selector(config): + select = selector() + importer = importModule() + function = global_function() + importModules(importer,select.getPrefixModules()) + select.addPrefixCode() + if config[""target_feature""] in config[""train_features""]: + config[""train_features""].remove(config[""target_feature""]) + select.addStatement(""train_features = df.columns.tolist()"") + select.addStatement(""target_feature = config['target_feature']"") + select.addStatement(""train_features.remove(target_feature)"") + select.addStatement(""cat_features = prev_step_output['cat_features']"") + select.add_variable('total_features',[]) + select.addStatement(""log.log_dataframe(df)"") + methods = config.get(""feature_selector"", None) + feature_reducer = config.get(""feature_reducer"", None) + select.addStatement(""selected_features = {}"") + select.addStatement(""meta_data['featureengineering']= {}"") + if feature_reducer: + update_variable('feature_reducer', True) + select.addStatement(f""log.info('Running dimensionality reduction technique( {feature_reducer})')"") + if feature_reducer == 'pca': + importer.addModule('PCA', mod_from='sklearn.decomposition') + if int(config[""n_components""]) == 0: + select.addStatement(""dimension_reducer = PCA(n_components='mle',svd_solver = 'full')"") + elif int(config[""n_components""]) < 1: + select.addStatement(""dimension_reducer = PCA(n_components=config['n_components'],svd_solver = 'full')"") + else: + select.addStatement(""dimension_reducer = PCA(n_components=config['n_components'])"") + elif feature_reducer == 'svd': + importer.addModule('TruncatedSVD', mod_from='sklearn.decomposition') + if config[""n_components""] < 2: + config[""n_components""] = 2 + select.addStatement(""dimension_reducer = TruncatedSVD(n_components=config['n_components'], n_iter=7, random_state=42)"") + elif feature_reducer == 'factoranalysis': + importer.addModule('FactorAnalysis', mod_from='sklearn.decomposition') + if config[""n_components""] == 0: + select.addStatement(""dimension_reducer = FactorAnalysis()"") + else: + 
select.addStatement(""dimension_reducer = FactorAnalysis(n_components=config['n_components'])"") + elif feature_reducer == 'ica': + importer.addModule('FastICA', mod_from='sklearn.decomposition') + if config[""n_components""] == 0: + select.addStatement(""dimension_reducer = FastICA()"") + else: + select.addStatement(""dimension_reducer = FastICA(n_components=config['n_components'])"") + select.addStatement(""pca_array = dimension_reducer.fit_transform(df[train_features])"") + select.addStatement(""pca_columns = ['pca_'+str(e) for e in list(range(pca_array.shape[1]))]"") + select.addStatement(""scaledDF = pd.DataFrame(pca_array, columns=pca_columns)"") + select.addStatement(""scaledDF[target_feature] = df[target_feature]"") + select.addStatement(""df = scaledDF"") + select.addStatement(f""selected_features['{feature_reducer}'] = pca_columns"") + select.addStatement(""total_features = df.columns.tolist()"") + select.addStatement(""meta_data['featureengineering']['feature_reducer']= {}"") + select.addStatement(""reducer_file_name = str(targetPath/IOFiles['feature_reducer'])"") + importer.addModule('joblib') + select.addStatement(""joblib.dump(dimension_reducer, reducer" +"_file_name)"") + select.addStatement(""meta_data['featureengineering']['feature_reducer']['file']= IOFiles['feature_reducer']"") + select.addStatement(""meta_data['featureengineering']['feature_reducer']['features']= train_features"") + select.addOutputFiles(output_file_map['feature_reducer']) + + elif methods: + if 'allFeatures' in methods: + addDropFeature('target_feature', 'train_features', select) + select.addStatement(""selected_features['allFeatures'] = train_features"") + if 'modelBased' in methods: + select.addStatement(f""log.info('Model Based Correlation Analysis Start')"") + select.addStatement(""model_based_feat = []"") + importer.addModule('numpy', mod_as='np') + importer.addModule('RFE', mod_from='sklearn.feature_selection') + importer.addModule('MinMaxScaler', mod_from='sklearn.preprocessing') + if config[""problem_type""] == 'classification': + importer.addModule('ExtraTreesClassifier', mod_from='sklearn.ensemble') + select.addStatement(""estimator = ExtraTreesClassifier(n_estimators=100)"") + else: + importer.addModule('Lasso', mod_from='sklearn.linear_model') + select.addStatement(""estimator = Lasso()"") + select.addStatement(""estimator.fit(df[train_features],df[target_feature])"") + select.addStatement(""rfe = RFE(estimator, n_features_to_select=1, verbose =0 )"") + select.addStatement(""rfe.fit(df[train_features],df[target_feature])"") + select.addStatement(""ranks = MinMaxScaler().fit_transform(-1*np.array([list(map(float, rfe.ranking_))]).T).T[0]"") + select.addStatement(""ranks = list(map(lambda x: round(x,2), ranks))"") + select.addStatement(""for item, rank in zip(df.columns,ranks):"") + select.addStatement(""if rank > 0.30:"", indent=2) + select.addStatement(""model_based_feat.append(item)"", indent=3) + addDropFeature('target_feature', 'model_based_feat', select) + select.addStatement(""selected_features['modelBased'] = model_based_feat"") + select.addStatement(f""log.info(f'Highly Correlated Features : {{model_based_feat}}')"") + if 'statisticalBased' in methods: + select.addStatement(f""log.info('Statistical Based Correlation Analysis Start')"") + function.add_function('start_reducer',importer) + select.addStatement(f""features = start_reducer(df, target_feature, {config['corr_threshold']},{config['var_threshold']})"") + select.addStatement(""train_features = [x for x in features if x in 
train_features]"") + select.addStatement(""cat_features = [x for x in cat_features if x in features]"") + select.addStatement(""numeric_features = df[features].select_dtypes('number').columns.tolist()"") + if config[""problem_type""] == 'classification': + function.add_function('feature_importance_class') + select.addStatement(f""statistics_based_feat = feature_importance_class(df[features], numeric_features, cat_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})"") + else: + function.add_function('feature_importance_reg') + select.addStatement(f""statistics_based_feat = feature_importance_reg(df[features], numeric_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})"") + addDropFeature('target_feature', 'statistics_based_feat', select) + select.addStatement(""selected_features['statisticalBased'] = statistics_based_feat"") + select.addStatement(f""log.info('Highly Correlated Features : {{statistics_based_feat}}')"") + select.addStatement(""total_features = list(set([x for y in selected_features.values() for x in y] + [target_feature]))"") + select.addStatement(f""df = df[total_features]"") + select.addStatement(""log.log_dataframe(df)"") + select.addSuffixCode() + importModules(importer, select.getSuffixModules()) + importModules(importer, select.getMainCodeModules()) + select.addMainCode() + + generated_files = [] + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'FeatureEngineering' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('selector') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = file_header(usecase) + code += importer.getCode() + code += select.getInputOutputFiles() + code += function.getCode() + select.addLocalFunctionsCode() + code += select.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_selector_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('selector', deploy_path,config['modelName'], generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from .imports import importModule + +supported_reader = ['sqlite', 'influx','s3'] + + + +functions_code = { + 'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""""" + +class dataReader(): + + def get_reader(self, reader_type, target_path=None, config=None): + if reader_type == 'sqlite': + return sqlite_writer(target_path=target_path) + elif reader_type == 'influx': + return Influx_writer(config=config) + elif reader_type == 'gcs': + return gcs(config=config) + elif reader_type == 'azure': + return azure(config=config) + elif reader_type == 's3': + return s3bucket(config=config) + else: + raise ValueError(reader_type) +"""""" + }, + 'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':""""""\\n\\ +class sqlite_writer(): + def __init__(self, target_path): + self.target_path = Path(target_path) + database_file = self.target_path.stem + '.db' + self.db = sqlite_db(self.target_path, database_file) + + def file_exists(self, file): + if file: + return self.db.table_exists(file) + else: + return False + + def read(self, file): + return self.db.read(file) + + def write(self, data, file): + self.db.write(data, file) + + def close(self): + self.db.close() + +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + '.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self, table_name): + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + + def write(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + + def close(self): + self.conn.close() + + """""" + }, + 
'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':""""""\\n\\ +class Influx_writer(): + + def __init__(self, config): + self.db = influx_db(config) + + def file_exists(self, file): + if file: + return self.db.table_exists(file) + else: + return False + + def read(self, file): + query = ""SELECT * FROM {}"".format(file) + if 'read_time' in self.db_config.keys() and self.db_config['read_time']: + query += f"" time > now() - {self.db_config['read_time']}"" + return self.db.read(query) + + def write(self, data, file): + self.db.write(data, file) + + def close(self): + pass + + +class influx_db(): + + def __init__(self, config): + self.host = config['host'] + self.port = config['port'] + self.user = config.get('user', None) + self.password = config.get('password', None) + self.token = config.get('token', None) + self.database = config['database'] + self.measurement = config['measurement'] + self.tags = config['tags'] + self.client = self.get_client() + + def table_exists(self, name): + query = f""SHOW MEASUREMENTS ON {self.database}"" + result = self.client(query) + for measurement in result['measurements']: + if measurement['name'] == name: + return True + return False + + def read(self, query)->pd.DataFrame: + cursor = self.client.query(query) + points = cursor.get_points() + my_list=list(points) + df=pd.DataFrame(my_list) + return df + + def get_client(self): + headers = None + if self.token: + headers={""Authorization"": self.token} + client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers) + databases = client.get_list_database() + databases = [x['name'] for x in databases] + if self.database not in databases: + client.create_database(self.database) + return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers) + + def write(self,data, measurement=None): + if isinstance(data, pd.DataFrame): + sorted_col = data.columns.tolist() + sorted_col.sort() + data = data[sorted_col] + data = data.to_dict(orient='records') + if not measurement: + measurement = self.measurement + for row in data: + if 'time' in row.keys(): + p = '%Y-%m-%dT%H:%M:%S.%fZ' + time_str = datetime.strptime(row['time'], p) + del row['time'] + else: + time_str = None + if 'model_ver' in row.keys(): + self.tags['model_ver']= row['model_ver'] + del row['model_ver'] + json_body = [{ + 'measurement': measurement, + 'time': time_str, + 'tags': self.tags, + 'fields': row + }] + self.client.write_points(json_body) + + def delete(self, name): + pass + + def close(self): + self.client.close() +"""""" +}, + 's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +class s3bucket(): + + def __init__(self, config={}): + if 's3' in config.keys(): + config = config['s3'] + aws_access_key_id = config.get('aws_access_key_id','') +" +" aws_secret_access_key = config.get('aws_secret_access_key','') + bucket_name = config.get('bucket_name','') + if not aws_access_key_id: + raise ValueError('aws_access_key_id can not be empty') + if not aws_secret_access_key: + raise ValueError('aws_secret_access_key can not be empty') + self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key)) + self.bucket_name = bucket_name + + def read(self, file_name): + try: + response = 
self.client.get_object(Bucket=self.bucket_name, Key=file_name) + return pd.read_csv(response['Body']) + except ClientError as ex: + if ex.response['Error']['Code'] == 'NoSuchBucket': + raise ValueError(f""Bucket '{self.bucket_name}' not found in aws s3 storage"") + elif ex.response['Error']['Code'] == 'NoSuchKey': + raise ValueError(f""File '{file_name}' not found in s3 bucket '{self.bucket_name}'"") + else: + raise + + """""" +}, + 'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +def azure(): + + def __init__(self,config={}): + if 'azure' in config.keys(): + config = config['azure'] + account_name = config.get('account_name','') + account_key = config.get('account_key','') + container_name = config.get('container_name','') + if not account_name: + raise ValueError('Account name can not be empty') + if not account_key: + raise ValueError('Account key can not be empty') + if not container_name: + raise ValueError('Container name can not be empty') + service_client = DataLakeServiceClient(account_url=""{}://{}.dfs.core.windows.net"".format(""https"", account_name), credential=account_key) + self.file_system_client = service_client.get_file_system_client(container_name) + + def read(self, directory_name): + root_dir = str(directory_name) + file_paths = self.file_system_client.get_paths(path=root_dir) + main_df = pd.DataFrame() + for path in file_paths: + if not path.is_directory: + file_client = file_system_client.get_file_client(path.name) + file_ext = Path(path.name).suffix + if file_ext in ["".csv"", "".tsv""]: + with open(csv_local, ""wb"") as my_file: + file_client.download_file().readinto(my_file) + with open(csv_local, 'r') as file: + data = file.read() + row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t']) + processed_df = pd.read_csv(csv_local, sep=row_delimiter) + elif file_ext == "".parquet"": + stream = io.BytesIO() + file_client.download_file().readinto(stream) + processed_df = pd.read_parquet(stream, engine='pyarrow') + elif file_ext == "".avro"": + with open(avro_local, ""wb"") as my_file: + file_client.download_file().readinto(my_file) + processed_df = pdx.read_avro(avro_local) + if main_df.empty: + main_df = pd.DataFrame(processed_df) + else: + main_df = main_df.append(processed_df, ignore_index=True) + return main_df + + """""" + }, + 'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':""""""\\n\\ +class gcs(): + + def __init__(self, config={}): + if 'gcs' in config.keys(): + config = config['gcs'] + account_key = config.get('account_key','') + bucket_name = config.get('bucket_name','') + if not account_key: + raise ValueError('Account key can not be empty') + if not bucket_name: + raise ValueError('bucket name can not be empty') + storage_client = storage.Client.from_service_account_json(account_key) + self.bucket = storage_client.get_bucket(bucket_name) + + def read(self, bucket_name, file_name): + data = self.bucket.blob(file_name).download_as_text() + return pd.read_csv(data, encoding = 'utf-8', sep = ',') + """""" + } +} + +class data_reader(): + + def __init__(self, reader_type=[]): + self.supported_readers = supported_reader + if isinstance(reader_type, str): + self.readers = [reader_type] + elif not reader_type: + 
self.readers = self.supported_readers + else: + self.readers = reader_type + unsupported_reader = [ x for x in self.readers if x not in self.supported_readers] + if unsupported_reader: + raise ValueError(f""reader type '{unsupported_reader}' is not supported\\nSupported readers are {self.supported_readers}"") + self.codeText = """" + self.importer = importModule() + + def get_reader_code(self, readers): + reader_code = { + 'sqlite': 'return sqlite_writer(target_path=target_path)', + 'influx': 'return Influx_writer(config=config)', + 'gcs': 'return gcs(config=config)', + 'azure': 'return azure(config=config)', + 's3': 'return s3bucket(config=config)' + } + code = ""\\n\\ndef dataReader(reader_type, target_path=None, config=None):\\n"" + for i, reader in enumerate(readers): + if not i: + code += f"" if reader_type == '{reader}':\\n"" + else: + code += f"" elif reader_type == '{reader}':\\n"" + code += f"" {reader_code[reader]}\\n"" + if readers: + code += "" else:\\n"" + code += f"""""" raise ValueError(""'{{reader_type}}' not added during code generation"")\\n"""""" + else: + code += f"""""" raise ValueError(""'{{reader_type}}' not added during code generation"")\\n"""""" + return code + + def get_code(self): + code = self.get_reader_code(self.readers) + functions = [] + for reader in self.readers: + functions.append(reader) + for function in functions: + code += self.get_function_code(function) + self.codeText += self.importer.getCode() + self.codeText += code + return self.codeText + + def get_function_code(self, name): + code = """" + if name in functions_code.keys(): + code += functions_code[name]['code'] + if self.importer: + if 'imports' in functions_code[name].keys(): + for module in functions_code[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + return code + + def get_importer(self): + return self.importer + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +class output_drift(): + + def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4): + self.tab = ' ' * tab_size + self.codeText = '' + self.missing = missing + self.word2num_features = word2num_features + self.cat_encoder = cat_encoder + self.target_encoder = target_encoder + self.normalizer = normalizer + self.text_profiler = text_profiler + self.feature_reducer = feature_reducer + self.score_smaller_is_better = score_smaller_is_better + self.problem_type = problem_type + + def addDatabaseClass(self, indent=0): + text = ""\\ + \\nclass database():\\ + \\n def __init__(self, config):\\ + \\n self.host = config['host']\\ + \\n self.port = config['port']\\ + \\n self.user = config['user']\\ + \\n self.password = config['password']\\ + \\n self.database = config['database']\\ + \\n self.measurement = config['measurement']\\ + \\n self.tags = config['tags']\\ + \\n self.client = self.get_client()\\ + \\n\\ + \\n def read_data(self, query)->pd.DataFrame:\\ + \\n cursor = self.client.query(query)\\ + \\n points = cursor.get_points()\\ + \\n my_list=list(points)\\ + \\n df=pd.DataFrame(my_list)\\ + \\n return df\\ + \\n\\ + \\n def get_client(self):\\ + \\n client = InfluxDBClient(self.host,self.port,self.user,self.password)\\ + \\n databases = client.get_list_database()\\ + \\n databases = [x['name'] for x in databases]\\ + \\n if self.database not in databases:\\ + \\n client.create_database(self.database)\\ + \\n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\\ + \\n\\ + \\n def write_data(self,data):\\ + \\n if isinstance(data, pd.DataFrame):\\ + \\n sorted_col = data.columns.tolist()\\ + \\n sorted_col.sort()\\ + \\n data = data[sorted_col]\\ + \\n data = data.to_dict(orient='records')\\ + \\n for row in data:\\ + \\n if 'time' in row.keys():\\ + \\n p = '%Y-%m-%dT%H:%M:%S.%fZ'\\ + \\n time_str = datetime.strptime(row['time'], p)\\ + \\n del row['time']\\ + \\n else:\\ + \\n time_str = None\\ + \\n if 'model_ver' in row.keys():\\ + \\n self.tags['model_ver']= row['model_ver']\\ + \\n del row['model_ver']\\ + \\n json_body = [{\\ + \\n 'measurement': self.measurement,\\ + \\n 'time': time_str,\\ + \\n 'tags': self.tags,\\ + \\n 'fields': row\\ + \\n }]\\ + \\n self.client.write_points(json_body)\\ + \\n\\ + \\n def close(self):\\ + \\n self.client.close()\\ + \\n"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def addPredictClass(self, indent=0): + text = ""\\ + \\nclass predict():\\ + \\n\\ + \\n def __init__(self, base_config):\\ + \\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\ + \\n self.dataLocation = base_config['dataLocation']\\ + \\n self.db_enabled = base_config.get('db_enabled', False)\\ + \\n if self.db_enabled:\\ + \\n self.db_config = base_config['db_config']\\ + \\n home = Path.home()\\ + \\n if platform.system() == 'Windows':\\ + \\n from pathlib import WindowsPath\\ + \\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\ + \\n else:\\ + \\n from pathlib import PosixPath\\ + \\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\ + \\n if not output_model_dir.exists():\\ 
+ \\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\ + \\n\\ + \\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\ + \\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\ + \\n mlflow.set_tracking_uri(tracking_uri)\\ + \\n mlflow.set_registry_uri(registry_uri)\\ + \\n client = mlflow.tracking.MlflowClient(\\ + \\n tracking_uri=tracking_uri,\\ + \\n registry_uri=registry_uri,\\ + \\n )\\ + \\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\\ + \\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\ + \\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\ + \\n run = client.get_run(self.model.metadata.run_id)\\ + \\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\ + \\n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\ + \\n else:\\ + \\n self.artifact_path = Path(run.info.artifact_uri)\\ + \\n with open(self.artifact_path/'deploy.json', 'r') as f:\\ + \\n deployment_dict = json.load(f)\\ + \\n with open(self.artifact_path/'features.txt', 'r') as f:\\ + \\n self.train_features = f.readline()." +"rstrip().split(',')\\ + \\n\\ + \\n self.dataLocation = base_config['dataLocation']\\ + \\n self.selected_features = deployment_dict['load_data']['selected_features']\\ + \\n self.target_feature = deployment_dict['load_data']['target_feature']\\ + \\n self.output_model_dir = output_model_dir"" + if self.missing: + text += ""\\n self.missing_values = deployment_dict['transformation']['fillna']"" + if self.word2num_features: + text += ""\\n self.word2num_features = deployment_dict['transformation']['word2num_features']"" + if self.cat_encoder == 'labelencoding': + text += ""\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']"" + elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'): + text += ""\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"" + text += ""\\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"" + if self.target_encoder: + text += ""\\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"" + if self.normalizer: + text += ""\\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\\ +\\n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"" + if self.text_profiler: + text += ""\\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\\ +\\n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"" + if self.feature_reducer: + text += ""\\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\\ +\\n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"" + text += """""" + + def read_data_from_db(self): + if self.db_enabled: + try: + db = database(self.db_config) + query = ""SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''"".format(db.measurement, self.model_version, self.target_feature) + if 'read_time' in self.db_config.keys() and self.db_config['read_time']: + query += f"" time > now() - {self.db_config['read_time']}"" + data = db.read_data(query) + except: + raise ValueError('Unable to read from the database') + 
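+ # Close the database handle in the finally block whether or not the read succeeded.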
finally: + if db: + db.close() + return data + return None"""""" + text += ""\\ + \\n def predict(self, data):\\ + \\n df = pd.DataFrame()\\ + \\n if Path(data).exists():\\ + \\n if Path(data).suffix == '.tsv':\\ + \\n df=read_data(data,encoding='utf-8',sep='\\t')\\ + \\n elif Path(data).suffix == '.csv':\\ + \\n df=read_data(data,encoding='utf-8')\\ + \\n else:\\ + \\n if Path(data).suffix == '.json':\\ + \\n jsonData = read_json(data)\\ + \\n df = pd.json_normalize(jsonData)\\ + \\n elif is_file_name_url(data):\\ + \\n df = read_data(data,encoding='utf-8')\\ + \\n else:\\ + \\n jsonData = json.loads(data)\\ + \\n df = pd.json_normalize(jsonData)\\ + \\n if len(df) == 0:\\ + \\n raise ValueError('No data record found')\\ + \\n missing_features = [x for x in self.selected_features if x not in df.columns]\\ + \\n if missing_features:\\ + \\n raise ValueError(f'some feature/s is/are missing: {missing_features}')\\ + \\n if self.target_feature not in df.columns:\\ + \\n raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\\ + \\n df_copy = df.copy()\\ + \\n df = df[self.selected_features]"" + if self.word2num_features: + text += ""\\n for feat in self.word2num_features:"" + text += ""\\n df[ feat ] = df[feat].apply(lambda x: s2n(x))"" + if self.missing: + text += ""\\n df.fillna(self.missing_values, inplace=True)"" + if self.cat_encoder == 'labelencoding': + text += ""\\n df.replace(self.cat_encoder, inplace=True)"" + elif self.cat_encoder == 'targetencoding': + text += ""\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"" + text += ""\\n df = cat_enc.transform(df)"" + elif self.cat_encoder == 'onehotencoding': + text += ""\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"" + text += ""\\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"" + text += ""\\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"" + if self.normalizer: + text += ""\\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"" + if self.text_profiler: + text += ""\\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\\ +\\n df_vect=self.text_profiler.transform(text_corpus)\\ +\\n if isinstance(df_vect, np.ndarray):\\ +\\n df1 = pd.DataFrame(df_vect)\\ +\\n else:\\ +\\n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\\ +\\n df1 = df1.add_suffix('_vect')\\ +\\n df = pd.concat([df, df1],axis=1)"" + if self.feature_reducer: + text += ""\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"" + else: + text += ""\\n df = df[self.train_features]"" + if self.target_encoder: + text += ""\\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\ + \\n df_copy['prediction'] = output.idxmax(axis=1)"" + else: + text += ""\\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\\ + \\n df_copy['prediction'] = output"" + text += ""\\n return df_copy"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def getClassificationMatrixCode(self, indent=0): + text = ""\\ + \\ndef get_classification_metrices(actual_values, predicted_values):\\ + \\n result = {}\\ + \\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\ + \\n avg_precision = sklearn.metrics.precision_score(actual_values, 
predicted_values,\\ + \\n average='macro')\\ + \\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n\\ + \\n result['accuracy'] = accuracy_score\\ + \\n result['precision'] = avg_precision\\ + \\n result['recall'] = avg_recall\\ + \\n result['f1'] = avg_f1\\ + \\n return result\\ + \\n\\ + "" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def getRegrssionMatrixCode(self, indent=0): + text = ""\\ + \\ndef get_regression_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n\\ + \\n me = np.mean(predicted_values - actual_values)\\ + \\n sde = np.std(predicted_values - actual_values, ddof = 1)\\ + \\n\\ + \\n abs_err = np.abs(predicted_values - actual_values)\\ + \\n mae = np.mean(abs_err)\\ + \\n sdae = np.std(abs_err, ddof = 1)\\ + \\n\\ + \\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\ + \\n mape = np.mean(abs_perc_err)\\ + \\n sdape = np.std(abs_perc_err, ddof = 1)\\ + \\n\\ + \\n result['mean_error'] = me\\ + \\n result['mean_abs_error'] = mae\\ + \\n result['mean_abs_perc_error'] = mape\\ + \\n result['error_std'] = sde\\ + \\n result['abs_error_std'] = sdae\\ + \\n result['abs_perc_error_std'] = sdape\\ + \\n return result\\ + \\n\\ + "" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') + return text + + def addSuffixCode(self, indent=1): + text =""\\n\\ + \\ndef check_drift( config):\\ + \\n prediction = predict(config)\\ + \\n usecase = config['modelName'] + '_' + config['modelVersion']\\ + \\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\\ + \\n if not train_data_path.exists():\\ + \\n raise ValueError(f'Training data not found at {train_data_path}')\\ + \\n curr_with_pred = prediction.read_data_from_db()\\ + \\n if prediction.target_feature not in curr_with_pred.columns:\\ + \\n raise ValueError('Ground truth not updated for corresponding data in database')\\ + \\n train_with_pred = prediction.predict(train_data_path)\\ + \\n performance = {}"" + if self.problem_type == 'classification': + text += ""\\n\\ + \\n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\ + \\n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"" + else: + text += ""\\n\\ + \\n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\ + \\n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"" + text += ""\\n return performance"" + text += ""\\n\\ + \\nif __name__ == '__main__':\\ + \\n try:\\ + \\n if len(sys.argv) < 2:\\ + \\n raise ValueError('config file not present')\\ + \\n config = sys.argv[1]\\ + \\n if Path(config).is_file() and Path(config).suffix == '.json':\\ + \\n with open(config, 'r') as f:\\ + \\n config = json.load(f)\\ + \\n else:\\ + \\n config = json.loads(config)\\ + \\n output = check_drift(config)\\ + \\n status = {'Status':'Success','Message':json.loads(output)}\\ + \\n print('output_drift:'+json.dumps(status))\\ + \\n except Exception as e:\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print('output_drift:'+json.dumps(status))"" + if indent: + text = text.replace('\\n', (self.tab * indent) + '\\n') 
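+ # Shift the whole generated block right by 'indent' tab stops before returning it.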
+ return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def generateCode(self): + self.codeText += self.addDatabaseClass() + self.codeText += self.addPredictClass() + if self.problem_type == 'classification': + self.codeText += self.getClassificationMatrixCode() + elif self.problem_type == 'regression': + self.codeText += self.getRegrssionMatrixCode() + else: + raise ValueError(f""Unsupported problem type: {self.problem_type}"") + self.codeText += self.addSuffixCode() + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class transformer(): + + def __init__(self, indent=0, tab_size=4): + self.df_name = 'df' + self.tab = ' ' * tab_size + self.codeText = """" + self.transformers = [] + self.TxCols = [] + self.imputers = {} + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','transformedData' : 'transformedData.dat','normalization' : 'normalization.pkl'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self." 
+"input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config"" + return text + + def getPrefixModules(self): + modules = [ + {'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ,{'module':'warnings'} + ,{'module':'json'} + ,{'module':'logging'} + ,{'module':'joblib'} + ,{'module':'MinMaxScaler', 'mod_from':'sklearn.preprocessing'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += """""" +def transformation(config, targetPath, log): + dataLoc = targetPath / IOFiles['inputData'] + if not dataLoc.exists(): + return {'Status': 'Failure', 'Message': 'Data location does not exists.'} + df = read_data(dataLoc) + log.log_dataframe(df) + + target_feature = config['target_feature'] + dateTimeFeature=config['dateTimeFeature'] + df.set_index(dateTimeFeature, inplace=True) + df = df.dropna() + df=df.fillna(df.mean()) + if len(target_feature) == 1: + trainX = df[target_feature].to_numpy().reshape(-1,1) + else: + trainX = df[target_feature].to_numpy() + + scaler = MinMaxScaler(feature_range=(0, 1)) + trainX = scaler.fit_transform(trainX) + normalization_file_name = str(targetPath / IOFiles['normalization']) + joblib.dump(scaler, normalization_file_name) + + df[target_feature] = trainX + log.log_dataframe(df) + csv_path = str(targetPath / IOFiles['transformedData']) + write_data(df, csv_path, index=True) + + status = {'Status': 'Success', 'DataFilePath': IOFiles['transformedData'], + 'target_feature': target_feature,'dateTimeFeature':dateTimeFeature, + ""Normalization_file"":normalization_file_name } + meta_data['transformation'] = {} + meta_data['transformation']['Status'] = status + write_json(meta_data, str(targetPath / IOFiles['metaData'])) + log.info(f'Transformed data saved at {csv_path}') + log.info(f'output: {status}') + return json.dumps(status) + """""" + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'sys'} + ,{'module':'json'} + ,{'module':'logging'} + ,{'module':'argparse'} + ] + return modules + + def addMainCode(self, indent=1): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + log_file = targetPath / IOFiles['log'] + log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(transformation(config, targetPath, log)) + except Exception as e: + + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + def addValidateConfigCode(self, indent=1): + self.function_code += 
self.__addValidateConfigCode() + + def addLocalFunctionsCode(self): + self.addValidateConfigCode() + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self, indent=1): + return self.function_code + '\\n' + self.codeText + + def getDFName(self): + return self.df_name + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class register(): + + def __init__(self, importer, indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.codeText = """" + self.function_code = """" + self.importer = importer + self.input_files = {} + self.output_files = {} + self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','metrics': 'metrics.json','production': 'production.json'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self, models=None): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = utils.read_json(config_file)\\ + \\n return config\\ + "" + return text + + def addLocalFunctionsCode(self, models): + self.function_code += self.__addValidateConfigCode(models) + + def addPrefixCode(self, smaller_is_better=False, indent=1): + compare = 'min' if smaller_is_better else 'max' + self.codeText += f"""""" +def get_best_model(run_path): + models_path = [d for d in run_path.iterdir() if d.is_dir] + scores = {{}} + for model in models_path: + metrics = utils.read_json(model/IOFiles['metrics']) + if metrics.get('score', None): + scores[model.stem] = metrics['score'] + best_model = {compare}(scores, key=scores.get) + return best_model + +def __merge_logs(log_file_sequence,path, files): + if log_file_sequence['first'] in files: + with open(path/log_file_sequence['first'], 'r') as f: + main_log = f.read() + files.remove(log_file_sequence['first']) + for file in files: + with open(path/file, 'r') as f: + main_log = main_log + f.read() + (path/file).unlink() + with open(path/log_file_sequence['merged'], 'w') as f: + f.write(main_log) + +def merge_log_files(folder, models): + 
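+    # Collect each per-model '<model>_aion.log' file and fold it into the main aion.log.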
log_file_sequence = {{ + 'first': 'aion.log', + 'merged': 'aion.log' + }} + log_file_suffix = '_aion.log' + log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()] + log_files.append(log_file_sequence['first']) + __merge_logs(log_file_sequence, folder, log_files) + +def register(config, targetPath, log): + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = utils.read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {{meta_data_file}}') + run_id = meta_data['monitoring']['runId'] + usecase = config['targetPath'] + current_run_path = targetPath/'runs'/str(run_id) + register_model_name = get_best_model(current_run_path) + models = config['models'] + merge_log_files(targetPath, models) + meta_data['register'] = {{'runId':run_id, 'model': register_model_name}} + utils.write_json(meta_data, targetPath/IOFiles['metaData']) + utils.write_json({{'Model':register_model_name,'runNo':str(run_id)}}, targetPath/IOFiles['production']) + status = {{'Status':'Success','Message':f'Model Registered: {{register_model_name}}'}} + log.info(f'output: {{status}}') + return json.dumps(status) + """""" + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'json'} + ] + return modules + + def addMainCode(self, models, indent=1): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + log_file = targetPath / IOFiles['log'] + log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(register(config, targetPath, log)) + except Exception as e: + + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def getCode(self, indent=1): + return self.function_code + '\\n' + self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" +from .imports import importModule + +utility_functions = { +'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], +} + +#TODO convert read and write functions in to class functions +functions_code = { + 'read_json':{'imports':[{'mod':'json'}],'code':""\\n\\ + \\ndef read_json(file_path):\\ + \\n data = None\\ + \\n with open(file_path,'r') as f:\\ + \\n data = json.load(f)\\ + \\n return data\\ + \\n""}, + 'write_json':{'imports':[{'mod':'json'}],'code':""\\n\\ + \\ndef write_json(data, file_path):\\ + \\n with open(file_path,'w') as f:\\ + \\n json.dump(data, f)\\ + \\n""}, + 'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef read_data(file_path, encoding='utf-8', sep=','):\\ + \\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\ + \\n""}, + 'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef write_data(data, file_path, index=False):\\ + \\n return data.to_csv(file_path, index=index)\\ + \\n\\ + \\n#Uncomment and change below code for google storage\\ + \\n#from google.cloud import storage\\ + \\n#def write_data(data, file_path, index=False):\\ + \\n# file_name= file_path.name\\ + \\n# data.to_csv('output_data.csv')\\ + \\n# storage_client = storage.Client()\\ + \\n# bucket = storage_client.bucket('aion_data')\\ + \\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\ + \\n# return data\\ + \\n""}, + 'is_file_name_url':{'imports':[],'code':""\\n\\ + \\ndef is_file_name" +"_url(file_name):\\ + \\n supported_urls_starts_with = ('gs://','https://','http://')\\ + \\n return file_name.startswith(supported_urls_starts_with)\\ + \\n""}, + 'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':""\\n\\ + \\nclass logger():\\ + \\n #setup the logger\\ + \\n def __init__(self, log_file, mode='w', logger_name=None):\\ + \\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\ + \\n self.log = logging.getLogger(logger_name)\\ + \\n\\ + \\n #get logger\\ + \\n def getLogger(self):\\ + \\n return self.log\\ + \\n\\ + \\n def info(self, msg):\\ + \\n self.log.info(msg)\\ + \\n\\ + \\n def error(self, msg, exc_info=False):\\ + \\n self.log.error(msg,exc_info)\\ + \\n\\ + \\n # format and log dataframe\\ + \\n def log_dataframe(self, df, rows=2, msg=None):\\ + \\n buffer = io.StringIO()\\ + \\n df.info(buf=buffer)\\ + \\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\\ + \\n log_text += '\\\\n\\\\t'+str(df.head(rows)).replace('\\\\n','\\\\n\\\\t')\\ + \\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\ + \\n self.log.info(log_text)\\ + \\n""}, +} + +class utility_function(): + + def __init__(self, module): + if module in utility_functions.keys(): + self.module_name = module + 
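+ # Module names without a predefined utility list are recorded as None, so get_code() emits no helper functions for them.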
else: + self.module_name = None + self.importer = importModule() + self.codeText = """" + + def get_code(self): + code = """" + if self.module_name: + functions = utility_functions[self.module_name] + for function in functions: + self.codeText += self.get_function_code(function) + code = self.importer.getCode() + code += self.codeText + return code + + def get_function_code(self, name): + code = """" + if name in functions_code.keys(): + code += functions_code[name]['code'] + if self.importer: + if 'imports' in functions_code[name].keys(): + for module in functions_code[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + return code + + def get_importer(self): + return self.importer + +if __name__ == '__main__': + obj = utility_function('load_data') + p = obj.get_utility_code() + print(p) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from mlac.timeseries.core.imports import importModule +from mlac.timeseries.core.load_data import tabularDataReader +from mlac.timeseries.core.transformer import transformer as profiler +from mlac.timeseries.core.selector import selector +from mlac.timeseries.core.trainer import learner +from mlac.timeseries.core.register import register +from mlac.timeseries.core.deploy import deploy +from mlac.timeseries.core.drift_analysis import drift +from mlac.timeseries.core.functions import global_function +from mlac.timeseries.core.data_reader import data_reader +from mlac.timeseries.core.utility import utility_function + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" +import json + +class deploy(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = """" + self.input_files = {} + self.output_files = {} + self.addInputFiles({'metaData' : 'modelMetaData.json','log':'predict.log'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + text += '\\n' + text += self.getOutputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def addStatement(self, statement, indent=1): + pass + + def getPredictionCodeModules(self): + modules = [{'module':'json'} + ,{'module':'joblib'} + ,{'module':'pandas', 'mod_as':'pd'} + ,{'module':'numpy', 'mod_as':'np'} + ,{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'json_normalize', 'mod_from':'pandas'} + ,{'module':'load_model', 'mod_from':'tensorflow.keras.models'} + ] + return modules + + def addPredictionCode(self): + self.codeText += """""" +class deploy(): + + def __init__(self, base_config, log=None): + self.targetPath = (Path('aion') / base_config['targetPath']).resolve() + if log: + self.logger = log + else: + log_file = self.targetPath / IOFiles['log'] + self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + self.initialize(base_config) + except Exception as e: + self.logger.error(e, exc_info=True) + + def initialize(self, base_config): + targetPath = Path('aion') / base_config['targetPath'] + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = utils.read_json(meta_data_file) + self.dateTimeFeature = meta_data['training']['dateTimeFeature'] + self.targetFeature = meta_data['training']['target_feature'] + + normalization_file = meta_data['transformation']['Status']['Normalization_file'] + self.normalizer = joblib.load(normalization_file) + self.lag_order = base_config['lag_order'] + self.noofforecasts = base_config['noofforecasts'] + run_id = str(meta_data['register']['runId']) + model_path = str(targetPath/'runs'/str(meta_data['register']['runId'])/meta_data['register']['model']/'model') + self.model = load_model(model_path) + self.model_name = meta_data['register']['model'] + + def predict(self, data=None): + try: + return self.__predict(data) + except Exception as e: + if self.logger: + self.logger.error(e, exc_info=True) + raise ValueError(json.dumps({'Status': 'Failure', 'Message': str(e)})) + + def __predict(self, data=None): + jsonData = json.loads(data) + + dataFrame = json_normalize(jsonData) + + xtrain = dataFrame + if len(dataFrame) == 0: + raise ValueError('No data record found') + + df_l = len(dataFrame) + pred_threshold = 0.1 + max_pred_by_user = round((df_l) * pred_threshold) + # prediction for 24 steps or next 24 hours + if self.noofforecasts == -1: + self.noofforecasts = max_pred_by_user + 
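+        # Intended to cap the forecast count at max_pred_by_user; note the comparison below is done on strings.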
no_of_prediction = self.noofforecasts + if (str(no_of_prediction) > str(max_pred_by_user)): + no_of_prediction = max_pred_by_user + noofforecasts = no_of_prediction + + # self.sfeatures.remove(self.datetimeFeature) + features = self.targetFeature + if len(features) == 1: + xt = xtrain[features].values + else: + xt = xtrain[features].values + xt = xt.astype('float32') + xt = self.normalizer.transform(xt) + pred_data = xt + y_future = [] + self.lag_order = int(self.lag_order) + for i in range(int(no_of_prediction)): + pdata = pred_data[-self.lag_order:] + if len(features) == 1: + pdata = pdata.reshape((1, self.lag_order)) + else: + pdata = pdata.reshape((1, self.lag_order, len(features))) + + if (len(features) > 1): + pred = self.model.predict(pdata) + predout = self.normalizer.inverse_transform(pred) + y_future.append(predout) + pred_data = np.append(pred_data, pred, axis=0) + else: + pred = self.model.predict(pdata) + predout = self.normalizer.inverse_transform(pred) + y_future.append(predout.flatten()[-1]) + pred_data = np.append(pred_data, pred) + pred = pd.DataFrame(index=range(0, len(y_future)), columns=self.targetFeature) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + predictions = pred + forecast_output = predictions.to_json(orient='records') + return forecast_output + """""" + + def getCode(self): + return self.codeText + + def getServiceCode(self): + return """""" + +from http.server import BaseHTTPRequestHandler,HTTPServer +from socketserver import ThreadingMixIn +import os +from os.path import expanduser +import platform +import threading +import subprocess +import argparse +import re +import cgi +import json +import shutil +import logging +import sys +import time +import seaborn as sns +from pathlib import Path +from predict import deploy +import pandas as pd +import scipy.stats as st +import numpy as np +import warnings +from utility import * + +warnings.filterwarnings(""ignore"") +config_input = None + +IOFiles = { + ""inputData"": ""rawData.dat"", + ""metaData"": ""modelMetaData.json"", + ""production"": ""production.json"", + ""log"": ""aion.log"", + ""monitoring"":""monitoring.json"", + ""prodData"": ""prodData"", + ""prodDataGT"":""prodDataGT"" +} + +def DistributionFinder(data): + try: + distributionName = """" + sse = 0.0 + KStestStatic = 0.0 + dataType = """" + if (data.dtype == ""float64"" or data.dtype == ""float32""): + dataType = ""Continuous"" + elif (data.dtype == ""int""): + dataType = ""Discrete"" + elif (data.dtype == ""int64""): + dataType = ""Discrete"" + if (dataType == ""Discrete""): + distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] + index, counts = np.unique(data.astype(int), return_counts=True) + + if (len(index) >= 2): + best_sse = np.inf + y1 = [] + total = sum(counts) + mean = float(sum(index * counts)) / total + variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) + dispersion = mean / float(variance) + theta = 1 / float(dispersion) + r = mean * (float(theta) / 1 - theta) + + for j in counts: + y1.append(float(j) / total) + + pmf1 = st.bernoulli.pmf(index, mean) + pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) + pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) + pmf4 = st.nbinom.pmf(index, mean, r) + pmf5 = st.poisson.pmf(index, mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1 - pmf5, 2.0)) + + sselist = [sse1, sse2, 
sse3, sse4, sse5] + best_distribution = 'NA' + for i in range(0, len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName = best_distribution + sse = best_sse + + elif (dataType == ""Continuous""): + + distributions = [st." +"uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, + st.gamma, st.beta] + best_distribution = st.norm.name + best_sse = np.inf + datamin = data.min() + datamax = data.max() + nrange = datamax - datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + params = distribution.fit(data.astype(float)) + arg = params[:-2] + loc = params[-2] + scale = params[-1] + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if (best_sse > sse > 0): + best_distribution = distribution.name + best_sse = sse + distributionName = best_distribution + sse = best_sse + except: + response = str(sys.exc_info()[0]) + message = 'Job has Failed' + response + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + print(message) + return distributionName, sse + +def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): + import matplotlib.pyplot as plt + import math + import io, base64, urllib + np.seterr(divide='ignore', invalid='ignore') + try: + plt.clf() + except: + pass + plt.rcParams.update({'figure.max_open_warning': 0}) + sns.set(color_codes=True) + pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + if len(feature) > 4: + numneroffeatures = len(feature) + plt.figure(figsize=(10, numneroffeatures*2)) + else: + plt.figure(figsize=(10,5)) + + for i in enumerate(feature): + + dataType = dataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) + dataframe[i[1]] = dataframe[i[1]].cat.codes + dataframe[i[1]] = dataframe[i[1]].astype(int) + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) + else: + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) + + plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) + plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) + distname, sse = DistributionFinder(dataframe[i[1]]) + print(distname) + ax = sns.distplot(dataframe[i[1]], label=distname) + ax.legend(loc='best') + if newdataframe.empty == False: + dataType = newdataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) + newdataframe[i[1]] = newdataframe[i[1]].cat.codes + newdataframe[i[1]] = newdataframe[i[1]].astype(int) + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) + else: + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) + distname, sse = DistributionFinder(newdataframe[i[1]]) + print(distname) + ax = sns.distplot(newdataframe[i[1]],label=distname) + ax.legend(loc='best') + buf = io.BytesIO() + plt.savefig(buf, format='png') + buf.seek(0) + string = base64.b64encode(buf.read()) + uri = urllib.parse.quote(string) + return uri + +def read_json(file_path): + data = None + with open(file_path,'r') as f: + data = json.load(f) + return data + +class 
HTTPRequestHandler(BaseHTTPRequestHandler): + + def do_POST(self): + print('PYTHON ######## REQUEST ####### STARTED') + if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + usecase = self.path.split('/')[-2] + if usecase.lower() == config_input['targetPath'].lower(): + operation = self.path.split('/')[-1] + data = json.loads(data) + dataStr = json.dumps(data) + if operation.lower() == 'predict': + output=deployobj.predict(dataStr) + resp = output + elif operation.lower() == 'groundtruth': + gtObj = groundtruth(config_input) + output = gtObj.actual(dataStr) + resp = output + elif operation.lower() == 'delete': + targetPath = Path('aion')/config_input['targetPath'] + for file in data: + x = targetPath/file + if x.exists(): + os.remove(x) + resp = json.dumps({'Status':'Success'}) + else: + outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'}) + resp = outputStr + else: + outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'}) + resp = outputStr + + else: + outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'}) + resp = outputStr + resp=resp+'\\\\n' + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print('python ==> else1') + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print('PYTHON ######## REQUEST ####### ENDED') + return + + def do_GET(self): + print('PYTHON ######## REQUEST ####### STARTED') + if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): + usecase = self.path.split('/')[-2] + self.send_response(200) + self.targetPath = Path('aion')/config_input['targetPath'] + meta_data_file = self.targetPath/IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + production_file = self.targetPath/IOFiles['production'] + if production_file.exists(): + production_data = read_json(production_file) + else: + raise ValueError(f'Production Details not found: {production_file}') + operation = self.path.split('/')[-1] + if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'): + self.send_header('Content-Type', 'text/html') + self.end_headers() + ModelString = production_data['Model'] + ModelPerformance = ModelString+'_performance.json' + performance_file = self.targetPath/ModelPerformance + if performance_file.exists(): + performance_data = read_json(performance_file) + else: + raise ValueError(f'Production Details not found: {performance_data}') + Scoring_Creteria = performance_data['scoring_criteria'] + train_score = round(performance_data['metrices']['train_score'],2) + test_score = round(performance_data['metrices']['test_score'],2) + current_score = 'NA' + monitoring = read_json(self.targetPath/IOFiles['monitoring']) + reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config']) + inputDatafile = self.targetPath/IOFiles['inputData'] + NoOfPrediction = 0 + NoOfGroundTruth = 0 + inputdistribution = '' + if reader.file_exists(IOFiles['prodData']): + dfPredict = reader.read(IOFiles['prodData']) + dfinput = pd.read_csv(inputDatafile) + 
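+                # Build the drift visualisation by comparing the training data distribution with production predictions.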
features = meta_data['training']['features'] + inputdistribution = getDriftDistribution(features,dfinput,dfPredict) + NoOfPrediction = len(dfPredict) + if reader.file_exists(IOFiles['prodDataGT']): + dfGroundTruth = reader.read(IOFiles['prodDataGT']) + NoOfGroundTruth = len(dfGroundTruth) + common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()] + proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner') + if Scoring_Creteria.lower() == 'accuracy': + from sklearn.metrics import accuracy_score + current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction']) + current_score = round((current_score*100),2) + elif Scoring_Creteria.lower() == 'recall': + from sklearn.metrics import accuracy_score + current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro') + current_score = round((current_score*100),2) + msg = \\"""""" + +Performance Details + + + +

+<style> table, th, td {border} </style>
+<h2>Deployed Model:{ModelString}</h2>
+<table>
+<tr><td>No of Prediction</td><td>{NoOfPrediction}</td></tr>
+<tr><td>No of GroundTruth</td><td>{NoOfGroundTruth}</td></tr>
+</table>
+<br>
+<table>
+<tr><th>Score Type</th><th>Train Score</th><th>Test Score</th><th>Production Score</th></tr>
+<tr><td>{Scoring_Creteria}</td><td>{train_score}</td><td>{test_score}</td><td>{current_score}</td></tr>
+</table>
+<br>
+<img src=""data:image/png;base64,{newDataDrift}"">
+ + + +\\"""""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution) + elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'): + self.send_header('Content-Type', 'text/plain') + self.end_headers() + log_file = self.targetPath/IOFiles['log'] + if log_file.exists(): + with open(log_file) as f: + msg = f.read() + f.close() + else: + raise ValueError(f'Log Details not found: {log_file}') + else: + self.send_header('Content-Type', 'application/json') + self.end_headers() + features = meta_data['load_data']['selected_features'] + bodydes='[' + for x in features: + if bodydes != '[': + bodydes = bodydes+',' + bodydes = bodydes+'{""'+x+'"":""value""}' + bodydes+=']' + urltext = '/AION/'+config_input['targetPath']+'/predict' + urltextgth='/AION/'+config_input['targetPath']+'/groundtruth' + urltextproduction='/AION/'+config_input['targetPath']+'/metrices' + msg=\\"""""" +" +"Version:{modelversion} +RunNo: {runNo} +URL for Prediction +================== +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} +Output: prediction,probability(if Applicable),remarks corresponding to each row. + +URL for GroundTruth +=================== +URL:{urltextgth} +RequestType: POST +Content-Type=application/json +Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. Otherwise outputdrift will not work + +URL for Model In Production Analysis +==================================== +URL:{urltextproduction} +RequestType: GET +Content-Type=application/json + +\\"""""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes) + self.wfile.write(msg.encode()) + else: + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + return + +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class file_status(): + + def __init__(self, reload_function, params, file, logger): + self.files_status = {} + self.initializeFileStatus(file) + self.reload_function = reload_function + self.params = params + self.logger = logger + + def initializeFileStatus(self, file): + self.files_status = {'path': file, 'time':file.stat().st_mtime} + + def is_file_changed(self): + if self.files_status['path'].stat().st_mtime > self.files_status['time']: + self.files_status['time'] = self.files_status['path'].stat().st_mtime + return True + return False + + def run(self): + global config_input + while( True): + time.sleep(30) + if self.is_file_changed(): + production_details = targetPath/IOFiles['production'] + if not production_details.exists(): + raise ValueError(f'Model in production details does not exist') + productionmodel = read_json(production_details) + config_file = Path(__file__).parent/'config.json' + if not Path(config_file).exists(): + raise ValueError(f'Config file is missing: {config_file}') + config_input = read_json(config_file) + config_input['deployedModel'] = productionmodel['Model'] + config_input['deployedRunNo'] = productionmodel['runNo'] + self.logger.info('Model changed Reloading.....') + self.logger.info(f'Model: {config_input[""deployedModel""]}') + 
self.logger.info(f'Version: {str(config_input[""modelVersion""])}') + self.logger.info(f'runNo: {str(config_input[""deployedRunNo""])}') + self.reload_function(config_input) + +class SimpleHttpServer(): + def __init__(self, ip, port, model_file_path,reload_function,params, logger): + self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) + self.status_checker = file_status( reload_function, params, model_file_path, logger) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + self.status_thread = threading.Thread(target=self.status_checker.run) + self.status_thread.start() + + def waitForThread(self): + self.server_thread.join() + self.status_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +if __name__=='__main__': + parser = argparse.ArgumentParser(description='HTTP Server') + parser.add_argument('-ip','--ipAddress', help='HTTP Server IP') + parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server') + args = parser.parse_args() + config_file = Path(__file__).parent/'config.json' + if not Path(config_file).exists(): + raise ValueError(f'Config file is missing: {config_file}') + config = read_json(config_file) + if args.ipAddress: + config['ipAddress'] = args.ipAddress + if args.portNo: + config['portNo'] = args.portNo + targetPath = Path('aion')/config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + production_details = targetPath/IOFiles['production'] + if not production_details.exists(): + raise ValueError(f'Model in production details does not exist') + productionmodel = read_json(production_details) + config['deployedModel'] = productionmodel['Model'] + config['deployedRunNo'] = productionmodel['runNo'] + #server = SimpleHttpServer(config['ipAddress'],int(config['portNo'])) + config_input = config + logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S') + logger = logging.getLogger(Path(__file__).parent.name) + deployobj = deploy(config_input, logger) + server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger) + logger.info('HTTP Server Running...........') + logger.info(f""IP Address: {config['ipAddress']}"") + logger.info(f""Port No.: {config['portNo']}"") + print('HTTP Server Running...........') + print('For Prediction') + print('================') + print('Request Type: Post') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/predict') + print('\\\\nFor GroundTruth') + print('================') + print('Request Type: Post') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/groundtruth') + print('\\\\nFor Help') + print('================') + print('Request Type: Get') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/help') + print('\\\\nFor Model In Production Analysis') + print('================') + print('Request Type: Get') + print('Content-Type: application/json') + print('URL: /AION/'+config['targetPath']+'/metrices') + server.start() + server.waitForThread() +"""""" """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* 
============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +class global_function(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = """" + self.available_functions = { + 'iqr':{'name':'iqrOutlier','code':f""\\n\\ndef iqrOutlier(df):\\ + \\n{self.tab}Q1 = df.quantile(0.25)\\ + \\n{self.tab}Q3 = df.quantile(0.75)\\ + \\n{self.tab}IQR = Q3 - Q1\\ + \\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\\ + \\n{self.tab}return index""}, + 'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f""\\n\\ndef zscoreOutlier(df):\\ + \\n{self.tab}z = numpy.abs(stats.zscore(df))\\ + \\n{self.tab}index = (z < 3).all(axis=1)\\ + \\n{self.tab}return index""}, + 'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f""\\n\\ndef iforestOutlier(df):\\ + \\n{self.tab}from sklearn.ensemble import IsolationForest\\ + \\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\\ + \\n{self.tab}isolation_forest.fit(df)\\ + \\n{self.tab}y_pred_train = isolation_forest.predict(df)\\ + \\n{self.tab}return y_pred_train == 1""}, + 'minMaxImputer':{'name':'minMaxImputer','code':f""\\n\\nclass minMaxImputer(TransformerMixin):\\ + \\n{self.tab}def __init__(self, strategy='max'):\\ + \\n{self.tab}{self.tab}self.strategy = strategy\\ + \\n{self.tab}def fit(self, X, y=None):\\ + \\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\\ + \\n{self.tab}{self.tab}if self.strategy == 'min':\\ + \\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\\ + \\n{self.tab}{self.tab}else:\\ + \\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\\ + \\n{self.tab}{self.tab}return self\\ + \\n{self.tab}def transform(self, X):\\ + \\n{self.tab}{self.tab}import numpy\\ + \\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)""}, + 'DummyEstimator':{'name':'DummyEstimator','code':f""\\n\\nclass DummyEstimator(BaseEstimator):\\ + \\n{self.tab}def fit(self): pass\\ + \\n{self.tab}def score(self): pass""}, + 'start_reducer':{'name':'start_reducer','code':""\\n\\ + \\ndef start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):\\ + \\n import numpy as np\\ + \\n import pandas as pd\\ + \\n import itertools\\ + \\n from sklearn.feature_selection import VarianceThreshold\\ + \\n\\ + \\n train_features = df.columns.tolist()\\ + \\n train_features.remove(target_feature)\\ + \\n df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature\\ + \\n numeric_features = df.select_dtypes(include='number').columns.tolist()\\ + \\n non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()\\ + \\n if numeric_features and var_threshold:\\ + \\n qconstantFilter = VarianceThreshold(threshold=var_threshold)\\ + \\n tempDf=df[numeric_features]\\ + \\n qconstantFilter.fit(tempDf)\\ + \\n numeric_features = [x for x,y in zip(numeric_features,qconstantFilter.get_support()) if y]\\ + \\n if numeric_features:\\ + \\n numColPairs = list(itertools.product(numeric_features, numeric_features))\\ + \\n for item in numColPairs:\\ + \\n if(item[0] == item[1]):\\ + 
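# A minimal standalone sketch of the 'iqr' and 'zscore' row filters that global_function
# emits as generated code above; the DataFrame and column names here are toy examples.
import numpy as np
import pandas as pd
from scipy import stats

df = pd.DataFrame({'a': [1, 2, 3, 4, 100], 'b': [10, 11, 9, 12, 10]})

# IQR rule: keep rows whose values all lie inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
Q1, Q3 = df.quantile(0.25), df.quantile(0.75)
IQR = Q3 - Q1
iqr_index = ~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR))).any(axis=1)

# z-score rule: keep rows whose absolute z-score is below 3 in every column
z_index = (np.abs(stats.zscore(df)) < 3).all(axis=1)

print(df[iqr_index])
print(df[z_index])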
\\n numColPairs.remove(item)\\ + \\n tempArray = []\\ + \\n for item in numColPairs:\\ + \\n tempCorr = np.abs(df[item[0]].corr(df[item[1]]))\\ + \\n if(tempCorr > corr_threshold):\\ + \\n tempArray.append(item[0])\\ + \\n tempArray = np.unique(tempArray).tolist()\\ + \\n nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))\\ + \\n groupedFeatures = []\\ + \\n if tempArray:\\ + \\n corrDic = {}\\ + \\n for feature in tempArray:\\ + \\n temp = []\\ + \\n for col in tempArray:\\ + \\n tempCorr = np.abs(df[feature].corr(df[col]))\\ + \\n temp.append(tempCorr)\\ + \\n corrDic[feature] = temp\\ + \\n #Similar correlation df\\ + \\n corrDF = pd.DataFrame(corrDic,index = tempArray)\\ + \\n corrDF.loc[:,:] = np.tril(corrDF, k=-1)\\ + \\n alreadyIn = set()\\ + \\n similarFeatures = []\\ + \\n for col in corrDF:\\ + \\n perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()\\ + \\n if perfectCorr and col not in alreadyIn:\\ + \\n alreadyIn.update(set(perfectCorr))\\ + \\n perfectCorr.append(col)\\ + \\n similarFeatures.append(perfectCorr)\\ + \\n updatedSimFeatures = []\\ + \\n for items in similarFeatures:\\ + \\n if(target_feature != '' and target_feature in items):\\ + \\n for p in items:\\ + \\n updatedSimFeatures.append(p)\\ + \\n else:\\ + \\n updatedSimFeatures.append(items[0])\\ + \\n newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))\\ + \\n updatedFeatures = list(set(newTempFeatures + non_numeric_features))\\ + \\n else:\\ + \\n updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\\ + \\n else:\\ + " +" \\n updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\\ + \\n return updatedFeatures""}, + 'feature_importance_class':{'name':'feature_importance_class','code':""\\n\\ + \\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\\ + \\n import pandas as pd\\ + \\n from sklearn.feature_selection import chi2\\ + \\n from sklearn.feature_selection import f_classif\\ + \\n from sklearn.feature_selection import mutual_info_classif\\ + \\n \\ + \\n impFeatures = []\\ + \\n if cat_features:\\ + \\n categoricalData=df[cat_features]\\ + \\n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\\ + \\n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\\ + \\n impFeatures.append(corrSeries[corrSeriescorrTh].index.tolist())\\ + \\n pearsonScore=df.corr() \\ + \\n targetPScore=abs(pearsonScore[target_feature])\\ + \\n impFeatures.append(targetPScore[targetPScorecorrTh].index.tolist())\\ + \\n pearsonScore=df.corr()\\ + \\n targetPScore=abs(pearsonScore[target_feature])\\ + \\n impFeatures.append(targetPScore[targetPScore 2):\\ + \\n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\\ + \\n else:\\ + \\n class_type = 'binary_class' if class_count == 2 else 'multi_class'\\ + \\n if score_param in scorer_mapping.keys():\\ + \\n score_param = scorer_mapping[score_param][class_type]\\ + \\n else:\\ + \\n score_param = 'accuracy'\\ + \\n return score_param""}, + 'log_dataframe':{'name':'log_dataframe','code':f""\\n\\ + \\ndef log_dataframe(df, msg=None):\\ + \\n import io\\ + \\n buffer = io.StringIO()\\ + \\n df.info(buf=buffer)\\ + \\n if msg:\\ + \\n log_text = f'Data frame after {{msg}}:'\\ + \\n else:\\ + \\n log_text = 'Data frame:'\\ + \\n log_text += '\\\\n\\\\t'+str(df.head(2)).replace('\\\\n','\\\\n\\\\t')\\ + \\n log_text += ('\\\\n\\\\t' + 
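# A compact sketch of the correlation-pruning idea in the generated start_reducer code:
# group numeric features whose pairwise |correlation| exceeds a threshold and keep one
# representative per group. The function name and toy data below are hypothetical.
import numpy as np
import pandas as pd

def drop_correlated(df: pd.DataFrame, threshold: float = 0.85) -> list:
    corr = df.corr().abs()
    # keep only the strict upper triangle so each pair is inspected once
    upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
    to_drop = [c for c in upper.columns if (upper[c] > threshold).any()]
    return [c for c in df.columns if c not in to_drop]

data = pd.DataFrame({'x': range(10), 'y': [2 * i for i in range(10)], 'z': np.random.rand(10)})
print(drop_correlated(data))   # 'y' duplicates 'x', so only one of the pair survives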
buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\ + \\n get_logger().info(log_text)""}, + 'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':""\\n\\ + \\nclass BayesSearchCV():\\ + \\n\\ + \\n def __init__(self, estimator, params, scoring, n_iter, cv):\\ + \\n self.estimator = estimator\\ + \\n self.params = params\\ + \\n self.scoring = scoring\\ + \\n self.iteration = n_iter\\ + \\n self.cv = cv\\ + \\n self.best_estimator_ = None\\ + \\n self.best_score_ = None\\ + \\n self.best_params_ = None\\ + \\n\\ + \\n def __min_fun(self, params):\\ + \\n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\\ + \\n acc = score.mean()\\ + \\n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\\ + \\n\\ + \\n def fit(self, X, y):\\ + \\n trials = Trials()\\ + \\n self.X = X\\ + \\n self.y = y\\ + \\n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\\ + \\n result = sorted(trials.results, key = lambda x: x['loss'])[0]\\ + \\n self.best_estimator_ = result['model']\\ + \\n self.best_score_ = result['score']\\ + \\n self.best_params_ = result['params']\\ + \\n self.best_estimator_.fit(X, y)\\ + \\n\\ + \\n def hyperOptParamConversion( paramSpace):\\ + \\n paramDict = {}\\ + \\n for j in list(paramSpace.keys()):\\ + \\n inp = paramSpace[j]\\ + \\n isLog = False\\ + \\n isLin = False\\ + \\n isRan = False\\ + \\n isList = False\\ + \\n isString = False\\ + \\n try:\\ + \\n # check if functions are given as input and reassign paramspace\\ + \\n v = paramSpace[j]\\ + \\n if 'logspace' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isLog = True\\ + \\n elif 'linspace' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isLin = True\\ + \\n elif 'range' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isRan = True\\ + \\n elif 'list' in paramSpace[j]:\\ + \\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\ + \\n isList = True\\ + \\n elif '[' and ']' in paramSpace[j]:\\ + \\n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\\ + \\n isList = True\\ + \\n x = paramSpace[j].split(',')\\ + \\n except:\\ + \\n x = paramSpace[j]\\ + \\n str_arg = paramSpace[j]\\ + \\n\\ + \\n # check if arguments are string\\ + \\n try:\\ + \\n test = eval(x[0])\\ + \\n except:\\ + \\n isString = True\\ + \\n\\ + \\n if isString:\\ + \\n paramDict.update({j: hp.choice(j, x)})\\ + \\n else:\\ + \\n res = eval(str_arg)\\ + \\n if isLin:\\ + \\n y = eval('np.linspace' + str(res))\\ + \\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\ + \\n elif isLog:\\ + \\n y = eval('np.logspace' + str(res))\\ + \\n paramDict.update(\\ + \\n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\\ + \\n elif isRan:\\ + \\n y = eval('np.arange' + str(res))\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n # check datatype of argument\\ + \\n elif isinstance(eval(x[0]), bool):\\ + \\n y = list(map(lambda i: eval(i), x))\\ + \\n paramDict.update({j: hp.choice(j, eval(str(y)))})\\ + \\n elif isinstance(eval(x[0]), float):\\ + \\n res = 
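# A minimal sketch of the fmin/tpe/Trials loop that the generated BayesSearchCV wraps
# around cross_val_score. The estimator and search space below are illustrative choices,
# not the pipeline's own configuration.
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = load_iris(return_X_y=True)

def objective(params):
    model = RandomForestClassifier(n_estimators=int(params['n_estimators']), random_state=0)
    acc = cross_val_score(model, X, y, scoring='accuracy', cv=3).mean()
    return {'loss': -acc, 'status': STATUS_OK, 'params': params}

trials = Trials()
space = {'n_estimators': hp.quniform('n_estimators', 10, 200, 10)}
best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=trials)
print(best, sorted(trials.results, key=lambda r: r['loss'])[0]['params'])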
eval(str_arg)\\ + \\n if len(str_arg.split(',')) == 3 and not isList:\\ + \\n y = eval('np.linspace' + str(res))\\ + \\n #print(y)\\ + \\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\ + \\n else:\\ + \\n y = list(res) if isinstance(res, tuple) else [res]\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n else:\\ + \\n res = eval(str_arg)\\ + \\n if len(str_arg.split(',')) == 3 and not isList:\\ + \\n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\\ + \\n else:\\ + \\n y = list(res) if isinstance(res, tuple) else [res]\\ + \\n paramDict.update({j: hp.choice(j, y)})\\ + \\n return paramDict""}, + 's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':""\\n\\ + \\ndef s2n(value):\\ + \\n try:\\ + \\n x=eval(value)\\ + \\n return x\\ + \\n except:\\ + \\n try:\\ + \\n return w2n.word_to_num(value)\\ + \\n except:\\ + \\n return np.nan""}, + 'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':""\\n\\ + \\ndef read_json(file_path):\\ + \\n data = None\\ + \\n with open(file_path,'r') as f:\\ + \\n data = json.load(f)\\ + \\n return data\\ + \\n\\ + \\ndef write_json(data, file_path):\\ + \\n with open(file_path,'w') as f:\\ + \\n json.dump(data, f)\\ + \\n\\ + \\ndef read_data(file_path, encoding='utf-8', sep=','):\\ + \\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\ + \\n\\ + \\ndef write_data(data, file_path, index=False):\\ + \\n return data.to_csv(file_path, index=index)\\ + \\n\\ + \\n#Uncomment and change below code for google storage\\ + \\n#def write_data(data, file_path, index=False):\\ + \\n# file_name= file_path.name\\ + \\n# data.to_csv('output_data.csv')\\ + \\n# storage_client = storage.Client()\\ + \\n# bucket = storage_client.bucket('aion_data')\\ + \\n# bucket.blob" +"('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\ + \\n# return data\\ + \\n\\ + \\ndef is_file_name_url(file_name):\\ + \\n supported_urls_starts_with = ('gs://','https://','http://')\\ + \\n return file_name.startswith(supported_urls_starts_with)\\ + \\n""}, + 'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f""\\n\\ + \\nlog = None\\ + \\ndef set_logger(log_file, mode='a'):\\ + \\n global log\\ + \\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\ + \\n log = logging.getLogger(Path(__file__).parent.name)\\ + \\n return log\\ + \\n\\ + \\ndef get_logger():\\ + \\n return log\\n""}, + 'mlflowSetPath':{'name':'mlflowSetPath','code':f""\\n\\ndef mlflowSetPath(path, name):\\ + \\n{self.tab}db_name = str(Path(path)/'mlruns')\\ + \\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\\ + \\n{self.tab}mlflow.set_experiment(str(Path(path).name))\\ + \\n""}, + 'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f""\\n\\ndef mlflow_create_experiment(config, path, name):\\ + \\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\\ + \\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\\ + \\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\\ + \\n{self.tab}client = mlflow.tracking.MlflowClient()\\ + \\n{self.tab}experiment = client.get_experiment_by_name(name)\\ + \\n{self.tab}if experiment:\\ + \\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\\ + \\n{self.tab}else:\\ + \\n{self.tab}{self.tab}experiment_id = 
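# A standalone sketch of the generated s2n helper's behaviour: coerce free-text values
# to numbers, fall back to word2number, and finally to NaN. Sample inputs are examples.
import numpy as np
from word2number import w2n

def s2n(value):
    try:
        return eval(value)                  # '3.5'  -> 3.5
    except Exception:
        try:
            return w2n.word_to_num(value)   # 'twenty one' -> 21
        except Exception:
            return np.nan                   # anything else -> NaN

print(s2n('42'), s2n('twenty one'), s2n('n/a'))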
client.create_experiment(name, artifact_uri)\\ + \\n{self.tab}return client, experiment_id\\ + \\n""}, + 'get_mlflow_uris':{'name':'get_mlflow_uris','code':f""\\n\\ndef get_mlflow_uris(config, path):\\ + \\n artifact_uri = None\\ + \\n tracking_uri_type = config.get('tracking_uri_type',None)\\ + \\n if tracking_uri_type == 'localDB':\\ + \\n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\\ + \\n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\\ + \\n tracking_uri = config['tracking_uri']\\ + \\n if config.get('artifacts_uri', None):\\ + \\n if Path(config['artifacts_uri']).exists():\\ + \\n artifact_uri = 'file:' + config['artifacts_uri']\\ + \\n else:\\ + \\n artifact_uri = config['artifacts_uri']\\ + \\n else:\\ + \\n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\\ + \\n else:\\ + \\n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\\ + \\n artifact_uri = None\\ + \\n if config.get('registry_uri', None):\\ + \\n registry_uri = config['registry_uri']\\ + \\n else:\\ + \\n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\\ + \\n return tracking_uri, artifact_uri, registry_uri\\ + \\n""}, + 'logMlflow':{'name':'logMlflow','code':f""\\n\\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\\ + \\n{self.tab}run_id = None\\ + \\n{self.tab}for k,v in params.items():\\ + \\n{self.tab}{self.tab}mlflow.log_param(k, v)\\ + \\n{self.tab}for k,v in metrices.items():\\ + \\n{self.tab}{self.tab}mlflow.log_metric(k, v)\\ + \\n{self.tab}if 'CatBoost' in algoName:\\ + \\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\\ + \\n{self.tab}else:\\ + \\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\\ + \\n{self.tab}tags['processed'] = 'no'\\ + \\n{self.tab}tags['registered'] = 'no'\\ + \\n{self.tab}mlflow.set_tags(tags)\\ + \\n{self.tab}if model_info:\\ + \\n{self.tab}{self.tab}run_id = model_info.run_id\\ + \\n{self.tab}return run_id\\ + \\n""}, + 'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':""\\ndef get_classification_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\ + \\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\ + \\n average='macro')\\ + \\n\\ + \\n result['accuracy'] = math.floor(accuracy_score*10000)/100\\ + \\n result['precision'] = math.floor(avg_precision*10000)/100\\ + \\n result['recall'] = math.floor(avg_recall*10000)/100\\ + \\n result['f1'] = math.floor(avg_f1*10000)/100\\ + \\n return result\\ + \\n""}, + 'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':""\\ndef get_regression_metrices( actual_values, predicted_values):\\ + \\n result = {}\\ + \\n\\ + \\n me = np.mean(predicted_values - actual_values)\\ + \\n sde = np.std(predicted_values - actual_values, ddof = 1)\\ + \\n\\ + \\n abs_err = np.abs(predicted_values - actual_values)\\ + \\n mae = np.mean(abs_err)\\ + \\n sdae = np.std(abs_err, ddof = 1)\\ + \\n\\ + \\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\ + \\n mape = np.mean(abs_perc_err)\\ + \\n sdape = 
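# A small sketch of what get_classification_metrices above computes: accuracy plus
# macro-averaged precision/recall/f1, truncated (not rounded) to two decimals of a
# percentage. The label vectors below are toy data.
import math
from sklearn import metrics

actual    = [0, 1, 1, 0, 1, 0]
predicted = [0, 1, 0, 0, 1, 1]

scores = {
    'accuracy':  metrics.accuracy_score(actual, predicted),
    'precision': metrics.precision_score(actual, predicted, average='macro'),
    'recall':    metrics.recall_score(actual, predicted, average='macro'),
    'f1':        metrics.f1_score(actual, predicted, average='macro'),
}
# math.floor(v * 10000) / 100 turns 0.66666... into 66.66 rather than 66.67
print({k: math.floor(v * 10000) / 100 for k, v in scores.items()})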
np.std(abs_perc_err, ddof = 1)\\ + \\n\\ + \\n result['mean_error'] = me\\ + \\n result['mean_abs_error'] = mae\\ + \\n result['mean_abs_perc_error'] = mape\\ + \\n result['error_std'] = sde\\ + \\n result['abs_error_std'] = sdae\\ + \\n result['abs_perc_error_std'] = sdape\\ + \\n return result\\ + \\n""} + } + + def add_function(self, name, importer=None): + if name in self.available_functions.keys(): + self.codeText += self.available_functions[name]['code'] + if importer: + if 'imports' in self.available_functions[name].keys(): + for module in self.available_functions[name]['imports']: + mod_name = module['mod'] + mod_from = module.get('mod_from', None) + mod_as = module.get('mod_as', None) + importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) + + def get_function_name(self, name): + if name in self.available_functions.keys(): + return self.available_functions[name]['name'] + return None + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from importlib.metadata import version +import sys + + +class importModule(): + + def __init__(self): + self.importModule = {} + self.stdlibModule = [] + self.localModule = {} + + def addLocalModule(self,module, mod_from=None, mod_as=None): + if module == '*': + if module not in self.localModule.keys(): + self.localModule[module]= [mod_from] + else: + self.localModule[module].append(mod_from) + elif module not in self.localModule.keys(): + self.localModule[module] = {'from':mod_from, 'as':mod_as} + + def addModule(self, module, mod_from=None, mod_as=None): + if module not in self.importModule.keys(): + self.importModule[module] = {'from':mod_from, 'as':mod_as} + if module in sys.stdlib_module_names: + self.stdlibModule.append(module) + elif isinstance(self.importModule[module], list): + if mod_as not in [x['as'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as not in [x['from'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as != self.importModule[module]['as']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + elif mod_from != self.importModule[module]['from']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + + def getModules(self): + return (self.importModule, self.stdlibModule) + + def getBaseModule(self, extra_importers=[]): + modules_alias = { 'sklearn':'scikit-learn', + 'genetic_selection':'sklearn-genetic', + 'google': 'google-cloud-storage', + 'azure':'azure-storage-file-datalake'} + local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} + modules = [] + require = """" + if extra_importers: + extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] + importers_module = [self.importModule] + extra_importers + for importer_module in importers_module: + 
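# A minimal sketch of the requirement-pinning idea in getBaseModule: pin third-party
# packages with importlib.metadata.version and skip standard-library modules via
# sys.stdlib_module_names (Python 3.10+). The module list and error handling here are
# illustrative, not the class's exact behaviour.
import sys
from importlib.metadata import version, PackageNotFoundError

def pin_requirements(modules):
    lines = []
    for mod in modules:
        if mod in sys.stdlib_module_names:   # e.g. 'json' or 'math' need no pinning
            continue
        try:
            lines.append(f"{mod}=={version(mod)}")
        except PackageNotFoundError:
            lines.append(mod)                # distribution name differs from import name
    return '\n'.join(lines)

print(pin_requirements(['json', 'numpy', 'pandas']))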
for k,v in importer_module.items(): + if v['from']: + mod = v['from'].split('.')[0] + else: + mod = k + if mod in modules_alias.keys(): + mod = modules_alias[mod] + modules.append(mod) + modules = list(set(modules)) + for mod in modules: + try: + if mod in local_modules.keys(): + require += f""{local_modules[mod]}\\n"" + else: + require += f""{mod}=={version(mod)}\\n"" + except : + if mod not in sys.stdlib_module_names: + raise + return require + + def getCode(self): + def to_string(k, v): + mod = '' + if v['from']: + mod += 'from {} '.format(v['from']) + mod += 'import {}'.format(k) + if v['as']: + mod += ' as {} '.format(v['as']) + return mod + + modules = """" + local_modules = """" + std_lib_modules = """" + third_party_modules = """" + for k,v in self.importModule.items(): + if k in self.stdlibModule: + std_lib_modules = std_lib_modules + '\\n' + to_string(k, v) + elif isinstance(v, dict): + third_party_modules = third_party_modules + '\\n' + to_string(k, v) + elif isinstance(v, list): + for alias in v: + third_party_modules = third_party_modules + '\\n' + to_string(k, alias) + for k,v in self.localModule.items(): + if k != '*': + local_modules = local_modules + '\\n' + to_string(k, v) + else: + for mod_from in v: + local_modules = local_modules + '\\n' + f'from {mod_from} import {k}' + if std_lib_modules: + modules = modules + ""\\n#Standard Library modules"" + std_lib_modules + if third_party_modules: + modules = modules + ""\\n\\n#Third Party modules"" + third_party_modules + if local_modules: + modules = modules + ""\\n\\n#local modules"" + local_modules + '\\n' + return modules + + def copyCode(self, importer): + self.importModule, self.stdlibModule = importer.getModules() + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 20" +"22 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" + +import json + +class learner(): + + def __init__(self, problem_type=""classification"", target_feature="""", sample_method=None,indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.df_name = 'df' + self.problem_type = problem_type + self.target_feature = target_feature + self.search_space = [] + self.codeText = f""\\ndef train(log):"" + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'featureEngineeredData.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = utils.read_json(config_file)\\ + \\n return config"" + return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.function_code + '\\n' + self.codeText + + def addLocalFunctionsCode(self): + self.function_code += self.__addValidateConfigCode() + + def getPrefixModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += ""\\ + "" + def getSuffixModules(self): + modules = [] + return modules + + def addSuffixCode(self, indent=1): + self.codeText += ""\\n\\ + "" + def getMainCodeModules(self): + modules = [{'module':'logging'} + ] + return modules + + def getMlpCodeModules(self): + modules = [{'module':'math'} + ,{'module':'json'} + ,{'module':'joblib'} + ,{'module':'keras_tuner'} + ,{'module':'pandas', 'mod_as':'pd'} + ,{'module':'numpy', 'mod_as':'np'} + ,{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'r2_score', 'mod_from':'sklearn.metrics'} + ,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'} + ,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'} + ,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'} + ,{'module':'Sequential', 'mod_from':'tensorflow.keras'} + ,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'} + ] + return modules + + def addMlpCode(self): + self.codeText = """""" +def getdlparams(config): + for k, v in config.items(): + if (k == ""activation""): + activation_fn = str(v) + elif (k == ""optimizer""): + optimizer = str(v) + elif (k == ""loss""): + loss_fn = str(v) + elif (k == ""first_layer""): + if not isinstance(k, list): + first_layer = str(v).split(',') + else: + first_layer = k + elif (k == ""lag_order""): + lag_order = 
int(v) + elif (k == ""hidden_layers""): + hidden_layers = int(v) + elif (k == ""dropout""): + if not isinstance(k, list): + dropout = str(v).split(',') + else: + dropout = k + elif (k == ""batch_size""): + batch_size = int(v) + elif (k == ""epochs""): + epochs = int(v) + elif (k == ""model_name""): + model_name = str(v) + return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs, model_name +def numpydf(dataset, look_back): + dataX, dataY = [], [] + for i in range(len(dataset) - look_back - 1): + subset = dataset[i:(i + look_back), 0] + dataX.append(subset) + dataY.append(dataset[i + look_back, 0]) + return np.array(dataX), np.array(dataY) + +def startTraining(dataset,train_size,mlpConfig,filename_scaler,target_feature,scoreParam,log): + log.info('Training started') + activation_fn, optimizer, loss_fn, first_layer, hidden_layers, look_back, dropout, batch_size, epochs, model_name = getdlparams(mlpConfig) + hp = keras_tuner.HyperParameters() + first_layer_min = round(int(first_layer[0])) + first_layer_max = round(int(first_layer[1])) + dropout_min = float(dropout[0]) + dropout_max = float(dropout[1]) + dataset = dataset.values + train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :] + trainX, trainY = numpydf(train, look_back) + testX, testY = numpydf(test, look_back) + # create and fit Multilayer Perceptron model + model = Sequential() + model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), input_dim=look_back, activation=activation_fn)) #BUGID 13484 + model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) #BUGID 13484 + model.add(Dense(1, activation='sigmoid')) + model.compile(loss=loss_fn, optimizer=optimizer) + model_fit = model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, verbose=2) + # Estimate model performance + trainScore = model.evaluate(trainX, trainY, verbose=0) + testScore = model.evaluate(testX, testY, verbose=0) + # Scoring values for the model + mse_eval = testScore + rmse_eval = math.sqrt(testScore) + # generate predictions for training + trainPredict = model.predict(trainX) + testPredict = model.predict(testX) + scaler = joblib.load(filename_scaler) + + trainY = scaler.inverse_transform([trainY]) + trainPredict = scaler.inverse_transform(trainPredict) + ## For test data + testY = scaler.inverse_transform([testY]) + testPredict = scaler.inverse_transform(testPredict) + mse_mlp = mean_squared_error(testY.T, testPredict) + scores = {} + r2 = round(r2_score(testY.T, testPredict), 2) + scores['R2'] = r2 + + mae = round(mean_absolute_error(testY.T, testPredict), 2) + scores['MAE'] = mae + + scores['MSE'] = round(mse_mlp, 2) + + rmse = round(math.sqrt(mse_mlp), 2) + scores['RMSE'] = rmse + + + scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE']) + log.info(""mlp rmse: ""+str(rmse)) + log.info(""mlp mse: ""+str(round(mse_mlp, 2))) + log.info(""mlp r2: ""+str(r2)) + log.info(""mlp mae: ""+str(mae)) + + return model, look_back, scaler,testScore,trainScore,scores + +def train(config, targetPath, log): + + dataLoc = targetPath / IOFiles['inputData'] + if not dataLoc.exists(): + return {'Status': 'Failure', 'Message': 'Data location does not exists.'} + + status = dict() + usecase = config['targetPath'] + df = utils.read_data(dataLoc) + target_feature = config['target_feature'] + dateTimeFeature= config['dateTimeFeature'] + df.set_index(dateTimeFeature, inplace=True) + train_size = int(len(df) * 
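# A tiny sketch of the sliding-window framing that numpydf performs above: each sample is
# look_back consecutive values and the target is the value that follows. The helper name
# and array below are examples only (the generated code also drops one extra trailing row).
import numpy as np

def make_windows(series: np.ndarray, look_back: int):
    X, y = [], []
    for i in range(len(series) - look_back):
        X.append(series[i:i + look_back])     # inputs: t-look_back .. t-1
        y.append(series[i + look_back])       # target: value at t
    return np.array(X), np.array(y)

series = np.arange(10, dtype=float)
X, y = make_windows(series, look_back=3)
print(X.shape, y.shape)    # (7, 3) (7,) -> e.g. [0, 1, 2] predicts 3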
(1-config['test_ratio'])) #BugID:13217 + mlpConfig = config['algorithms']['MLP'] + filename = meta_data['transformation']['Status']['Normalization_file'] + scoreParam = config['scoring_criteria'] + log.info('Training MLP for TimeSeries') + mlp_model, look_back, scaler,testScore,trainScore, error_matrix = startTraining(df,train_size,mlpConfig,filename,target_feature,scoreParam,log) + score = error_matrix[scoreParam] + # Training model + + + model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name + model_file_name = str(model_path/'model') + mlp_model.save(model_file_name) + meta_data['training'] = {} + meta_data['training']['model_filename'] = model_file_name + meta_data['training']['dateTimeFeature'] = dateTimeFeature + meta_data['training']['target_feature'] = target_feature + utils.write_json(meta_data, targetPath / IOFiles['metaData']) + utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics']) + # return status + status = {'Status': 'Success', 'errorMatrix': error_matrix, 'test_score':testScore, 'train_score': trainScore,'score':error_matrix[scoreParam]} + log.info(f'Test score: {testScore}') + log.info(f'Train score: {trainScore}') + log.info(f'output: {status}') + return json.dumps(status) + """""" + + def getLstmCodeModules(self): + modules = [{'module':'math'} + ,{'module':'json'} + ,{'module':'joblib'} + ,{'module':'keras_tuner'} + ,{'module':'pandas', 'mod_as':'pd'} + ,{'module':'numpy', 'mod_as':'np'} + ,{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'r2_score', 'mod_from':'sklearn.metrics'} + ,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'} + ,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'} + ,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'} + ,{'module':'Sequential', 'mod_from':'tensorflow.keras'} + ,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'} + ,{'module':'LSTM', 'mod_from':'tensorflow.keras.layers'} + ,{'module':'TimeseriesGenerator', 'mod_from':'tensorflow.keras.preprocessing.sequence'} + ,{'module':'train_test_split', 'mod_from':'sklearn.model_selection'} + ] + return modules + + def addLstmCode(self): + self.codeText = """""" +def getdlparams(config): + for k, v in config.items(): + if (k == ""activation""): + activation_fn = str(v) + elif (k == ""optimizer""): + optimizer = str(v) + elif (k == ""loss""): + loss_fn = str(v) + elif (k == ""first_layer""): + if not isinstance(k, list): + first_layer = str(v).split(',') + else: + first_layer = k + elif (k == ""lag_order""): + lag_order = int(v) + elif (k == ""hidden_layers""): + hidden_layers = int(v) + elif (k == ""dropout""): + if not isinstance(k, list): + dropout = str(v).split(',') + else: + dropout = k + elif (k == ""batch_size""): + batch_size = int(v) + elif (k == ""epochs""): + epochs = int(v) + + return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs + +def numpydf(dataset, look_back): + dataX, dataY = [], [] + for i in range(len(dataset) - look_back - 1): + subset = dataset[i:(i + look_back), 0] + dataX.append(subset) + dataY.append(dataset[i + look_back, 0]) + return np.array(dataX), np.array(dataY) + +def startTraining(dataset,test_size,mlpConfig,filename_scaler,target_feature,scoreParam,log): + log.info('Training started') + activation_fn, optimizer, loss_fn, first_layer, look_back,hidden_layers, dropout, batch_size, epochs= getdlparams(mlpConfig) + n_features = len(target_feature) + n_input = 
look_back + hp = keras_tuner.HyperParameters() + first_layer_min = round(int(first_layer[0])) + first_layer_max = round(int(first_layer[1])) + dropout_min = float(dropout[0]) + dropout_max = float(dropout[1]) + dataset = dataset[target_feature] + dataset_np = dataset.values + train, test = train_test_split(dataset_np, test_size=test_size, shuffle=False) + generatorTra" +"in = TimeseriesGenerator(train, train, length=n_input, batch_size=8) + generatorTest = TimeseriesGenerator(test, test, length=n_input, batch_size=8) + batch_0 = generatorTrain[0] + x, y = batch_0 + epochs = int(epochs) + ##Multivariate LSTM model + model = Sequential() + model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), activation=activation_fn, input_shape=(n_input, n_features))) + model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) + model.add(Dense(n_features)) + model.compile(optimizer=optimizer, loss=loss_fn) + # model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False) + model.fit_generator(generatorTrain, steps_per_epoch=1, epochs=epochs, shuffle=False, verbose=0) + # lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0) + + + predictions = [] + future_pred_len = n_input + # To get values for prediction,taking look_back steps of rows + first_batch = train[-future_pred_len:] + c_batch = first_batch.reshape((1, future_pred_len, n_features)) + current_pred = None + for i in range(len(test)): + # get pred for firstbatch + current_pred = model.predict(c_batch)[0] + predictions.append(current_pred) + # remove first val + c_batch_rmv_first = c_batch[:, 1:, :] + # update + c_batch = np.append(c_batch_rmv_first, [[current_pred]], axis=1) + ## Prediction, inverse the minmax transform + scaler = joblib.load(filename_scaler) + prediction_actual = scaler.inverse_transform(predictions) + test_data_actual = scaler.inverse_transform(test) + mse = None + rmse = None + ## Creating dataframe for actual,predictions + pred_cols = list() + for i in range(len(target_feature)): + pred_cols.append(target_feature[i] + '_pred') + + predictions = pd.DataFrame(prediction_actual, columns=pred_cols) + actual = pd.DataFrame(test_data_actual, columns=target_feature) + actual.columns = [str(col) + '_actual' for col in dataset.columns] + df_predicted = pd.concat([actual, predictions], axis=1) + print(""LSTM Multivariate prediction dataframe: \\\\n"" + str(df_predicted)) + # df_predicted.to_csv('mlp_prediction.csv') + from math import sqrt + from sklearn.metrics import mean_squared_error + from sklearn.metrics import r2_score + from sklearn.metrics import mean_absolute_error + target = target_feature + mse_dict = {} + rmse_dict = {} + mae_dict = {} + r2_dict = {} + lstm_var = 0 + for name in target: + index = dataset.columns.get_loc(name) + mse = mean_squared_error(test_data_actual[:, index], prediction_actual[:, index]) + mse_dict[name] = mse + rmse = sqrt(mse) + rmse_dict[name] = rmse + lstm_var = lstm_var + rmse + print(""Name of the target feature: "" + str(name)) + print(""RMSE of the target feature: "" + str(rmse)) + r2 = r2_score(test_data_actual[:, index], prediction_actual[:, index]) + r2_dict[name] = r2 + mae = mean_absolute_error(test_data_actual[:, index], prediction_actual[:, index]) + mae_dict[name] = mae + ## For VAR comparison, send last target mse and rmse from above dict + lstm_var = lstm_var / len(target) + select_msekey = list(mse_dict.keys())[-1] + l_mse = list(mse_dict.values())[-1] + select_rmsekey = 
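# A sketch of the recursive forecasting loop used above: predict one step, drop the oldest
# row from the input window, append the prediction, repeat. predict_one is a hypothetical
# stand-in for the trained LSTM's single-step prediction.
import numpy as np

def recursive_forecast(history: np.ndarray, steps: int, predict_one):
    window = history.copy()                      # shape: (look_back, n_features)
    preds = []
    for _ in range(steps):
        yhat = predict_one(window)               # shape: (n_features,)
        preds.append(yhat)
        window = np.vstack([window[1:], yhat])   # slide the window forward by one step
    return np.array(preds)

history = np.arange(12, dtype=float).reshape(6, 2)          # toy multivariate window
print(recursive_forecast(history, 3, lambda w: w[-1] + 1))  # naive +1 stand-in model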
list(rmse_dict.keys())[-1] + l_rmse = list(rmse_dict.values())[-1] + select_r2key = list(r2_dict.keys())[-1] + l_r2 = list(r2_dict.values())[-1] + select_maekey = list(mae_dict.keys())[-1] + l_mae = list(mae_dict.values())[-1] + log.info('Selected target feature of LSTM for best model selection: ' + str(select_rmsekey)) + scores = {} + scores['R2'] = l_r2 + scores['MAE'] = l_mae + scores['MSE'] = l_mse + scores['RMSE'] = l_rmse + scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE']) + log.info(""lstm rmse: ""+str(l_rmse)) + log.info(""lstm mse: ""+str(l_mse)) + log.info(""lstm r2: ""+str(l_r2)) + log.info(""lstm mae: ""+str(l_mae)) + + return model,look_back,scaler, scores + +def train(config, targetPath, log): + dataLoc = targetPath / IOFiles['inputData'] + if not dataLoc.exists(): + return {'Status': 'Failure', 'Message': 'Data location does not exists.'} + status = dict() + usecase = config['targetPath'] + df = utils.read_data(dataLoc) + target_feature = config['target_feature'] + dateTimeFeature= config['dateTimeFeature'] + scoreParam = config['scoring_criteria'] + testSize = config['test_ratio'] + lstmConfig = config['algorithms']['LSTM'] + filename = meta_data['transformation']['Status']['Normalization_file'] + if (type(target_feature) is list): + pass + else: + target_feature = list(target_feature.split("","")) + df.set_index(dateTimeFeature, inplace=True) + log.info('Training LSTM for TimeSeries') + mlp_model, look_back, scaler, error_matrix = startTraining(df,testSize,lstmConfig,filename,target_feature,scoreParam,log) + score = error_matrix[scoreParam] + log.info(""LSTM Multivariant all scoring param results: ""+str(error_matrix)) + # Training model + + + + model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name + model_file_name = str(model_path/'model') + mlp_model.save(model_file_name) + meta_data['training'] = {} + meta_data['training']['model_filename'] = model_file_name + meta_data['training']['dateTimeFeature'] = dateTimeFeature + meta_data['training']['target_feature'] = target_feature + utils.write_json(meta_data, targetPath / IOFiles['metaData']) + utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics']) + # return status + status = {'Status': 'Success', 'errorMatrix': error_matrix,'score':error_matrix[scoreParam]} + log.info(f'score: {error_matrix[scoreParam]}') + log.info(f'output: {status}') + return json.dumps(status) + """""" + def addMainCode(self, indent=1): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = utils.read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + log_file = targetPath / IOFiles['log'] + log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(train(config, targetPath, log)) + except Exception as e: + + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + + def add_variable(self, name, value, indent=1): + if isinstance(value, str): + self.codeText += f""\\n{self.tab * indent}{name} = '{value}'"" + else: + self.codeText += f""\\n{self.tab * indent}{name} = {value}"" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * 
indent}{statement}"" + + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +class input_drift(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.codeText = '' + + def addInputDriftClass(self): + text = ""\\ + \\nclass inputdrift():\\ + \\n\\ + \\n def __init__(self,base_config):\\ + \\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\ + \\n self.currentDataLocation = base_config['currentDataLocation']\\ + \\n home = Path.home()\\ + \\n if platform.system() == 'Windows':\\ + \\n from pathlib import WindowsPath\\ + \\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\ + \\n else:\\ + \\n from pathlib import PosixPath\\ + \\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\ + \\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\ + \\n if not output_model_dir.exists():\\ + \\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\ + \\n\\ + \\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\ + \\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\ + \\n mlflow.set_tracking_uri(tracking_uri)\\ + \\n mlflow.set_registry_uri(registry_uri)\\ + \\n client = mlflow.tracking.MlflowClient(\\ + \\n tracking_uri=tracking_uri,\\ + \\n registry_uri=registry_uri,\\ + \\n )\\ + \\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\ + \\n model = mlflow.pyfunc.load_model(model_version_uri)\\ + \\n run = client.get_run(model.metadata.run_id)\\ + \\n if run.info.artifact_uri.startswith('file:'):\\ + \\n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\ + \\n else:\\ + \\n artifact_path = Path(run.info.artifact_uri)\\ + \\n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\\ + \\n\\ + \\n def get_input_drift(self,current_data, historical_data):\\ + \\n curr_num_feat = current_data.select_dtypes(include='number')\\ + \\n hist_num_feat = historical_data.select_dtypes(include='number')\\ + \\n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\\ + \\n alert_count = 0\\ + \\n data = {\\ + \\n 'current':{'data':current_data},\\ + \\n 'hist': {'data': historical_data}\\ + \\n }\\ + \\n dist_changed_columns = []\\ + \\n dist_change_message = []\\ + \\n for feature in num_features:\\ + \\n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\\ + \\n if (curr_static_value < 0.05):\\ + \\n distribution = {}\\ + \\n distribution['hist'] = self.DistributionFinder( historical_data[feature])\\ + \\n distribution['curr'] = self.DistributionFinder( current_data[feature])\\ + \\n if(distribution['hist']['name'] == distribution['curr']['name']):\\ + \\n pass\\ + \\n else:\\ + \\n alert_count = alert_count + 1\\ + \\n dist_changed_columns.append(feature)\\ + \\n changed_column = {}\\ + \\n changed_column['Feature'] = feature\\ + \\n 
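# A minimal sketch of the drift check used by get_input_drift above: a feature is flagged
# when the two-sample Kolmogorov-Smirnov p-value between training and current data falls
# below 0.05. The two samples below are synthetic.
import numpy as np
from scipy import stats as st

rng = np.random.default_rng(0)
train_feature = rng.normal(loc=0.0, scale=1.0, size=500)
live_feature  = rng.normal(loc=0.7, scale=1.0, size=500)   # shifted distribution

p_value = st.ks_2samp(train_feature, live_feature).pvalue
print('drift detected' if p_value < 0.05 else 'no drift', p_value)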
changed_column['KS_Training'] = curr_static_value\\ + \\n changed_column['Training_Distribution'] = distribution['hist']['name']\\ + \\n changed_column['New_Distribution'] = distribution['curr']['name']\\ + \\n dist_change_message.append(changed_column)\\ + \\n if alert_count:\\ + \\n resultStatus = dist_change_message\\ + \\n else :\\ + \\n resultStatus='Model is working as expected'\\ + \\n return(alert_count, resultStatus)\\ + \\n\\ + \\n def DistributionFinder(self,data):\\ + \\n best_distribution =''\\ + \\n best_sse =0.0\\ + \\n if(data.dtype in ['int','int64']):\\ + \\n distributions= {'bernoulli':{'algo':st.bernoulli},\\ + \\n 'binom':{'algo':st.binom},\\ + \\n 'geom':{'algo':st.geom},\\ + \\n 'nbinom':{'algo':st.nbinom},\\ + \\n 'poisson':{'algo':st.poisson}\\ + \\n }\\ + \\n index, counts = np.unique(data.astype(int),return_counts=True)\\ + \\n if(len(index)>=2):\\ + \\n best_sse = np.inf\\ + \\n y1=[]\\ + \\n total=sum(counts)\\ + \\n mean=float(sum(index*counts))/total\\ +" +" \\n variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\\ + \\n dispersion=mean/float(variance)\\ + \\n theta=1/float(dispersion)\\ + \\n r=mean*(float(theta)/1-theta)\\ + \\n\\ + \\n for j in counts:\\ + \\n y1.append(float(j)/total)\\ + \\n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\\ + \\n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\\ + \\n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\\ + \\n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\\ + \\n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\\ + \\n\\ + \\n sselist = []\\ + \\n for dist in distributions.keys():\\ + \\n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\\ + \\n if np.isnan(distributions[dist]['sess']):\\ + \\n distributions[dist]['sess'] = float('inf')\\ + \\n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\\ + \\n best_distribution = best_dist\\ + \\n best_sse = distributions[best_dist]['sess']\\ + \\n\\ + \\n elif (len(index) == 1):\\ + \\n best_distribution = 'Constant Data-No Distribution'\\ + \\n best_sse = 0.0\\ + \\n elif(data.dtype in ['float64','float32']):\\ + \\n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\\ + \\n best_distribution = st.norm.name\\ + \\n best_sse = np.inf\\ + \\n nrange = data.max() - data.min()\\ + \\n\\ + \\n y, x = np.histogram(data.astype(float), bins='auto', density=True)\\ + \\n x = (x + np.roll(x, -1))[:-1] / 2.0\\ + \\n\\ + \\n for distribution in distributions:\\ + \\n with warnings.catch_warnings():\\ + \\n warnings.filterwarnings('ignore')\\ + \\n params = distribution.fit(data.astype(float))\\ + \\n arg = params[:-2]\\ + \\n loc = params[-2]\\ + \\n scale = params[-1]\\ + \\n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\\ + \\n sse = np.sum(np.power(y - pdf, 2.0))\\ + \\n if( sse < best_sse):\\ + \\n best_distribution = distribution.name\\ + \\n best_sse = sse\\ + \\n\\ + \\n return {'name':best_distribution, 'sse': best_sse}\\ + \\n\\ + "" + return text + + def addSuffixCode(self, indent=1): + text =""\\n\\ + \\ndef check_drift( config):\\ + \\n inputdriftObj = inputdrift(config)\\ + \\n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\\ + \\n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\\ + \\n 
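# A trimmed sketch of DistributionFinder's continuous branch: fit several scipy
# distributions and keep the one whose pdf has the smallest sum of squared errors against
# the data histogram. The candidate list is shortened here for brevity.
import warnings
import numpy as np
import scipy.stats as st

def best_distribution(data, candidates=(st.norm, st.expon, st.uniform, st.gamma)):
    y, x = np.histogram(data.astype(float), bins='auto', density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0          # bin centres
    best_name, best_sse = st.norm.name, np.inf
    for dist in candidates:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            params = dist.fit(data)
        arg, loc, scale = params[:-2], params[-2], params[-1]
        pdf = dist.pdf(x, loc=loc, scale=scale, *arg)
        sse = np.sum(np.power(y - pdf, 2.0))
        if sse < best_sse:
            best_name, best_sse = dist.name, sse
    return {'name': best_name, 'sse': best_sse}

print(best_distribution(np.random.default_rng(0).normal(size=1000)))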
dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\\ + \\n if message == 'Model is working as expected':\\ + \\n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\\ + \\n else:\\ + \\n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\\ + \\n return(output_json)\\ + \\n\\ + \\nif __name__ == '__main__':\\ + \\n try:\\ + \\n if len(sys.argv) < 2:\\ + \\n raise ValueError('config file not present')\\ + \\n config = sys.argv[1]\\ + \\n if Path(config).is_file() and Path(config).suffix == '.json':\\ + \\n with open(config, 'r') as f:\\ + \\n config = json.load(f)\\ + \\n else:\\ + \\n config = json.loads(config)\\ + \\n output = check_drift(config)\\ + \\n status = {'Status':'Success','Message':output}\\ + \\n print('input_drift:'+json.dumps(status))\\ + \\n except Exception as e:\\ + \\n status = {'Status':'Failure','Message':str(e)}\\ + \\n print('input_drift:'+json.dumps(status))"" + return text + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def generateCode(self): + self.codeText += self.addInputDriftClass() + self.codeText += self.addSuffixCode() + + def getCode(self): + return self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+*/ +"""""" +import json + +class tabularDataReader(): + + def __init__(self, tab_size=4): + self.tab = ' ' * tab_size + self.function_code = '' + self.codeText = '' + self.code_generated = False + + def getInputFiles(self): + IOFiles = { + ""rawData"": ""rawData.dat"", + ""metaData"" : ""modelMetaData.json"", + ""log"" : ""aion.log"", + ""outputData"" : ""rawData.dat"", + ""monitoring"":""monitoring.json"", + ""prodData"": ""prodData"", + ""prodDataGT"":""prodDataGT"" + } + text = 'IOFiles = ' + if not IOFiles: + text += '{ }' + else: + text += json.dumps(IOFiles, indent=4) + return text + + def getOutputFiles(self): + output_files = { + 'metaData' : 'modelMetaData.json', + 'log' : 'aion.log', + 'outputData' : 'rawData.dat' + } + text = 'output_file = ' + if not output_files: + text += '{ }' + else: + text += json.dumps(output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n if not config['targetPath']:\\ + \\n raise ValueError(f'Target Path is not configured')\\ + \\n return config"" + return text + + def addMainCode(self, indent=1): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + targetPath.mkdir(parents=True, exist_ok=True) + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + meta_data_file = targetPath / IOFiles['metaData'] + if not meta_data_file.exists(): + raise ValueError(f'Configuration file not found: {meta_data_file}') + log_file = targetPath / IOFiles['log'] + log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(load_data(config, targetPath, log)) + except Exception as e: + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + + def addLoadDataCode(self): + self.codeText += """""" +#This function will read the data and save the data on persistent storage +def load_data(config, targetPath, log): + meta_data_file = targetPath / IOFiles['metaData'] + meta_data = read_json(meta_data_file) + if meta_data.get('monitoring', False) and not meta_data['monitoring'].get('retrain', False): + raise ValueError('New data is not enougth to retrain model') + df = read_data(config['dataLocation']) + status = {} + output_data_path = targetPath / IOFiles['outputData'] + log.log_dataframe(df) + required_features = list(set(config['selected_features'] + config['dateTimeFeature'] + config['target_feature'])) + log.info('Dataset features required: ' + ','.join(required_features)) + missing_features = [x for x in required_features if x not in df.columns.tolist()] + if missing_features: + raise ValueError(f'Some feature/s is/are missing: {missing_features}') + log.info('Removing unused features: ' + ','.join(list(set(df.columns) - set(required_features)))) + df = df[required_features] + log.info(f'Required features: {required_features}') + try: + log.info(f'Saving Dataset: {str(output_data_path)}') + write_data(df, output_data_path, index=False) + status = {'Status': 'Success', 'DataFilePath': IOFiles['outputData'], 'Records': len(df)} + except: + raise ValueError('Unable to 
create data file') + + meta_data['load_data'] = {} + meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if + x != config['target_feature']] + meta_data['load_data']['Status'] = status + write_json(meta_data, meta_data_file) + output = json.dumps(status) + log.info(output) + return output +"""""" + def addValidateConfigCode(self, indent=1): + self.function_code += self.__addValidateConfigCode() + + def addLocalFunctionsCode(self): + self.addValidateConfigCode() + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.function_code + '\\n' + self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" +import json + +class drift(): + + def __init__(self, indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.codeText = """" + self.function_code = """" + self.input_files = {} + self.output_files = {} + self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = utils.read_json(config_file)\\ + \\n return config\\ + "" + return text + + def addLocalFunctionsCode(self): + self.function_code += self.__addValidateConfigCode() + + def addPrefixCode(self, smaller_is_better=False, indent=1): + self.codeText += """""" +def monitoring(config, targetPath, log): + retrain = False + last_run_id = 0 + retrain_threshold = config.get('retrainThreshold', 100) + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = utils.read_json(meta_data_file) + if not meta_data.get('register', None): + log.info('Last time Pipeline not executed properly') + retrain = True + else: + last_run_id = meta_data['register']['runId'] + df = utils.read_data(config['dataLocation']) + df_len = len(df) + if not meta_data['monitoring'].get('endIndex', None): + meta_data['monitoring']['endIndex'] = 
int(meta_data['load_data']['Status']['Records']) + meta_data['monitoring']['endIndexTemp'] = meta_data['monitoring']['endIndex'] + if meta_data['register'].get('registered', False): + meta_data['monitoring']['endIndex'] = meta_data['monitoring']['endIndexTemp'] + meta_data['register']['" +"registered'] = False #ack registery + if (meta_data['monitoring']['endIndex'] + retrain_threshold) < df_len: + meta_data['monitoring']['endIndexTemp'] = df_len + retrain = True + else: + log.info('Pipeline running first time') + meta_data = {} + meta_data['monitoring'] = {} + retrain = True + if retrain: + meta_data['monitoring']['runId'] = last_run_id + 1 + meta_data['monitoring']['retrain'] = retrain + utils.write_json(meta_data, targetPath/IOFiles['metaData']) + status = {'Status':'Success','retrain': retrain, 'runId':meta_data['monitoring']['runId']} + log.info(f'output: {status}') + return json.dumps(status) + """""" + def getMainCodeModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas','mod_as':'pd'} + ,{'module':'json'} + ] + return modules + + def addMainCode(self, indent=1): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + targetPath.mkdir(parents=True, exist_ok=True) + log_file = targetPath / IOFiles['log'] + log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(monitoring(config, targetPath, log)) + except Exception as e: + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def getCode(self, indent=1): + return self.function_code + '\\n' + self.codeText + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
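+# --- Illustrative sketch (not part of the generated template) ----------------
+# A minimal, self-contained rendering of the retrain-threshold decision that the
+# generated monitoring() step above performs: reload modelMetaData.json, compare
+# the current row count with the last recorded endIndex plus retrainThreshold,
+# and retrain only when enough new rows have arrived. The helper name
+# should_retrain() and the standalone file handling are assumptions for
+# illustration; runId bookkeeping and the register acknowledgement are omitted.
+import json
+from pathlib import Path
+import pandas as pd
+
+def should_retrain(target_path: Path, data_location: str, retrain_threshold: int = 100) -> bool:
+    meta_file = target_path / 'modelMetaData.json'
+    if not meta_file.exists():
+        return True  # first run: the pipeline always trains
+    meta = json.loads(meta_file.read_text())
+    end_index = meta.get('monitoring', {}).get('endIndex', 0)
+    current_rows = len(pd.read_csv(data_location))
+    # Same comparison as the generated code: retrain once the backlog of new
+    # rows exceeds the configured threshold.
+    return (end_index + retrain_threshold) < current_rows
+# ------------------------------------------------------------------------------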
+*/ +"""""" +import json + +class selector(): + + def __init__(self, indent=0, tab_size=4): + self.tab = "" ""*tab_size + self.codeText = """" + self.pipe = 'pipe' + self.code_generated = False + self.input_files = {} + self.output_files = {} + self.function_code = '' + self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'}) + + def addInputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def addOutputFiles(self, files): + if not isinstance(files, dict): + raise TypeError(f""Required dict type got {type(files)} type"") + for k,v in files.items(): + self.input_files[k] = v + + def getInputFiles(self): + text = 'IOFiles = ' + if not self.input_files: + text += '{ }' + else: + text += json.dumps(self.input_files, indent=4) + return text + + def getOutputFiles(self): + text = 'output_file = ' + if not self.output_files: + text += '{ }' + else: + text += json.dumps(self.output_files, indent=4) + return text + + def getInputOutputFiles(self, indent=0): + text = '\\n' + text += self.getInputFiles() + if indent: + text = text.replace('\\n', self.tab * indent + '\\n') + return text + + def __addValidateConfigCode(self): + text = ""\\n\\ + \\ndef validateConfig():\\ + \\n config_file = Path(__file__).parent/'config.json'\\ + \\n if not Path(config_file).exists():\\ + \\n raise ValueError(f'Config file is missing: {config_file}')\\ + \\n config = read_json(config_file)\\ + \\n return config"" + return text + + def addMainCode(self): + self.codeText += """""" +if __name__ == '__main__': + config = validateConfig() + targetPath = Path('aion') / config['targetPath'] + if not targetPath.exists(): + raise ValueError(f'targetPath does not exist') + meta_data_file = targetPath / IOFiles['metaData'] + if meta_data_file.exists(): + meta_data = read_json(meta_data_file) + else: + raise ValueError(f'Configuration file not found: {meta_data_file}') + log_file = targetPath / IOFiles['log'] + log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) + try: + print(featureSelector(config,targetPath, log)) + except Exception as e: + + status = {'Status': 'Failure', 'Message': str(e)} + print(json.dumps(status)) + """""" + + def addValidateConfigCode(self, indent=1): + self.function_code += self.__addValidateConfigCode() + + def addStatement(self, statement, indent=1): + self.codeText += '\\n' + self.tab * indent + statement + + def getCode(self): + return self.function_code + '\\n' + self.codeText + + def addLocalFunctionsCode(self): + self.addValidateConfigCode() + + def getPrefixModules(self): + modules = [{'module':'Path', 'mod_from':'pathlib'} + ,{'module':'pandas', 'mod_as':'pd'} + ] + return modules + + def addPrefixCode(self, indent=1): + self.codeText += """""" +def featureSelector(config, targetPath, log): + dataLoc = targetPath / IOFiles['inputData'] + if not dataLoc.exists(): + return {'Status': 'Failure', 'Message': 'Data location does not exists.'} + + status = dict() + df = pd.read_csv(dataLoc) + log.log_dataframe(df) + + csv_path = str(targetPath / IOFiles['outputData']) + write_data(df, csv_path, index=False) + status = {'Status': 'Success', 'dataFilePath': IOFiles['outputData']} + log.info(f'Selected data saved at {csv_path}') + meta_data['featureengineering'] = {} + meta_data['featureengineering']['Status'] = status + write_json(meta_data, str(targetPath / 
IOFiles['metaData'])) + log.info(f'output: {status}') + return json.dumps(status) + """""" + + def getSuffixModules(self): + modules = [] + return modules + + def addSuffixCode(self, indent=1): + self.codeText += """" + + def getMainCodeModules(self): + modules = [ + {'module':'json'} + ,{'module':'logging'} + ] + return modules + + def addStatement(self, statement, indent=1): + self.codeText += f""\\n{self.tab * indent}{statement}"" + + def getPipe(self): + return self.pipe + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +import platform +from mlac.timeseries.core import * +from .utility import * + +output_file_map = { + 'text' : {'text' : 'text_profiler.pkl'}, + 'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'}, + 'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'}, + 'normalizer' : {'normalizer' : 'normalizer.pkl'} +} + +def add_common_imports(importer): + common_importes = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + +def get_transformer_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""text_features"",""profiler"",""test_ratio"",""dateTimeFeature""] #BugID:13217 + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_transformer(config): + transformer = profiler() + importer = importModule() + function = global_function() + importModules(importer, transformer.getPrefixModules()) + importer.addModule('warnings') + transformer.addPrefixCode() + importModules(importer, transformer.getMainCodeModules()) + transformer.addMainCode() + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'DataTransformation' + deploy_path.mkdir(parents=True, exist_ok=True) + generated_files = [] + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('transformer') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = file_header(usecase) + code += ""\\nimport os\\nos.path.abspath(os.path.join(__file__, os.pardir))\\n"" #chdir to import from current dir + code += importer.getCode() + code += '\\nwarnings.filterwarnings(""ignore"")\\n' + code += transformer.getInputOutputFiles() + code += function.getCode() + transformer.addLocalFunctionsCode() + code += 
transformer.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_transformer_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('transformer', deploy_path,config['modelName'], generated_files) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +from mlac.timeseries.core import * +from .utility import * + +def get_register_params(config, models): + param_keys = [""modelVersion"",""problem_type""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + data['models'] = models + return data + +def run_register(config): + importer = importModule() + + registration = register(importer) + models = get_variable('models_name') + smaller_is_better = get_variable('smaller_is_better', False) + registration.addLocalFunctionsCode(models) + registration.addPrefixCode(smaller_is_better) + registration.addMainCode(models) + importModules(importer, registration.getMainCodeModules()) + importer.addModule('warnings') + + generated_files = [] + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelRegistry' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('utility', mod_as='utils') + utility_obj = utility_function('register') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file required for creating a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = importer.getCode() + code += '\\nwarnings.filterwarnings(""ignore"")\\n' + code += registration.getInputOutputFiles() + code += registration.getCode() + # create serving file + with open(deploy_path/""aionCode.py"", 'w') as f: + f.write(file_header(usecase) + code) + generated_files.append(""aionCode.py"") + + # create requirements file + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + # create config file + with open (deploy_path/""config.json"", ""w"") as f: + json.dump(get_register_params(config, models), f, indent=4) + generated_files.append(""config.json"") + + # create docker file + create_docker_file('register'," +"deploy_path,config['modelName'], generated_files) + """""" +/** +* 
============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +import datetime +from pathlib import Path + +variables = {} + +def update_variable(name, value): + variables[name] = value + +def get_variable(name, default=None): + return variables.get(name, default) + +def append_variable(name, value): + data = get_variable(name) + if not data: + update_variable(name, [value]) + elif not isinstance(data, list): + update_variable(name, [data, value]) + else: + data.append(value) + update_variable(name, data) + +def addDropFeature(feature, features_list, coder, indent=1): + coder.addStatement(f'if {feature} in {features_list}:', indent=indent) + coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1) + +def importModules(importer, modules_list): + for module in modules_list: + mod_from = module.get('mod_from',None) + mod_as = module.get('mod_as',None) + importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) + +def file_header(use_case, module_name=None): + time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ') + text = ""#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n"" + return text + f""'''\\nThis file is automatically generated by AION for {use_case} usecase.\\nFile generation time: {time_str}\\n'''"" + +def get_module_mapping(module): + mapping = { + ""LogisticRegression"": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'} + ,""GaussianNB"": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'} + ,""DecisionTreeClassifier"": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'} + ,""SVC"": {'module':'SVC', 'mod_from':'sklearn.svm'} + ,""KNeighborsClassifier"": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'} + ,""GradientBoostingClassifier"": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'} + ,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'} + ,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'} + ,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'} + ,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'} + + ,""LinearRegression"": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'} + ,""Lasso"": {'module':'Lasso', 'mod_from':'sklearn.linear_model'} + ,""Ridge"": {'module':'Ridge', 'mod_from':'sklearn.linear_model'} + ,""DecisionTreeRegressor"": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'} + ,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'} + ,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'} + ,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'} + ,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'} + } + return mapping.get(module, None) + +def create_docker_file(name, path,usecasename,files=[],text_feature=False): + text = """" + if name == 'load_data': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL 
""usecase_test""=""'+str(usecasename)+'_test'+'""' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'transformer': + text='FROM python:3.8-slim-buster\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + if text_feature: + text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\\n' + text+='\\n' + text+='''RUN \\ +''' + if text_feature: + text += ''' git && pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git\\ && +''' + text+=''' pip install --no-cache-dir -r requirements.txt\\ +''' + if text_feature: + text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\ +''' + text+='\\n' + elif name == 'selector': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'train': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'register': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + elif name == 'Prediction': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + if text_feature: + text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\\n' + text+='''RUN \\ +''' + if text_feature: + text += ''' git && pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git\\ && +''' + text+='''pip install --no-cache-dir -r requirements.txt\\ +''' + if text_feature: + text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\ +''' + text+='\\n' + text+='ENTRYPOINT [""python"", ""aionCode.py"",""-ip"",""0.0.0.0"",""-pn"",""8094""]\\n' + elif name == 'input_drift': + text='FROM python:3.8-slim-buster' + text+='\\n' + text+='LABEL ""usecase""=""'+str(usecasename)+'""' + text+='\\n' + text+='LABEL ""usecase_test""=""'+str(usecasename)+'_test'+'""' + text+='\\n' + for file in files: + text+=f'\\nCOPY {file} {file}' + text+='\\n' + text+='RUN pip install --no-cache-dir -r requirements.txt' + file_name = Path(path)/'Dockerfile' + with open(file_name, 'w') as f: + f.write(text) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* 
============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from .load_data import run_loader +from .transformer import run_transformer +from .selector import run_selector +from .trainer import run_trainer +from .register import run_register +from .deploy import run_deploy +from .drift_analysis import run_drift_analysis + + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +from mlac.timeseries.core import * +from .utility import * + +def get_deploy_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""lag_order"",""noofforecasts""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + data['ipAddress'] = '127.0.0.1' + data['portNo'] = '8094' + return data + +def import_trainer_module(importer): + non_sklearn_modules = get_variable('non_sklearn_modules') + if non_sklearn_modules: + for mod in non_sklearn_modules: + module = get_module_mapping(mod) + mod_from = module.get('mod_from',None) + mod_as = module.get('mod_as',None) + importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) + +imported_modules = [ + ] + +def run_deploy(config): + generated_files = [] + importer = importModule() + deployer = deploy() + importModules(importer, imported_modules) + + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelServing' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('utility', mod_as='utils') + utility_obj = utility_function('Prediction') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file required for creating a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + importModules(importer,deployer.getPredictionCodeModules()) + code = file_header(usecase) + code += importer.getCode() + code += deployer.getInputOutputFiles() + deployer.addPredictionCode() + code += deployer.getCode() + + # create prediction file + with open(deploy_path/""predict.py"", 'w') as f: + f.write(code) + generated_files.append(""predict.py"") + + # create create service file + with open(deploy_path/""aionCode.py"", 'w') as f: + f.write(file_header(usecase) + deployer.getServiceCode()) + generated_files.append(""aionCode.py"") + importer.addModule('seaborn') + importer.addModule('sklearn') + + # create requirements file + req_file = deploy_path/""requirements.txt"" + with 
open(req_file, ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + # create config file + config_file = deploy_path/""config.json"" + config_data = get_deploy_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + # create docker file + create_docker_file('Prediction', deploy_path,config['modelName'], generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +from mlac.timeseries.core import * +from mlac.timeseries.app import utility as utils + +def get_model_name(algo, method): + if method == 'modelBased': + return algo + '_' + 'MLBased' + if method == 'statisticalBased': + return algo + '_' + 'StatisticsBased' + " +"else: + return algo + + +def get_training_params(config, algo): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""scoring_criteria"",""test_ratio"",""optimization_param"",""dateTimeFeature""]#BugID:13217 + data = {key:value for (key,value) in config.items() if key in param_keys} + data['algorithms'] = {algo: config['algorithms'][algo]} + data['targetPath'] = config['modelName'] + return data + +def update_score_comparer(scorer): + smaller_is_better_scorer = ['neg_mean_squared_error','mse','neg_root_mean_squared_error','rmse','neg_mean_absolute_error','mae'] + if scorer.lower() in smaller_is_better_scorer: + utils.update_variable('smaller_is_better', True) + else: + utils.update_variable('smaller_is_better', False) + +def run_trainer(config): + trainer = learner() + importer = importModule() + function = global_function() + utils.importModules(importer,trainer.getPrefixModules()) + update_score_comparer(config['scoring_criteria']) + model_name = list(config['algorithms'].keys())[0] + if model_name == 'MLP': + utils.importModules(importer,trainer.getMlpCodeModules()) + trainer.addMlpCode() + elif model_name == 'LSTM': + utils.importModules(importer,trainer.getLstmCodeModules()) + trainer.addLstmCode() + trainer.addMainCode() + + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/('ModelTraining'+'_' + model_name) + deploy_path.mkdir(parents=True, exist_ok=True) + generated_files = [] + + # create the utility file + importer.addLocalModule('utility', mod_as='utils') + utility_obj = utility_function('train') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(utils.file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(utils.file_header(usecase)) + generated_files.append(""__init__.py"") + importer.addModule(""warnings"") + code = importer.getCode() + code += 'warnings.filterwarnings(""ignore"")\\n' + code += f""\\nmodel_name = '{model_name}'\\n"" + utils.append_variable('models_name',model_name) + out_files = 
{'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','metrics':'metrics.json','metaDataOutput':f'{model_name}_modelMetaData.json','production':'production.json'} + trainer.addOutputFiles(out_files) + code += trainer.getInputOutputFiles() + code += function.getCode() + trainer.addLocalFunctionsCode() + code += trainer.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + with open (deploy_path/""config.json"", ""w"") as f: + json.dump(get_training_params(config, model_name), f, indent=4) + generated_files.append(""config.json"") + + utils.create_docker_file('train', deploy_path,config['modelName'], generated_files) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +import platform +from mlac.timeseries.core import * +from .utility import * + +imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'argparse', 'mod_from': None, 'mod_as': None}, + {'module': 'platform', 'mod_from': None, 'mod_as': None } + ] + +def get_load_data_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""selected_features"",""dateTimeFeature"",""dataLocation""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_loader(config): + generated_files = [] + importer = importModule() + loader = tabularDataReader() + importModules(importer, imported_modules) + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'DataIngestion' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('load_data') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + # create the production data reader file + importer.addLocalModule('dataReader', mod_from='data_reader') + readers = ['sqlite','influx'] + if 's3' in config.keys(): + readers.append('s3') + reader_obj = data_reader(readers) + with open(deploy_path/""data_reader.py"", 'w') as f: + f.write(file_header(usecase) + reader_obj.get_code()) + generated_files.append(""data_reader.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = file_header(usecase) + code += importer.getCode() + code += loader.getInputOutputFiles() + loader.addLocalFunctionsCode() + loader.addLoadDataCode() + loader.addMainCode() + code += 
loader.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_load_data_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('load_data', deploy_path,config['modelName'],generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +from mlac.timeseries.core import * +from .utility import * + +def get_drift_params(config): + param_keys = [""modelVersion"",""problem_type"",""retrainThreshold"",""dataLocation""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_drift_analysis(config): + importer = importModule() + + monitor = drift() + monitor.addLocalFunctionsCode() + monitor.addPrefixCode() + monitor.addMainCode() + importModules(importer, monitor.getMainCodeModules()) + importer.addModule('warnings') + + generated_files = [] + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'ModelMonitoring' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('utility', mod_as='utils') + utility_obj = utility_function('load_data') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file required for creating a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = importer.getCode() + code += '\\nwarnings.filterwarnings(""ignore"")\\n' + code += monitor.getInputOutputFiles() + code += monitor.getCode() + # create serving file + with open(deploy_path/""aionCode.py"", 'w') as f: + f.write(file_header(usecase) + code) + generated_files.append(""aionCode.py"") + + # create requirements file + req_file = deploy_path/""requirements.txt"" + with open(req_file, ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + # create config file + with open (deploy_path/""config.json"", ""w"") as f: + json.dump(get_drift_params(config), f, indent=4) + generated_files.append(""config.json"") + + # create docker file + create_docker_file('input_drift', deploy_path,config['modelName'], generated_files) + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies 
Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from pathlib import Path +import json +import platform +from mlac.timeseries.core import * +from .utility import * + +output_file_map = { + 'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'} +} + +def get_selector_params(config): + param_keys = [""modelVersion"",""problem_type"",""target_feature"",""train_features"",""cat_features"",""n_components""] + data = {key:value for (key,value) in config.items() if key in param_keys} + data['targetPath'] = config['modelName'] + return data + +def run_selector(config): + select = selector() + importer = importModule() + function = global_function() + importModules(importer,select.getPrefixModules()) + importModules(importer, select.getSuffixModules()) + importModules(importer, select.getMainCodeModules()) + select.addPrefixCode() + select.addSuffixCode() + select.addMainCode() + + generated_files = [] + usecase = config['modelName']+'_'+config['modelVersion'] + deploy_path = Path(config[""deploy_path""])/'MLaC'/'FeatureEngineering' + deploy_path.mkdir(parents=True, exist_ok=True) + + # create the utility file + importer.addLocalModule('*', mod_from='utility') + utility_obj = utility_function('selector') + with open(deploy_path/""utility.py"", 'w') as f: + f.write(file_header(usecase) + utility_obj.get_code()) + generated_files.append(""utility.py"") + + # create empty init file to make a package + with open(deploy_path/""__init__.py"", 'w') as f: + f.write(file_header(usecase)) + generated_files.append(""__init__.py"") + + code = file_header(usecase) + code += importer.getCode() + code += select.getInputOutputFiles() + code += function.getCode() + select.addLocalFunctionsCode() + code += select.getCode() + with open(deploy_path/""aionCode.py"", ""w"") as f: + f.write(code) + generated_files.append(""aionCode.py"") + + with open(deploy_path/""requirements.txt"", ""w"") as f: + req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) + f.write(req) + generated_files.append(""requirements.txt"") + + config_file = deploy_path/""config.json"" + config_data = get_selector_params(config) + with open (config_file, ""w"") as f: + json.dump(config_data, f, indent=4) + generated_files.append(""config.json"") + + create_docker_file('selector', deploy_path,config['modelName'], generated_files) """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
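+# --- Illustrative sketch (not part of the generated package) -----------------
+# A hedged usage example for the run_* generators defined in this package. The
+# configuration keys mirror the param_keys consumed by get_load_data_params()
+# and get_drift_params() above; the concrete values, file paths and the call
+# pattern are assumptions for illustration only.
+example_config = {
+    'modelName': 'demo_usecase',              # becomes targetPath inside each component
+    'modelVersion': '1',
+    'problem_type': 'timeSeriesForecasting',
+    'target_feature': ['demand'],
+    'selected_features': ['demand'],
+    'dateTimeFeature': ['date'],
+    'dataLocation': 'data/demand.csv',
+    'retrainThreshold': 100,
+    'deploy_path': 'output',                  # MLaC/<Component> folders are created here
+}
+# Each generator writes utility.py, __init__.py, aionCode.py, requirements.txt,
+# config.json and a Dockerfile (plus component-specific extras) into its own
+# sub-directory, e.g.:
+#   run_loader(example_config)          # -> output/MLaC/DataIngestion
+#   run_selector(example_config)        # -> output/MLaC/FeatureEngineering
+#   run_drift_analysis(example_config)  # -> output/MLaC/ModelMonitoring
+# ------------------------------------------------------------------------------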
+*/ +"""""" + +from .imports import importModule +from .load_data import tabularDataReader +from .transformer import transformer as profiler +from .selector import selector +from .trainer import learner +from .deploy import deploy +from .functions import global_function + +import pandas as pd +import numpy as np +from appbe.eda import ux_eda +from sklearn.preprocessing import LabelEncoder +import json +import matplotlib.pyplot as plt +import os +import mpld" +"3 +import subprocess +import os +import sys +import re +import json +import pandas as pd + +from appbe.eda import ux_eda +from aif360.datasets import StandardDataset +from aif360.metrics import ClassificationMetric +from aif360.datasets import BinaryLabelDataset + +def get_metrics(request): + dataFile = os.path.join(request.session['deploypath'], ""data"", ""preprocesseddata.csv.gz"") + predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py') + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""display.json"") + f = open(displaypath, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + Target_feature = configSettings['targetFeature'] + + outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + + df = pd.read_csv(dataFile) + df_p = pd.DataFrame.from_dict(predict_dict['data']) + + d3_url = request.GET.get('d3_url') + mpld3_url = request.GET.get('mpld3_url') + df_temp = request.GET.get('feature') + global metricvalue + metricvalue = request.GET.get('metricvalue') + + Protected_feature = df_temp + + df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability']) + df_p.rename(columns={'prediction': Target_feature}, inplace=True) + + + eda_obj = ux_eda(dataFile, optimize=1) + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + + features_to_Encode = features + + categorical_names = {} + encoders = {} + + for feature in features_to_Encode: + le = LabelEncoder() + le.fit(df[feature]) + df[feature] = le.transform(df[feature]) + le.fit(df_p[feature]) + df_p[feature] = le.transform(df_p[feature]) + categorical_names[feature] = le.classes_ + encoders[feature] = le + + new_list = [item for item in categorical_names[Protected_feature] if not(pd.isnull(item)) == True] + claas_size = len(new_list) + + if claas_size > 10: + return 'HeavyFeature' + + metrics = fair_metrics(categorical_names, Protected_feature,Target_feature, claas_size, df, df_p) + figure = plot_fair_metrics(metrics) + html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url) + return html_graph + +def fair_metrics(categorical_names, Protected_feature,Target_feature, claas_size, df, df_p): + cols = [metricvalue] + obj_fairness = [[0]] + fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols) + for indx in range(claas_size): + priv_group = categorical_names[Protected_feature][indx] + privileged_class = np.where(categorical_names[Protected_feature] == priv_group)[0] + data_orig = StandardDataset(df, + label_name=Target_feature, + favorable_classes=[1], + protected_attribute_names=[Protected_feature], + privileged_classes=[privileged_class]) + attr = data_orig.protected_attribute_names[0] + idx = 
data_orig.protected_attribute_names.index(attr) + privileged_groups = [{attr:data_orig.privileged_protected_attributes[idx][0]}] + + unprivileged_size = data_orig.unprivileged_protected_attributes[0].size + unprivileged_groups = [] + for idx2 in range(unprivileged_size): + unprivileged_groups.extend([{attr:data_orig.unprivileged_protected_attributes[idx][idx2]}]) + + bld = BinaryLabelDataset(df=df, label_names=[Target_feature], protected_attribute_names=[Protected_feature]) + bld_p = BinaryLabelDataset(df=df_p, label_names=[Target_feature], protected_attribute_names=[Protected_feature]) + + ClsMet = ClassificationMetric(bld, bld_p,unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) + + if metricvalue == ""Theil Index"": + row = pd.DataFrame([[ClsMet.theil_index()]], + columns = cols , + index = [priv_group]) + elif metricvalue == ""Equal Opportunity Difference"": + row = pd.DataFrame([[ClsMet.equal_opportunity_difference()]], + columns = cols , + index = [priv_group]) + elif metricvalue == ""Disparate Impact"": + row = pd.DataFrame([[ClsMet.disparate_impact()]], + columns = cols , + index = [priv_group]) + elif metricvalue == ""Statistical Parity Difference"": + row = pd.DataFrame([[ClsMet.statistical_parity_difference()]], + columns = cols , + index = [priv_group]) + + #fair_metrics = fair_metrics.append(row) + fair_metrics = pd.concat([fair_metrics,row]) + + return fair_metrics + +def plot_fair_metrics(fair_metrics): + import matplotlib.patches as patches + plt.style.use('default') + + import seaborn as sns + fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1) + + plt.subplots_adjust( + left = 0.125, + bottom = 0.1, + right = 0.9, + top = 0.9, + wspace = .5, + hspace = 1.1 + ) + + y_title_margin = 1.2 + + plt.suptitle(""Fairness metrics"", y = 1.09, fontsize=20) + sns.set(style=""dark"") + + cols = fair_metrics.columns.values + obj = fair_metrics.loc['objective'] + if metricvalue == ""Theil Index"": + size_rect = [0.5] + rect = [-0.1] + bottom = [-0.1] + top = [2] + bound = [[-0.1,0.1]] + + elif metricvalue == ""Equal Opportunity Difference"": + size_rect = [0.2] + rect = [-0.1] + bottom = [-1] + top = [1] + bound = [[-0.1,0.1]] + elif metricvalue == ""Disparate Impact"": + size_rect = [0.4] + rect = [0.8] + bottom = [0] + top = [2] + bound = [[-0.1,0.1]] + elif metricvalue == ""Statistical Parity Difference"": + size_rect = [0.2] + rect = [-0.1] + bottom = [-1] + top = [1] + bound = [[-0.1,0.1]] + + for attr in fair_metrics.index[1:len(fair_metrics)].values: + check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)] + + for i in range(0,1): + plt.subplot(1, 1, i+1) + xx = fair_metrics.index[1:len(fair_metrics)].values.tolist() + yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist() + + palette = sns.color_palette('husl', len(xx)) + ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx) + + index = 0 + for p in zip(ax.get_xticks(), yy): + if (p[1] > 2.0): + _color = palette.as_hex()[index] + _val = 'Outlier(' + str(round(p[1],3)) + ')' + ax.text(p[0]-0.5, 0.02, _val, color=_color) + else: + ax.text(p[0], p[1]+0.05, round(p[1],3), color='k') + index = index + 1 + + plt.ylim(bottom[i], top[i]) + plt.setp(ax.patches, linewidth=0) + ax.get_xaxis().set_visible(False) + + ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1) + ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=""green"", linewidth=1, linestyle='solid')) + # plt.axhline(obj[i], 
color='black', alpha=0.3) + + plt.title(cols[i], fontname=""Times New Roman"", size=20,fontweight=""bold"") + ax.set_ylabel('') + ax.set_xlabel('') + + return fig import json +import os + +def get_brier_score(request): + try: + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""output.json"") + with open(displaypath) as file: + config = json.load(file) + problem_type = config[""data""][""ModelType""] + brier_score = config[""data""][""matrix""][""BRIER_SCORE""] + print(problem_type,brier_score) + except Exception as e: + #print(str(e)) + raise ValueError(str(e)) + return problem_type, brier_score + import numpy as np +import joblib +import pandas as pd +from appbe.eda import ux_eda +from sklearn.preprocessing import MinMaxScaler, LabelEncoder + +# from pathlib import Path +import configparser +import json +import matplotlib.pyplot as plt +import numpy as np +import os +def trustedai_uq(request): + try: + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""display.json"") + f = open(displaypath, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + + TargetFeature = configSettings['targetFeature'] + problemType = configSettings['problemType'] + raw_data_loc = configSettings['preprocessedData'] + dataLocation = configSettings['postprocessedData'] + selectedfeatures = request.GET.get('values') + + if problemType.lower() == ""classification"": + model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model'])) + df = pd.read_csv(dataLocation) + trainfea = df.columns.tolist() + + feature = json.loads(selectedfeatures) + # feature = "","".join(featurs) + # features = ['PetalLengthCm','PetalWidthCm'] + targ = TargetFeature + tar =[targ] + from bin.aion_uncertainties import aion_uq + outputStr = aion_uq(model,dataLocation,feature,tar) + return outputStr + + if problemType.lower() == ""regression"": + model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model'])) + df = pd.read_csv(dataLocation) + trainfea = df.columns.tolist() + feature = json.loads(selectedfeatures) + # feature = "","".join(featurs) + # features = ['PetalLengthCm','PetalWidthCm'] + targ = TargetFeature + tar =[targ] + from bin.aion_uncertainties import aion_uq + outputStr = aion_uq(model,dataLocation,feature,tar) + print(outputStr) + return outputStr + except Exception as e: + print('error',e) + return e ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
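+# --- Illustrative sketch (not part of this module) ----------------------------
+# A minimal, self-contained example of the aif360 pattern used by fair_metrics()
+# above: wrap the ground truth and the predictions in BinaryLabelDataset objects
+# and compare them with ClassificationMetric. The toy frame and the choice of
+# 'gender' as the protected attribute are assumptions for illustration.
+import pandas as pd
+from aif360.datasets import BinaryLabelDataset
+from aif360.metrics import ClassificationMetric
+
+truth = pd.DataFrame({'gender': [0, 0, 1, 1], 'label': [0, 1, 1, 1]})
+preds = truth.copy()
+preds['label'] = [0, 1, 0, 1]   # model output for the same rows
+
+bld_true = BinaryLabelDataset(df=truth, label_names=['label'], protected_attribute_names=['gender'])
+bld_pred = BinaryLabelDataset(df=preds, label_names=['label'], protected_attribute_names=['gender'])
+
+metric = ClassificationMetric(bld_true, bld_pred,
+                              unprivileged_groups=[{'gender': 0}],
+                              privileged_groups=[{'gender': 1}])
+print('Statistical Parity Difference:', metric.statistical_parity_difference())
+print('Disparate Impact:', metric.disparate_impact())
+# ------------------------------------------------------------------------------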
+* +''' +import json +from pathlib import Path + +def get_metrics(request): + output = {} + output_path = Path(request.session['deploypath'])/""etc""/""output.json"" + if not output_path.exists(): + raise ValueError('output json path does not exist, something unexpected happen') + with open(output_path) as file: + config = json.load(file) + output['problem_type'] = config.get('data',{}).get('ModelType') + output['best_model'] = config.get('data',{}).get('BestModel') + output['hyper_params'] = config.get('data',{}).get('params') + output['best_score'] = str(round(float(config.get('data',{}).get('BestScore')), 2)) + output['scoring_method'] = config.get('data',{}).get('ScoreType') + if output['problem_type'] == 'classification': + output['mcc_score'] = str(round(float(config.get('data',{}).get('matrix',{}).get('MCC_SCORE', 0.0)), 2)) + else: + output['mcc_score'] = 'NA' + return output + import base64 +import io +import json +import os +import urllib + +import joblib +import numpy as np +import pandas as pd +from SALib.analyze import sobol + + +class sensitivityAnalysis(): + def __init__(self, model, problemType, data, target, featureName): + self.model = model + self.probemType = problemType + self.data = data + self.target = target + " +"self.featureName = featureName + self.paramvales = [] + self.X = [] + self.Y = [] + self.problem = {} + + def preprocess(self): + + self.X = self.data[self.featureName].values + self.Y = self.data[self.target].values + + + bounds = [[np.min(self.X[:, i]), np.max(self.X[:, i])] for i in range(self.X.shape[1])] + + self.problem = { + 'num_vars': self.X.shape[1], + 'names': self.featureName, + 'bounds': bounds + } + + def generate_samples(self,size): + from SALib.sample import sobol + self.param_values = sobol.sample(self.problem, size) + + def calSiClass(self, satype,isML,isDL): + try: + + D = self.problem['num_vars'] + S = np.zeros(self.X.shape[1]) + + for class_label in np.unique(self.Y): + if isML: + y_pred_poba = self.model.predict_proba(self.param_values)[:, class_label] + if isDL: + y_pred_poba = self.model.predict(self.param_values)[:,class_label] + + + if not y_pred_poba.size % (2 * D + 2) == 0: + lim = y_pred_poba.size - y_pred_poba.size % (2 * D + 2) + y_pred_poba = y_pred_poba[:lim] + Si = sobol.analyze(self.problem, y_pred_poba) + if satype.lower() == 'first': + S += Si['S1'] + else: + S += Si['ST'] + S /= len(np.unique(self.Y)) + return S + except Exception as e: + print('Error in calculating Si for Classification: ', str(e)) + raise ValueError(str(e)) + + + def calSiReg(self, satype,isML,isDL): + try: + + D = self.problem['num_vars'] + + Y = np.array([self.model.predict(X_sample.reshape(1, -1)) for X_sample in self.param_values]) + + Y = Y.reshape(-1) + + if not Y.size % (2 * D + 2) == 0: + lim = Y.size - Y.size % (2 * D + 2) + Y = Y[:lim] + Si = sobol.analyze(self.problem, Y) + if satype.lower() == 'first': + S = Si['S1'] + else: + S = Si['ST'] + return S + except Exception as e: + print('Error in calculating Si for Regression: ', str(e)) + raise ValueError(str(e)) + + + def plotSi(self, S, saType): + try: + import matplotlib.pyplot as plt + if saType.lower() == 'first': + title, label = 'Sensitivity Analysis', 'First order' + else: + title, label = 'Sensitivity Analysis', 'Total order' + x = np.arange(len(self.problem['names'])) + width = 0.35 + fig, ax = plt.subplots() + ax.bar(x - width / 2, S, width, label=label) + ax.set_xticks(x) + ax.set_xlabel('Features') + ax.set_ylabel('Sensitivity Indices') + ax.set_title(title) + 
ax.set_xticklabels(self.problem['names'], rotation=45, ha=""right"") + ax.legend() + + plt.tight_layout() + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + SAimage = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as e: + print(e) + SAimage = '' + return SAimage + +def checkModelType(modelName): + isML= False + isDL = False + + if modelName in [""Neural Network"", ""Convolutional Neural Network (1D)"", ""Recurrent Neural Network"",""Recurrent Neural Network (GRU)"", + ""Recurrent Neural Network (LSTM)"", ""Neural Architecture Search"", ""Deep Q Network"", ""Dueling Deep Q Network""]: + isDL = True + elif modelName in [""Linear Regression"",""Lasso"",""Ridge"",""Logistic Regression"", ""Naive Bayes"", ""Decision Tree"", ""Random Forest"", ""Support Vector Machine"", ""K Nearest Neighbors"", ""Gradient Boosting"", + ""Extreme Gradient Boosting (XGBoost)"", ""Light Gradient Boosting (LightGBM)"", ""Categorical Boosting (CatBoost)"",""Bagging (Ensemble)""]: + isML = True + + return isML,isDL + + + +def startSA(request): + + try: + displaypath = os.path.join(request.session['deploypath'], ""etc"", ""display.json"") + if not os.path.exists(displaypath): + raise Exception('Config file not found.') + with open(displaypath) as file: + config = json.load(file) + + probelmType = config['problemType'] + + if probelmType.lower() not in ['classification','regression']: + raise Exception(f""Probolem Type: {probelmType} not supported"") + + isML,isDL = checkModelType(config['modelname']) + sample_size = 1024 + if isML: + model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model'])) + sample_size = 2048 + if isDL: + from tensorflow.keras.models import load_model + model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model'])) + sample_size = 512 + + target = config['targetFeature'] + featureName = config['modelFeatures'] + dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz') + if not os.path.exists(dataPath): + raise Exception('Data file not found.') + + from utils.file_ops import read_df_compressed + read_status,dataFrame = read_df_compressed(dataPath) + + obj = sensitivityAnalysis(model, probelmType, dataFrame, target, featureName) + obj.preprocess() + obj.generate_samples(sample_size) + submitType = str(request.GET.get('satype')) + saType = 'first' if submitType == 'first' else 'total' + if probelmType.lower() == 'classification': + SA_values = obj.calSiClass(saType,isML,isDL) + else: + SA_values = obj.calSiReg(saType,isML,isDL) + if SA_values.size and saType: + + graph = obj.plotSi(SA_values, saType) + if graph: + outputJson = {'Status': ""Success"", ""graph"": graph} + else: + outputJson = {'Status': ""Error"", ""graph"": '','reason':'Error in Plotting Graph'} + else: + outputJson = {'Status': ""Error"", ""graph"": '','reason':'Error in calculating Si values'} + output_json = json.dumps(outputJson) + return output_json + except Exception as e: + print(str(e)) + raise ValueError(str(e)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +import numpy as np +import pandas as pd +import sklearn.metrics as metrics +from collections import defaultdict +from sklearn.metrics import confusion_matrix +import re +import shutil +import scipy.stats as st +import json +import os,sys +import glob +import logging +from utils.file_ops import read_df_compressed +class Visualization(): + def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file): + self.dataframe = dataframe + self.displayjson = {} + self.visualizationJson = visualizationJson + self.dateTimeColumn = dateTimeColumn + self.deployPath = deployPath + #shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath) + if learner_type == 'ML' and modelname != 'Neural Architecture Search': + if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))): + os.remove(os.path.join(self.deployPath,'explainable_ai.py')) + shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath) + # os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + try: + os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + except FileExistsError: + os.remove(os.path.join(self.deployPath,'aion_xai.py')) + os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + elif learner_type == 'DL' or modelname == 'Neural Architecture Search': + if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))): + os.remove(os.path.join(self.deployPath,'explainable_ai.py')) + shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath) + # os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + try: + os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + except FileExistsError: + os.remove(os.path.join(self.deployPath,'aion_xai.py')) + os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py')) + self.jsondeployPath = deployPath + #self.deployPath = self.deployPath+'visualization/' + self.dataFolderLocation = dataFolderLocation + self.vectorizerFeatures = vectorizerFeatures + self.textFeatures 
= textFeatures + self.emptyFeatures = emptyFeatures + ''' + try: + os.makedirs(self.deployPath) + except OSError as e: + print(""\\nFolder Already Exists"") + ''' + self.numericContinuousFeatures = numericContinuousFeatures + self.discreteFeatures = discreteFeatures + self.categoricalFeatures = categoricalFeatures + self.modelFeatures = modelFeatures + self.modeltype = modeltype + self.targetFeature = targetFeature + self.displayjson['usecasename'] = str(usecasename) + self.displayjson['version'] = str(version) + self.displayjson['problemType'] = str(self.modeltype) + self.displayjson['targetFeature'] = self.targetFeature + self.displayjson['numericalFeatures'] = numericalFeatures + self.displayjson['nonNumericFeatures'] = nonNumericFeatures + self.displayjson['modelFeatures'] = self.modelFeatures + self.displayjson['textFeatures'] = self.textFeatures + self.displayjson['emptyFeatures'] = self.emptyFeatures + self.displayjson['modelname']= str(modelname) + self.displayjson['preprocessedData'] = str(original_data_file) + self.displayjson['nrows'] = str(nrows) + self.displayjson['ncols'] = str(ncols) + self.displayjson['saved_model'] = str(saved_model) + self.displayjson['scoreParam'] = str(scoreParam) + self.displayjson['labelMaps'] = eval(str(labelMaps)) + self.original_data_file = original_data_file + self.displayjson['featureReduction'] = featureReduction + if featureReduction == 'True': + self.displayjson['reduction_data_file'] = reduction_data_file + else: + self.displayjson['reduction_data_file'] = '' + self.pred_filename = predicted_data_file + self.profiled_data_file = profiled_data_file + self.displayjson['predictedData'] = predicted_data_file + self.displayjson['postprocessedData'] = profiled_data_file + #self.trained_data_file = trained_data_file + #self.displayjson['trainingData'] = trained_data_file + #self.displayjson['categorialFeatures']=categoricalFeatures + #self.displayjson['discreteFeatures']=discreteFeatures + #self.displayjson['continuousFeatures']=numericContinuousFeatures + #y = json.dumps(self.displayjson) + #print(y) + self.labelMaps = labelMaps + self.log = logging.getLogger('eion') + + def visualizationrecommandsystem(self): + try: + import tensorflow.keras.utils as kutils + datasetid = self.visualizationJson['datasetid'] + self." 
+"log.info('\\n================== Data Profiling Details==================') + datacolumns=list(self.dataframe.columns) + self.log.info('================== Data Profiling Details End ==================\\n') + self.log.info('================== Features Correlation Details ==================\\n') + self.log.info('\\n================== Model Performance Analysis ==================') + if os.path.exists(self.pred_filename): + try: + status,df=read_df_compressed(self.pred_filename) + if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection': + y_actual = df['actual'].values + y_predict = df['predict'].values + y_actual = kutils.to_categorical(y_actual) + y_predict = kutils.to_categorical(y_predict) + classes = df.actual.unique() + n_classes = y_actual.shape[1] + self.log.info('-------> ROC AUC CURVE') + roc_curve_dict = [] + for i in classes: + try: + classname = i + if str(self.labelMaps) != '{}': + inv_map = {v: k for k, v in self.labelMaps.items()} + classname = inv_map[i] + fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i]) + roc_auc = metrics.auc(fpr, tpr) + class_roc_auc_curve = {} + class_roc_auc_curve['class'] = str(classname) + fprstring = ','.join(str(v) for v in fpr) + tprstring = ','.join(str(v) for v in tpr) + class_roc_auc_curve['FP'] = str(fprstring) + class_roc_auc_curve['TP'] = str(tprstring) + roc_curve_dict.append(class_roc_auc_curve) + self.log.info('----------> Class: '+str(classname)) + self.log.info('------------> ROC_AUC: '+str(roc_auc)) + self.log.info('------------> False Positive Rate (x Points): '+str(fpr)) + self.log.info('------------> True Positive Rate (y Points): '+str(tpr)) + except: + pass + self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict + self.log.info('-------> Precision Recall CURVE') + precision_recall_curve_dict = [] + for i in range(n_classes): + try: + lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i]) + classname = i + if str(self.labelMaps) != '{}': + inv_map = {v: k for k, v in self.labelMaps.items()} + classname = inv_map[i] + roc_auc = metrics.auc(lr_recall,lr_precision) + class_precision_recall_curve = {} + class_precision_recall_curve['class'] = str(classname) + Precisionstring = ','.join(str(round(v,2)) for v in lr_precision) + Recallstring = ','.join(str(round(v,2)) for v in lr_recall) + class_precision_recall_curve['Precision'] = str(Precisionstring) + class_precision_recall_curve['Recall'] = str(Recallstring) + precision_recall_curve_dict.append(class_precision_recall_curve) + except: + pass + + self.log.info('----------> Class: '+str(classname)) + self.log.info('------------> ROC_AUC: '+str(roc_auc)) + self.log.info('------------> Recall (x Points): '+str(lr_precision)) + self.log.info('------------> Precision (y Points): '+str(lr_recall)) + self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict + status,predictdataFrame=read_df_compressed(self.displayjson['predictedData']) + except Exception as e: + self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e)) + self.log.info('================== Model Performance Analysis End ==================\\n') + self.log.info('\\n================== For Descriptive Analysis of Model Features ==================') + + + outputfile = os.path.join(self.jsondeployPath,'etc','display.json') + with open(outputfile, 'w') as fp: + json.dump(self.displayjson, fp) + self.log.info('================== For Descriptive Analysis of 
Model Features End ==================\\n') + except Exception as inst: + self.log.info('Visualization Failed !....'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid): + title = 'aion_visualization_'+xcolumn+""_""+ycolumn+""_linechart"" + yaxisname = 'Average '+ycolumn + datasetindex = datasetid + visulizationjson = '[{""_id"": ""543234"",""_type"": ""visualization"",""_source"": {""title"": ""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": ""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = visulizationjson+'\\\\""type\\\\"":\\\\""line\\\\"",\\\\""params\\\\"":{\\\\""type\\\\"":\\\\""line\\\\"",\\\\""grid\\\\"":{\\\\""categoryLines\\\\"":false,\\\\""style\\\\"":{\\\\""color\\\\"":\\\\""#eee\\\\""}},\\\\""categoryAxes\\\\"":[{\\\\""id\\\\"":\\\\""CategoryAxis-1\\\\"",\\\\""type\\\\"":\\\\""category\\\\"",\\\\""position\\\\"":\\\\""bottom\\\\"",\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""title\\\\"":{}}],\\\\""valueAxes\\\\"":[{\\\\""id\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""name\\\\"":\\\\""LeftAxis-1\\\\"",\\\\""type\\\\"":\\\\""value\\\\"",\\\\""position\\\\"":\\\\""left\\\\"",\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\"",\\\\""mode\\\\"":\\\\""normal\\\\""},\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""rotate\\\\"":0,\\\\""filter\\\\"":false,\\\\""truncate\\\\"":100},\\\\""title\\\\"":' + visulizationjson = visulizationjson+'{\\\\""text\\\\"":\\\\""'+yaxisname+'\\\\""}}],\\\\""seriesParams\\\\"":[{\\\\""show\\\\"":\\\\""true\\\\"",\\\\""type\\\\"":\\\\""line\\\\"",\\\\""mode\\\\"":\\\\""normal\\\\"",\\\\""data\\\\"":' + visulizationjson = visulizationjson+'{\\\\""label\\\\"":\\\\""'+yaxisname+'\\\\"",\\\\""id\\\\"":\\\\""1\\\\""},\\\\""valueAxis\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""drawLinesBetweenPoints\\\\"":true,\\\\""showCircles\\\\"":true}],\\\\""addTooltip\\\\"":true,\\\\""addLegend\\\\"":true,\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""times\\\\"":[],\\\\""addTimeMarker\\\\"":false},\\\\""aggs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""avg\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+str(ycolumn)+'\\\\""}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+xcolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""desc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",""uiStateJSON"": ""{}"", ""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {""searchSourceJSON"": ""{\\\\""index\\\\"":\\\\""'+datasetindex+'\\\\"",\\\\""query\\\\"":{\\\\""query\\\\"":\\\\""\\\\"",\\\\""language\\\\"":\\\\""lucene\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"": {""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid): + title = 
'aion_visualization_'+xcolumn+""_""+ycolumn+""_barchart"" + yaxisname = 'Average '+ycolumn + datasetindex = datasetid + visulizationjson = '[{""_id"": ""123456"",""_type"": ""visualization"",""_source"": {""title"":""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": ""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = visulizationjson+'\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""params\\\\"":{\\\\""addLegend\\\\"":true,\\\\""addTimeMarker\\\\"":false,\\\\""addTooltip\\\\"":true,\\\\""categoryAxes\\\\"":[{\\\\""id\\\\"":\\\\""CategoryAxis-1\\\\"",\\\\""labels\\\\"":{\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""position\\\\"":\\\\""bottom\\\\"",\\\\""scale\\\\"":{\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""title\\\\"":{},\\\\""type\\\\"":\\\\""category\\\\""}],\\\\""grid\\\\"":{\\\\""categoryLines\\\\"":false,\\\\""style\\\\"":{\\\\""color\\\\"":\\\\""#eee\\\\""}},\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""seriesParams\\\\"":[{\\\\""data\\\\"":{\\\\""id\\\\"":\\\\""1\\\\"",' + visulizationjson = visulizationjson+'\\\\""label\\\\"":\\\\""'+yaxisname+'\\\\""},' + visulizationjson = visulizationjson+'\\\\""drawLinesBetweenPoints\\\\"":true,\\\\""mode\\\\"":\\\\""stacked\\\\"",\\\\""show\\\\"":\\\\""true\\\\"",\\\\""showCircles\\\\"":true,\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""valueAxis\\\\"":\\\\""ValueAxis-1\\\\""}],\\\\""times\\\\"":[],\\\\""type\\\\"":\\\\""histogram\\\\"",\\\\""valueAxes\\\\"":[{\\\\""id\\\\"":\\\\""ValueAxis-1\\\\"",\\\\""labels\\\\"":{\\\\""filter\\\\"":false,\\\\""rotate\\\\"":0,\\\\""show\\\\"":true,\\\\""truncate\\\\"":100},\\\\""name\\\\"":\\\\""LeftAxis-1\\\\"",\\\\""position\\\\"":\\\\""left\\\\"",\\\\""scale\\\\"":{\\\\""mode\\\\"":\\\\""normal\\\\"",\\\\""type\\\\"":\\\\""linear\\\\""},\\\\""show\\\\"":true,\\\\""style\\\\"":{},\\\\""title\\\\"":' + visulizationjson = visulizationjson+'{\\\\""text\\\\"":\\\\""'+yaxisname+'\\\\""},' + visulizationjson = visulizationjson+'\\\\""type\\\\"":\\\\""value\\\\""}]},\\\\""aggs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""avg\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+str(xcolumn)+'\\\\""}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+ycolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""asc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",""uiStateJSON"":""{}"",""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {' + visulizationjson = visulizationjson+'""searchSourceJSON"": ""{\\\\""index\\\\"":\\\\""'+datasetindex+'\\\\"",\\\\""query\\\\"":{\\\\""language\\\\"":\\\\""lucene\\\\"",\\\\""query\\\\"":\\\\""\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"":{""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def drawpiechart(self,xcolumn,deploy_path,datasetid): + title = 'aion_visualization_'+xcolumn+""_piechart"" + datasetindex = datasetid + visulizationjson = '[{""_id"": ""123456"",""_type"": ""visualization"",""_source"": {""title"":""'+title+'"",' + visulizationjson = visulizationjson+'""visState"": 
""{\\\\""title\\\\"":\\\\""'+title+'\\\\"",' + visulizationjson = visulizationjson+'\\\\""type\\\\"":\\\\""pie\\\\"",\\\\""params\\\\"":{\\\\""type\\\\"":\\\\""pie\\\\"",\\\\""addTooltip\\\\"":true,\\\\""addLegend\\\\"":true,\\\\""legendPosition\\\\"":\\\\""right\\\\"",\\\\""isDonut\\\\"":true,\\\\""labels\\\\"":{\\\\""show\\\\"":false,\\\\""values\\\\"":true,\\\\""last_level\\\\"":true,\\\\""truncate\\\\"":100}},\\\\""ag" +"gs\\\\"":[{\\\\""id\\\\"":\\\\""1\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""count\\\\"",\\\\""schema\\\\"":\\\\""metric\\\\"",\\\\""params\\\\"":{}},{\\\\""id\\\\"":\\\\""2\\\\"",\\\\""enabled\\\\"":true,\\\\""type\\\\"":\\\\""terms\\\\"",\\\\""schema\\\\"":\\\\""segment\\\\"",\\\\""params\\\\"":{\\\\""field\\\\"":\\\\""'+xcolumn+'\\\\"",\\\\""size\\\\"":100,\\\\""order\\\\"":\\\\""asc\\\\"",\\\\""orderBy\\\\"":\\\\""1\\\\"",\\\\""otherBucket\\\\"":false,\\\\""otherBucketLabel\\\\"":\\\\""Other\\\\"",\\\\""missingBucket\\\\"":false,\\\\""missingBucketLabel\\\\"":\\\\""Missing\\\\""}}]}"",' + visulizationjson = visulizationjson+'""uiStateJSON"": ""{}"",""description"": """",""version"": 1,""kibanaSavedObjectMeta"": {""searchSourceJSON"":""{\\\\""index\\\\"":\\\\""'+datasetid+'\\\\"",\\\\""query\\\\"":{\\\\""query\\\\"":\\\\""\\\\"",\\\\""language\\\\"":\\\\""lucene\\\\""},\\\\""filter\\\\"":[]}""}},""_migrationVersion"": {""visualization"": ""6.7.2""}}]' + filename = deploy_path+title+'.json' + f = open(filename, ""w"") + f.write(str(visulizationjson)) + f.close() + + def get_confusion_matrix(self,df): + setOfyTrue = set(df['actual']) + unqClassLst = list(setOfyTrue) + if(str(self.labelMaps) != '{}'): + inv_mapping_dict = {v: k for k, v in self.labelMaps.items()} + unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) + unqClassLst2 = list(unqClassLst2) + else: + unqClassLst2 = unqClassLst + indexName = [] + columnName = [] + for item in unqClassLst2: + indexName.append(""act:""+str(item)) + columnName.append(""pre:""+str(item)) + result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName) + resultjson = result.to_json(orient='index') + return(resultjson) + + def DistributionFinder(self,data): + try: + + distributionName ="""" + sse =0.0 + KStestStatic=0.0 + dataType="""" + if(data.dtype == ""float64""): + dataType =""Continuous"" + elif(data.dtype ==""int"" or data.dtype ==""int64""): + dataType=""Discrete"" + + if(dataType == ""Discrete""): + distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson] + index, counts = np.unique(abs(data.astype(int)),return_counts=True) + + if(len(index)>=2): + best_sse = np.inf + y1=[] + total=sum(counts) + mean=float(sum(index*counts))/total + variance=float((sum(index**2*counts) -total*mean**2))/(total-1) + dispersion=mean/float(variance) + theta=1/float(dispersion) + r=mean*(float(theta)/1-theta) + + for j in counts: + y1.append(float(j)/total) + + pmf1=st.bernoulli.pmf(index,mean) + pmf2=st.binom.pmf(index,len(index),p=mean/len(index)) + pmf3=st.geom.pmf(index,1/float(1+mean)) + pmf4=st.nbinom.pmf(index,mean,r) + pmf5=st.poisson.pmf(index,mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1- pmf5, 2.0)) + + sselist=[sse1,sse2,sse3,sse4,sse5] + for i in range(0,len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif 
(len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName =best_distribution + sse=best_sse + + elif(dataType == ""Continuous""): + + distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] + + best_distribution = st.norm.name + best_sse = np.inf + datamin=data.min() + datamax=data.max() + nrange=datamax-datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') + + params = distribution.fit(data.astype(float)) + # Separate parts of parameters + arg = params[:-2] + loc = params[-2] + scale = params[-1] + + # Calculate fitted PDF and error with fit in distribution + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if(best_sse >sse > 0): + best_distribution = distribution.name + best_sse = sse + + distributionName =best_distribution + sse=best_sse + except: + response = str(sys.exc_info()[0]) + message='Job has Failed'+response + print(message) + return distributionName,sse + + + + import autograd +import autograd.numpy as np +import scipy.optimize +from autograd import grad +from autograd.scipy.special import logsumexp +from sklearn.cluster import KMeans + + +class HMM: + """""" + A Hidden Markov Model with Gaussian observations with + unknown means and known precisions. + """""" + def __init__(self, X, config_dict=None): + self.N, self.T, self.D = X.shape + self.K = config_dict['K'] # number of HMM states + self.I = np.eye(self.K) + self.Precision = np.zeros([self.D, self.D, self.K]) + self.X = X + if config_dict['precision'] is None: + for k in np.arange(self.K): + self.Precision[:, :, k] = np.eye(self.D) + else: + self.Precision = config_dict['precision'] + self.dParams_dWeights = None + self.alphaT = None # Store the final beliefs. + self.beta1 = None # store the first timestep beliefs from the beta recursion. + self.forward_trellis = {} # stores \\alpha + self.backward_trellis = {} # stores \\beta + + def initialize_params(self, seed=1234): + np.random.seed(seed) + param_dict = {} + A = np.random.randn(self.K, self.K) + # use k-means to initialize the mean parameters + X = self.X.reshape([-1, self.D]) + kmeans = KMeans(n_clusters=self.K, random_state=seed, + n_init=15).fit(X) + labels = kmeans.labels_ + _, counts = np.unique(labels, return_counts=True) + pi = counts + phi = kmeans.cluster_centers_ + + param_dict['A'] = np.exp(A) + param_dict['pi0'] = pi + param_dict['phi'] = phi + return self.pack_params(param_dict) + + def unpack_params(self, params): + param_dict = dict() + K = self.K + # For unpacking simplex parameters: have packed them as + # log(pi[:-1]) - log(pi[-1]). 
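+        # The K*(K-1) free transition parameters are exponentiated and a zero column
+        # appended (a softmax with the last logit pinned to 0), then each row is
+        # normalised so A is a valid stochastic matrix; pi0 is recovered the same way.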
+ unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1), + np.zeros((K, 1)), + axis=1) + ) + Z = np.sum(unnorm_A[:, :-1], axis=1) + unnorm_A /= Z[:, np.newaxis] + norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True) + param_dict['A'] = norm_A + + unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0)) + Z = np.sum(unnorm_pi[:-1]) + unnorm_pi /= Z + param_dict['pi0'] = unnorm_pi / unnorm_pi.sum() + param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K) + return param_dict + + def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False): + """""" + Computes the weighted marginal probability of the sequence xseq given parameters; + weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B) + :param xseq: T * D + :param pi: K * 1 + :param phi: D * K + :param wseq: T * 1 + :param A: + :return: + """""" + ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma) + alpha = np.log(pi.ravel()) + wseq[0] * ll[0] + if wseq[0] == 0: + self.forward_trellis[0] = alpha[:, np.newaxis] + for t in np.arange(1, self.T): + alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t] + if wseq[t] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.forward_trellis[t] = alpha[:, np.newaxis] + if store_belief: + # store the final belief + self.alphaT = alpha + return logsumexp(alpha) + + def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False): + """""" + Runs beta recursion; + weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B) + :param xseq: T * D + :param pi: K * 1 + :param phi: D * K + :param wseq: T * 1 + :param A: + :return: + """""" + ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma) + beta = np.zeros_like(pi.ravel()) # log(\\beta) of all ones. + max_t = ll.shape[0] + if wseq[max_t - 1] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.backward_trellis[max_t - 1] = beta[:, np.newaxis] + for i in np.arange(1, max_t): + t = max_t - i - 1 + beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1) + if wseq[t] == 0: + # store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T + self.backward_trellis[t] = beta[:, np.newaxis] + # account for the init prob + beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel()) + if store_belief: + # store the final belief + self.beta1 = beta + return logsumexp(beta) + + def weighted_loss(self, params, weights): + """""" + For LOOCV / IF computation within a single sequence. 
Uses weighted alpha recursion + :param params: + :param weights: + :return: + """""" + param_dict = self.unpack_params(params) + logp = self.get_prior_contrib(param_dict) + logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights) + return -logp + + def loss_at_missing_timesteps(self, weights, params): + """""" + :param weights: zeroed out weights indicate missing values + :param params: packed parameters + :return: + """""" + # empty forward and" +"backward trellis + self.clear_trellis() + param_dict = self.unpack_params(params) + # populate forward and backward trellis + lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights, + store_belief=True ) + lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'], + param_dict['phi'], + self.Precision, + param_dict['A'], + weights, + store_belief=True) + assert np.allclose(lpx, lpx_alt) # sanity check + test_ll = [] + # compute loo likelihood + ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision) + # compute posterior p(z_t | x_1,...t-1, t+1,...T) \\forall missing t + tsteps = [] + for t in self.forward_trellis.keys(): + lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx + test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel())) + tsteps.append(t) + # empty forward and backward trellis + self.clear_trellis() + return -np.array(test_ll) + + def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None): + if maxiter: + options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter} + else: + options_dict = {'disp': verbose, 'gtol': 1e-10} + + # Define a function that returns gradients of training loss using Autograd. 
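+        # The per-timestep weights are closed over in the loss, so the same autograd
+        # graph also drives the IJ helpers below (compute_hessian / compute_jacobian),
+        # which differentiate the fit with respect to those weights.
+        # Hypothetical usage sketch: hmm = HMM(X, {'K': 3, 'precision': None});
+        # params = hmm.fit(np.ones(hmm.T)) fits on the full, unweighted sequence.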
+ training_loss_fun = lambda params: self.weighted_loss(params, weights) + training_gradient_fun = grad(training_loss_fun, 0) + if init_params is None: + init_params = self.initialize_params() + if verbose: + print(""Initial loss: "", training_loss_fun(init_params)) + res = scipy.optimize.minimize(fun=training_loss_fun, + jac=training_gradient_fun, + x0=init_params, + tol=1e-10, + options=options_dict) + if verbose: + print('grad norm =', np.linalg.norm(res.jac)) + return res.x + + def clear_trellis(self): + self.forward_trellis = {} + self.backward_trellis = {} + + #### Required for IJ computation ### + def compute_hessian(self, params_one, weights_one): + return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one) + + def compute_jacobian(self, params_one, weights_one): + return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\\ + (params_one, weights_one).squeeze() + ################################################### + + @staticmethod + def log_obs_lik(x, phi, Sigma): + """""" + :param x: T*D*1 + :param phi: 1*D*K + :param Sigma: D*D*K --- precision matrices per state + :return: ll + """""" + centered_x = x - phi + ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma ) + return ll + + @staticmethod + def pack_params(params_dict): + param_list = [(np.log(params_dict['A'][:, :-1]) - + np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(), + np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]), + params_dict['phi'].ravel()] + return np.concatenate(param_list) + + @staticmethod + def get_prior_contrib(param_dict): + logp = 0.0 + # Prior + logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum() + logp += (1.1 - 1) * np.log(param_dict['A']).sum() + logp += (1.1 - 1) * np.log(param_dict['pi0']).sum() + return logp + + @staticmethod + def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False): + """""" + :param T: length of the sequence + :param pct_to_drop: % of T in the held out fold + :param contiguous: if True generate a block of indices to drop else generate indices by iid sampling + :return: o (the set of indices in the fold) + """""" + if contiguous: + l = np.floor(pct_to_drop / 100. * T) + anchor = np.random.choice(np.arange(l + 1, T)) + o = np.arange(anchor - l, anchor).astype(int) + else: + # i.i.d LWCV + o = np.random.choice(T - 2, size=np.int(pct_to_drop / 100. 
* T), replace=False) + 1 + return o + + @staticmethod + def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, varainces_of_mean=1.0, + diagonal_upweight=False): + """""" + :param K: Number of HMM states + :param T: length of the sequence + """""" + N = 1 # For structured IJ we will remove data / time steps from a single sequence + np.random.seed(seed) + if sigma0 is None: + sigma0 = np.eye(D) + + A = np.random.dirichlet(alpha=np.ones(K), size=K) + if diagonal_upweight: + A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions + A = A / A.sum(axis=1) + + pi0 = np.random.dirichlet(alpha=np.ones(K)) + mus = np.random.normal(size=(K, D), scale=np.sqrt(varainces_of_mean)) + zs = np.empty((N, T), dtype=np.int) + X = np.empty((N, T, D)) + + for n in range(N): + zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0)) + X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0) + for t in range(1, T): + zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :])) + X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0) + + return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus} + from builtins import range + +import autograd.numpy as np + + +def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False): + """"""Adapted from autograd.misc.optimizers"""""" + m = np.zeros(len(x)) + v = np.zeros(len(x)) + for i in range(num_iters): + g = grad(x, i) + if callback: callback(x, i, g, polyak) + m = (1 - b1) * g + b1 * m # First moment estimate. + v = (1 - b2) * (g**2) + b2 * v # Second moment estimate. + mhat = m / (1 - b1**(i + 1)) # Bias correction. + vhat = v / (1 - b2**(i + 1)) + x = x - step_size*mhat/(np.sqrt(vhat) + eps) + return x import matplotlib.pyplot as plt +import numpy as np +import numpy.random as npr +import torch as torch + + +def make_data_gap(seed, data_count=100): + import GPy + npr.seed(0) + x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))]) + x = x[:, np.newaxis] + k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.) + K = k.K(x) + L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count)) + + # draw a noise free random function from a GP + eps = np.random.randn(data_count) + f = L @ eps + + + # use a homoskedastic Gaussian noise model N(f(x)_i, \\sigma^2). 
\\sigma^2 = 0.1 + eps_noise = np.sqrt(0.1) * np.random.randn(data_count) + y = f + eps_noise + y = y[:, np.newaxis] + + plt.plot(x, f, 'ko', ms=2) + plt.plot(x, y, 'ro') + plt.title(""GP generated Data"") + plt.pause(1) + return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y) + + +def make_data_sine(seed, data_count=450): + # fix the random seed + np.random.seed(seed) + noise_var = 0.1 + + X = np.linspace(-4, 4, data_count) + y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count) + + train_count = int (0.2 * data_count) + idx = npr.permutation(range(data_count)) + X_train = X[idx[:train_count], np.newaxis ] + X_test = X[ idx[train_count:], np.newaxis ] + y_train = y[ idx[:train_count] ] + y_test = y[ idx[train_count:] ] + + mu = np.mean(X_train, 0) + std = np.std(X_train, 0) + X_train = (X_train - mu) / std + X_test = (X_test - mu) / std + mu = np.mean(y_train, 0) + std = np.std(y_train, 0) + # mu = 0 + # std = 1 + y_train = (y_train - mu) / std + y_test = (y_test -mu) / std + train_stats = dict() + train_stats['mu'] = torch.FloatTensor([mu]) + train_stats['sigma'] = torch.FloatTensor([std]) + return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\\ + train_stats import autograd +import autograd.numpy as np +import numpy.random as npr +import scipy.optimize + +sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1) +get_num_train = lambda inputs: inputs.shape[0] +logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params)) + + +class LogisticRegression: + def __init__(self): + self.params = None + + def set_parameters(self, params): + self.params = params + + def predict(self, X): + if self.params is not None: + # Outputs probability of a label being true according to logistic model + return np.atleast_2d(sigmoid(np.dot(X, self.params))).T + else: + raise RuntimeError(""Params need to be fit before predictions can be made."") + + def loss(self, params, weights, inputs, targets): + # Training loss is the negative log-likelihood of the training labels. + preds = logistic_predictions(params, inputs) + label_probabilities = preds * targets + (1 - preds) * (1 - targets) + return -np.sum(weights * np.log(label_probabilities + 1e-16)) + + def fit(self, weights, init_params, inputs, targets, verbose=True): + training_loss_fun = lambda params: self.loss(params, weights, inputs, targets) + # Define a function that returns gradients of training loss using Autograd. 
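+        # As in the HMM above, the per-example weights enter the negative
+        # log-likelihood, so zeroing weights[i] reproduces a leave-one-out refit and
+        # the IJ helpers (compute_hessian / compute_jacobian) can differentiate through them.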
+ training_gradient_fun = autograd.grad(training_loss_fun, 0) + # optimize params + if verbose: + print(""Initial loss:"", self.loss(init_params, weights, inputs, targets)) + # opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1) + res = scipy.optimize.minimize(fun=training_loss_fun, + jac=training_gradient_fun, + x0=init_params, + tol=1e-10, + options={'disp': verbose}) + opt_params = res.x + if verbose: + print(""Trained loss:"", self.loss(opt_params, weights, inputs, targets)) + self.params = opt_params + return opt_params + + def get_test_acc(self, params, test_targets, test_inputs): + preds = np.round(self.predict(test_inputs).T).astype(np.int) + err = np.abs(test_targets - preds).sum() + return 1 - err/ test_targets.shape[1] + + #### Required for IJ computation ### + def compute_hessian(self, params_one, weights_one, inputs, targets): + return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets) + + def compute_jacobian(self, params_one, weights_one, inputs, targets): + return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\\ + (params_one, weights_one, inputs, targets).squeeze() + ################################################### + + @staticmethod + def synthetic_lr_data(N=10000, D=10): + x = 1. * npr.randn(N, D) + x_test = 1. * npr.randn(int(0.3 * N" +"), D) + w = npr.randn(D, 1) + y = sigmoid((x @ w)).ravel() + y = npr.binomial(n=1, p=y) # corrupt labels + y_test = sigmoid(x_test @ w).ravel() + # y_test = np.round(y_test) + y_test = npr.binomial(n=1, p=y_test) + return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test) + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + +from copy import deepcopy + +import numpy as np +import numpy.random as npr + + +def make_batches(n_data, batch_size): + return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)] + + +def generate_regression_data(seed, data_count=500): + """""" + Generate data from a noisy sine wave. + :param seed: random number seed + :param data_count: number of data points. 
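+    Note: 20% of the points form the training split; inputs and training targets
+    are standardised with training-set statistics, and the target mean/std are
+    returned in train_stats.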
+ :return: + """""" + np.random.seed(seed) + noise_var = 0.1 + + x = np.linspace(-4, 4, data_count) + y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count) + + train_count = int (0.2 * data_count) + idx = npr.permutation(range(data_count)) + x_train = x[idx[:train_count], np.newaxis ] + x_test = x[ idx[train_count:], np.newaxis ] + y_train = y[ idx[:train_count] ] + y_test = y[ idx[train_count:] ] + + mu = np.mean(x_train, 0) + std = np.std(x_train, 0) + x_train = (x_train - mu) / std + x_test = (x_test - mu) / std + mu = np.mean(y_train, 0) + std = np.std(y_train, 0) + y_train = (y_train - mu) / std + train_stats = dict() + train_stats['mu'] = mu + train_stats['sigma'] = std + + return x_train, y_train, x_test, y_test, train_stats + + +def form_D_for_auucc(yhat, zhatl, zhatu): + # a handy routine to format data as needed by the UCC fit() method + D = np.zeros([yhat.shape[0], 3]) + D[:, 0] = yhat.squeeze() + D[:, 1] = zhatl.squeeze() + D[:, 2] = zhatu.squeeze() + return D + + +def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper): + """""" + Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference + :param y_pred_lower: + :param y_pred_mean: + :param y_pred_upper: + :param y_true: + :return: ucc object fitted for two systems: target + null reference + """""" + # form matrix for ucc: + X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(), + y_pred_mean.squeeze() - y_pred_lower.squeeze(), + y_pred_upper.squeeze() - y_pred_mean.squeeze()) + # form matrix for a 'null' system (constant band) + X_null = deepcopy(X_for_ucc) + X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC) + # create an instance of ucc and fit data + from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc + u = ucc() + u.fit([X_for_ucc, X_null], y_true.squeeze()) + return u + + +def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs): + """""" + + Args: + task_type: (str) regression or classification. + metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for + classification and [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] for regression. + greater_is_better: is False the scores are negated before returning. + **kwargs: additional arguments specific to some metrics. + + Returns: + sklearn compatible scorer function. + + """""" + + from uq360.metrics.classification_metrics import compute_classification_metrics + from uq360.metrics.regression_metrics import compute_regression_metrics + + def sklearn_compatible_score(model, X, y_true): + """""" + + Args: + model: The model being scored. Currently uq360 and sklearn models are supported. + X: Input features. + y_true: ground truth values for the target. + + Returns: + Computed score of the model. 
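+            Note: the wrapped callable follows the (estimator, X, y_true) convention
+            used by scikit-learn scorers and dispatches to uq360 or plain sklearn
+            models as appropriate.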
+ + """""" + + from uq360.algorithms.builtinuq import BuiltinUQ + from uq360.algorithms.posthocuq import PostHocUQ + if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ): + # uq360 models + if task_type == ""classification"": + score = compute_classification_metrics( + y_true=y_true, + y_prob=model.predict(X).y_prob, + option=metric, + **kwargs + )[metric] + elif task_type == ""regression"": + y_mean, y_lower, y_upper = model.predict(X) + score = compute_regression_metrics( + y_true=y_true, + y_mean=y_mean, + y_lower=y_lower, + y_upper=y_upper, + option=metric, + **kwargs + )[metric] + else: + raise NotImplementedError + + else: + # sklearn models + if task_type == ""classification"": + score = compute_classification_metrics( + y_true=y_true, + y_prob=model.predict_proba(X), + option=metric, + **kwargs + )[metric] + else: + if metric in [""rmse"", ""r2""]: + score = compute_regression_metrics( + y_true=y_true, + y_mean=model.predict(X), + y_lower=None, + y_upper=None, + option=metric, + **kwargs + )[metric] + else: + raise NotImplementedError(""{} is not supported for sklearn regression models"".format(metric)) + + if not greater_is_better: + score = -score + return score + return sklearn_compatible_score + + +class DummySklearnEstimator(ABC): + def __init__(self, num_classes, base_model_prediction_fn): + self.base_model_prediction_fn = base_model_prediction_fn + self.classes_ = [i for i in range(num_classes)] + + def fit(self): + pass + + def predict_proba(self, X): + return self.base_model_prediction_fn(X) + # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py +# Utilization target is kept as a continuous target. +import os + +import pandas as pd + + +def default_preprocessing(df): + """""" + 1.Create a new column, RACE that is 'White' if RACEV2X = 1 and HISPANX = 2 i.e. non Hispanic White + and 'non-White' otherwise + 2. Restrict to Panel 19 + 3. RENAME all columns that are PANEL/ROUND SPECIFIC + 4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1 + 5. Compute UTILIZATION. 
+ """""" + def race(row): + if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE + return 'White' + return 'Non-White' + + df['RACEV2X'] = df.apply(lambda row: race(row), axis=1) + df = df.rename(columns = {'RACEV2X' : 'RACE'}) + + df = df[df['PANEL'] == 19] + + # RENAME COLUMNS + df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH', + 'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT', + 'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM', + 'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE', + 'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'}) + + df = df[df['REGION'] >= 0] # remove values -1 + df = df[df['AGE'] >= 0] # remove values -1 + + df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9 + + df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9 + + df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG', + 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX', + 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM', + 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42', + 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1 + + def utilization(row): + return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15'] + + df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1) + + df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'}) + + df = df[['REGION','AGE','SEX','RACE','MARRY', + 'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX', + 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX', + 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM', + 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42', + 'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']] + + return df + + +class MEPSDataset(): + """""" + The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals, + medical providers, and employers, and collects data on health services used, costs & frequency of services, + demographics, health status and conditions, etc., of the respondents. + This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19, + and rounds 1, 2, and 3 of Panel 20. + See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data. + References: + .. 
[#] `Medical Expenditure Panel Survey data `_ + """""" + + def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None): + self._dirpath = dirpath + if not self._dirpath: + self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data') + + self._filepath = os.path.join(self._dirpath, 'h181.csv') + try: + df = pd.read_csv(self._filepath, sep=',', na_values=[]) + except IOError as err: + print(""IOError: {}"".format(err)) + print(""To use this class, please place the heloc_dataset.csv:"") + print(""file, as-is, in the folder:"") + print(""\\n\\t{}\\n"".format(os.path.abspath(os.path.join( + os.path.abspath(__file__), 'data', 'meps_data')))) + import sys + sys.exit(1) + + if custom_preprocessing: + self._data = custom_preprocessing(df) + + def data(self): + return self._data from .meps_dataset import MEPSDataset + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class BuiltinUQ(ABC): + """""" BuiltinUQ is the base class for any algorithm that has UQ built into it. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize a BuiltinUQ object. + """""" + + @abc.abstractmethod + def fit(self, *argv, **kwargs): + """""" Learn the UQ related parameters.. + """""" + raise NotImplementedError + + @abc.abstractmethod + def predict(self, *argv, **kwargs): +" +" """""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric + uncertainty in the predictions. + """""" + raise NotImplementedError + + def set_params(self, **parameters): + for parameter, value in parameters.items(): + setattr(self, parameter, value) + return self + + + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class PostHocUQ(ABC): + """""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize a BuiltinUQ object. + """""" + + @abc.abstractmethod + def _process_pretrained_model(self, *argv, **kwargs): + """""" Method to process the pretrained model that requires UQ. + """""" + raise NotImplementedError + + @abc.abstractmethod + def predict(self, *argv, **kwargs): + """""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric + uncertainty in the predictions. + """""" + raise NotImplementedError + + def set_params(self, **parameters): + for parameter, value in parameters.items(): + setattr(self, parameter, value) + return self + + def get_params(self): + """""" + This method should not take any arguments and returns a dict of the __init__ parameters. + + """""" + raise NotImplementedError + from collections import namedtuple + +import numpy as np +import torch +from scipy.stats import norm +from torch.utils.data import DataLoader +from torch.utils.data import TensorDataset +from uq360.algorithms.builtinuq import BuiltinUQ +from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet + +np.random.seed(42) +torch.manual_seed(42) + +class HeteroscedasticRegression(BuiltinUQ): + """""" Wrapper for heteroscedastic regression. We learn to predict targets given features, + assuming that the targets are noisy and that the amount of noise varies between data points. 
+ https://en.wikipedia.org/wiki/Heteroscedasticity + """""" + + def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True): + """""" + Args: + model_type: The base model architecture. Currently supported values are [mlp]. + mlp modeltype learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the + mean and variance of the Gaussian are functions of the data point ->git N(y_n | mlp_mu(x_n), mlp_var(x_n)) + model: (optional) The prediction model. Currently support pytorch models that returns mean and log variance. + config: dictionary containing the config parameters for the model. + device: device used for pytorch models ignored otherwise. + verbose: if True, print statements with the progress are enabled. + """""" + + super(HeteroscedasticRegression).__init__() + self.config = config + self.device = device + self.verbose = verbose + if model_type == ""mlp"": + self.model_type = model_type + self.model = _MLPNet( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + + elif model_type == ""custom"": + self.model_type = model_type + self.model = model + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config, ""model"": self.model, + ""device"": self.device, ""verbose"": self.verbose} + + def _loss(self, y_true, y_pred_mu, y_pred_log_var): + return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 + + 0.5 * y_pred_log_var) + + def fit(self, X, y): + """""" Fit the Heteroscedastic Regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + X = torch.from_numpy(X).float().to(self.device) + y = torch.from_numpy(y).float().to(self.device) + + dataset_loader = DataLoader( + TensorDataset(X,y), + batch_size=self.config[""batch_size""] + ) + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config[""lr""]) + + for epoch in range(self.config[""num_epochs""]): + avg_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.model.train() + batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x) + loss = self.model.loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + avg_loss += loss.item()/len(dataset_loader) + + if self.verbose: + print(""Epoch: {}, loss = {}"".format(epoch, avg_loss)) + + return self + + def predict(self, X, return_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. 
+ dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. + """""" + + self.model.eval() + + X = torch.from_numpy(X).float().to(self.device) + dataset_loader = DataLoader( + X, + batch_size=self.config[""batch_size""] + ) + + y_mean_list = [] + y_log_var_list = [] + for batch_x in dataset_loader: + batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x) + y_mean_list.append(batch_y_pred_mu.data.cpu().numpy()) + y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy()) + + y_mean = np.concatenate(y_mean_list) + y_log_var = np.concatenate(y_log_var_list) + y_std = np.sqrt(np.exp(y_log_var)) + y_lower = y_mean - 2.0*y_std + y_upper = y_mean + 2.0*y_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + return res + from .heteroscedastic_regression import HeteroscedasticRegression from collections import namedtuple + +import numpy as np +import torch +import torch.nn.functional as F +from scipy.stats import norm +from torch.utils.data import DataLoader +from torch.utils.data import TensorDataset + +from uq360.algorithms.builtinuq import BuiltinUQ + +np.random.seed(42) +torch.manual_seed(42) + + +class _MLPNet_Main(torch.nn.Module): + def __init__(self, num_features, num_outputs, num_hidden): + super(_MLPNet_Main, self).__init__() + self.fc = torch.nn.Linear(num_features, num_hidden) + self.fc_mu = torch.nn.Linear(num_hidden, num_outputs) + self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs) + + def forward(self, x): + x = F.relu(self.fc(x)) + mu = self.fc_mu(x) + log_var = self.fc_log_var(x) + return mu, log_var + + +class _MLPNet_Aux(torch.nn.Module): + def __init__(self, num_features, num_outputs, num_hidden): + super(_MLPNet_Aux, self).__init__() + self.fc = torch.nn.Linear(num_features, num_hidden) + self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs) + + def forward(self, x): + x = F.relu(self.fc(x)) + log_var = self.fc_log_var(x) + return log_var + + +class AuxiliaryIntervalPredictor(BuiltinUQ): + """""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model. + + References: + .. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep + models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on + Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079 + """""" + + def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True): + """""" + Args: + model_type: The model type used to build the main model and the auxiliary model. Currently supported values + are [mlp, custom]. `mlp` modeltype learns a mlp neural network using pytorch framework. For `custom` the user + provide `main_model` and `aux_model`. + main_model: (optional) The main prediction model. Currently support pytorch models that return mean and log variance. + aux_model: (optional) The auxiliary prediction model. Currently support pytorch models that return calibrated log variance. + config: dictionary containing the config parameters for the model. + device: device used for pytorch models ignored otherwise. 
+ verbose: if True, print statements with the progress are enabled. + """""" + + super(AuxiliaryIntervalPredictor).__init__() + self.config = config + self.device = device + self.verbose = verbose + if model_type == ""mlp"": + self.model_type = model_type + self.main_model = _MLPNet_Main( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + self.aux_model = _MLPNet_Aux( + num_features=self.config[""num_features""], + num_outputs=self.config[""num_outputs""], + num_hidden=self.config[""num_hidden""], + ) + elif model_type == ""custom"": + self.model_type = model_type + self.main_model = main_model + self.aux_model = aux_model + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config, ""main_model"": self.main_model, + ""aux_model"": self.aux_model, ""device"": self.device, ""verbose"": self.verbose} + + def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux): + r = torch.abs(y_true - y_pred_mu) + # + 0.5 * y_pred_log_var + + loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \\ + self.config[""lambda_match""] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux))) + return loss + + def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux): + deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux) + upper = y_pred_mu + deltau + lower = y_pred_mu - deltal + width = upper - lower + r = torch.abs(y_true - y_pred_mu) + + emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000)) + + loss_emce = torch.abs(self.config[""calibration_alpha""]-emce) + loss_noise = torch.mean(torch.abs(0.5 * width - r)) + loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true)) + + #print(emce) + return loss_emce + self.config[""lambda_noise""] * loss_noise + self.config[""lambda_sharpness""] * loss_sharpness + + def fit(self, X, y): + """""" Fit the Auxiliary Interval Predictor model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + X = torch.from_numpy(X).float().to(self.device) + y = torch.from_numpy(y).float().to(self.device) + + dataset_loader = DataLoader( + TensorDataset(X,y), + batch_size=self.config[""batch_size""] + ) + optimizer_main_model = torch.optim.Adam(self." 
+"main_model.parameters(), lr=self.config[""lr""]) + optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config[""lr""]) + + for it in range(self.config[""num_outer_iters""]): + + # Train the main model + for epoch in range(self.config[""num_main_iters""]): + avg_mean_model_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.main_model.train() + self.aux_model.eval() + batch_y_pred_log_var_aux = self.aux_model(batch_x) + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux) + optimizer_main_model.zero_grad() + main_loss.backward() + optimizer_main_model.step() + + avg_mean_model_loss += main_loss.item()/len(dataset_loader) + + if self.verbose: + print(""Iter: {}, Epoch: {}, main_model_loss = {}"".format(it, epoch, avg_mean_model_loss)) + + # Train the auxiliary model + for epoch in range(self.config[""num_aux_iters""]): + avg_aux_model_loss = 0.0 + for batch_x, batch_y in dataset_loader: + self.aux_model.train() + self.main_model.eval() + batch_y_pred_log_var_aux = self.aux_model(batch_x) + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux) + optimizer_aux_model.zero_grad() + aux_loss.backward() + optimizer_aux_model.step() + + avg_aux_model_loss += aux_loss.item() / len(dataset_loader) + + if self.verbose: + print(""Iter: {}, Epoch: {}, aux_model_loss = {}"".format(it, epoch, avg_aux_model_loss)) + + return self + + def predict(self, X, return_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. 
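+
+        Example:
+            A minimal, illustrative sketch (the hyperparameter values below are
+            placeholders; the config keys are the ones this class reads, and
+            device='cpu' is assumed here):
+
+                import numpy as np
+                X = np.random.randn(200, 4).astype('float32')
+                y = np.random.randn(200, 1).astype('float32')
+                config = {'num_features': 4, 'num_outputs': 1, 'num_hidden': 32,
+                          'batch_size': 32, 'lr': 1e-3,
+                          'num_outer_iters': 2, 'num_main_iters': 5, 'num_aux_iters': 5,
+                          'lambda_match': 1.0, 'calibration_alpha': 0.95,
+                          'lambda_noise': 1.0, 'lambda_sharpness': 1.0}
+                uq = AuxiliaryIntervalPredictor(model_type='mlp', config=config, device='cpu')
+                uq.fit(X, y)
+                res = uq.predict(X, return_dists=True)
+                print(res.y_mean.shape, res.y_lower.shape, res.y_upper.shape)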
+ """""" + + self.main_model.eval() + + X = torch.from_numpy(X).float().to(self.device) + dataset_loader = DataLoader( + X, + batch_size=self.config[""batch_size""] + ) + + y_mean_list = [] + y_log_var_list = [] + for batch_x in dataset_loader: + batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x) + y_mean_list.append(batch_y_pred_mu.data.cpu().numpy()) + y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy()) + + y_mean = np.concatenate(y_mean_list) + y_log_var = np.concatenate(y_log_var_list) + y_std = np.sqrt(np.exp(y_log_var)) + y_lower = y_mean - 2.0*y_std + y_upper = y_mean + 2.0*y_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + return res + from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor + from .infinitesimal_jackknife import InfinitesimalJackknife + from collections import namedtuple + +import numpy as np + +from uq360.algorithms.posthocuq import PostHocUQ + + +class InfinitesimalJackknife(PostHocUQ): + """""" + Performs a first order Taylor series expansion around MLE / MAP fit. + Requires the model being probed to be twice differentiable. + """""" + def __init__(self, params, gradients, hessian, config): + """""" Initialize IJ. + Args: + params: MLE / MAP fit around which uncertainty is sought. d*1 + gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n + hessian: Hessian evaluated at the MLE / MAP fit. d*d + """""" + + super(InfinitesimalJackknife).__init__() + self.params_one = params + self.gradients = gradients + self.hessian = hessian + self.d, self.n = gradients.shape + self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients) + self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients) + self.w_one = np.ones([self.n]) + self.config = config + + def get_params(self, deep=True): + return {""params"": self.params, ""config"": self.config, ""gradients"": self.gradients, + ""hessian"": self.hessian} + + def _process_pretrained_model(self, *argv, **kwargs): + pass + + def get_parameter_uncertainty(self): + if (self.config['resampling_strategy'] == ""jackknife"") or (self.config['resampling_strategy'] == ""jackknife+""): + w_query = np.ones_like(self.w_one) + resampled_params = np.zeros([self.n, self.d]) + for i in np.arange(self.n): + w_query[i] = 0 + resampled_params[i] = self.ij(w_query) + w_query[i] = 1 + return np.cov(resampled_params), resampled_params + elif self.config['resampling_strategy'] == ""bootstrap"": + pass + else: + raise NotImplementedError(""Only jackknife, jackknife+, and bootstrap resampling strategies are supported"") + + def predict(self, X, model): + """""" + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + model: model object, must implement a set_parameters function + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. 
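+
+            Example:
+                A small worked sketch around an ordinary least-squares fit. The toy
+                LinModel wrapper and the synthetic data are purely illustrative; the
+                model passed to `predict` only needs `predict` and `set_parameters`:
+
+                    import numpy as np
+
+                    class LinModel:
+                        def __init__(self, w): self.w = np.asarray(w)
+                        def set_parameters(self, w): self.w = np.asarray(w)
+                        def predict(self, X): return (X @ self.w).reshape(-1, 1)
+
+                    rng = np.random.RandomState(0)
+                    X = rng.randn(50, 3)
+                    y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
+                    w_mle = np.linalg.solve(X.T @ X, X.T @ y)
+                    resid = X @ w_mle - y
+                    grads = 2.0 * X.T * resid        # (d, n) per-sample gradients at the MLE
+                    hess = 2.0 * X.T @ X             # (d, d) Hessian of the squared loss
+                    ij = InfinitesimalJackknife(w_mle, grads, hess,
+                                                config={'resampling_strategy': 'jackknife',
+                                                        'alpha': 0.05})
+                    res = ij.predict(X, LinModel(w_mle))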
+ + """""" + n, _ = X.shape + y_all = model.predict(X) + _, d_out = y_all.shape + params_cov, params = self.get_parameter_uncertainty() + if d_out > 1: + print(""Quantiles are computed independently for each dimension. May not be accurate."") + y = np.zeros([params.shape[0], n, d_out]) + for i in np.arange(params.shape[0]): + model.set_parameters(params[i]) + y[i] = model.predict(X) + y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0) + y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0) + y_mean = y.mean(axis=0) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + return res + + def ij(self, w_query): + """""" + Args: + w_query: A n*1 vector to query parameters at. + Return: + new parameters at w_query + """""" + assert w_query.shape[0] == self.n + return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T + + def approx_ij(self, w_query): + """""" + Args: + w_query: A n*1 vector to query parameters at. + Return: + new parameters at w_query + """""" + assert w_query.shape[0] == self.n + return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T import copy +from collections import namedtuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader +import torch.utils.data as data_utils +from scipy.stats import norm +from sklearn.preprocessing import StandardScaler + +from uq360.algorithms.builtinuq import BuiltinUQ +from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp + + +class BnnRegression(BuiltinUQ): + """""" + Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression. + + References: + .. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. ""Structured variational learning of Bayesian neural + networks with horseshoe priors."" International Conference on Machine Learning. PMLR, 2018. + """""" + def __init__(self, config, prior=""Gaussian""): + """""" + + Args: + config: a dictionary specifying network and learning hyperparameters. + prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe + """""" + super(BnnRegression, self).__init__() + self.config = config + if prior == ""Gaussian"": + self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers']) + self.config['use_reg_hshoe'] = None + elif prior == ""Hshoe"": + self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale']) + self.config['use_reg_hshoe'] = False + elif prior == ""RegHshoe"": + self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale'], + use_reg_hshoe=config['use_reg_hshoe']) + self.config['use_reg_hshoe'] = True + else: + raise NotImplementedError(""'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe"") + + def get_params(self, deep=True): + return {""prior"": self.prior, ""config"": self.config} + + def fit(self, X, y): + """""" Fit the BNN regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. 
+ y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + torch.manual_seed(1234) + optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size']) + neg_elbo = torch.zeros([self.config['num_epochs'], 1]) + params_store = {} + for epoch in range(self.config['num_epochs']): + loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0] + optimizer.zero_grad() + loss.backward() + optimizer.step() + if hasattr(self.net, 'fixed_point_updates'): + # for hshoe or regularized hshoe nets + self.net.fixed_point_updates() + neg_elbo[epoch] = loss.item() + if (epoch + 1) % 10 == 0: + # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0]) + print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}' + .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0], + self.net.get_noise_var())) + params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all. + best_model_id = neg_elbo.argmin() # loss_val_store.argmin() # + self.net.load_state_dict(params_store[best_model_id.item()]) + + return self + + def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: +" +" X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + mc_samples: Number of Monte-Carlo samples. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + return_epistemic: if True, the epistemic upper and lower bounds are returned. + return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions + is returned. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. 
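+
+        Example:
+            An illustrative sketch (hyperparameter values are placeholders; note that,
+            as written, `fit` and `predict` operate on torch tensors rather than numpy
+            arrays):
+
+                import torch
+                X = torch.randn(200, 4)
+                y = torch.randn(200)
+                config = {'ip_dim': 4, 'op_dim': 1, 'num_nodes': 32, 'num_layers': 2,
+                          'step_size': 1e-2, 'num_epochs': 100}
+                bnn = BnnRegression(config, prior='Gaussian')
+                bnn.fit(X, y)
+                res = bnn.predict(X, mc_samples=50, return_dists=True)
+                print(res.y_mean[:3], res.y_lower[:3], res.y_upper[:3])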
+ """""" + epistemic_out = np.zeros([mc_samples, X.shape[0]]) + total_out = np.zeros([mc_samples, X.shape[0]]) + for s in np.arange(mc_samples): + pred = self.net(X).data.numpy().ravel() + epistemic_out[s] = pred + total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0]) + y_total_std = np.std(total_out, axis=0) + y_epi_std = np.std(epistemic_out, axis=0) + y_mean = np.mean(total_out, axis=0) + y_lower = y_mean - 2 * y_total_std + y_upper = y_mean + 2 * y_total_std + y_epi_lower = y_mean - 2 * y_epi_std + y_epi_upper = y_mean + 2 * y_epi_std + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_epistemic: + Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',)) + res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + if return_epistemic_dists: + epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_epistemic_dists',)) + res = Result(*res, y_epistemic_dists=epi_dists) + + return res + + +class BnnClassification(BuiltinUQ): + """""" + Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification. + """""" + def __init__(self, config, prior=""Gaussian"", device=None): + """""" + + Args: + config: a dictionary specifying network and learning hyperparameters. + prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe + """""" + super(BnnClassification, self).__init__() + self.config = config + self.device = device + if prior == ""Gaussian"": + self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers']) + self.config['use_reg_hshoe'] = None + elif prior == ""Hshoe"": + self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale']) + self.config['use_reg_hshoe'] = False + elif prior == ""RegHshoe"": + self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'], + num_nodes=config['num_nodes'], num_layers=config['num_layers'], + hshoe_scale=config['hshoe_scale'], + use_reg_hshoe=config['use_reg_hshoe']) + self.config['use_reg_hshoe'] = True + else: + raise NotImplementedError(""'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe"") + if ""batch_size"" not in self.config: + self.config[""batch_size""] = 50 + self.net = self.net.to(device) + + def get_params(self, deep=True): + return {""prior"": self.prior, ""config"": self.config, ""device"": self.device} + + def fit(self, X=None, y=None, train_loader=None): + """""" Fits BNN regression model. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + Ignored if train_loader is not None. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + Ignored if train_loader is not None. + train_loader: pytorch train_loader object. 
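+
+            Example:
+                An illustrative sketch (hyperparameters are placeholders). When X and y
+                are passed directly, `fit` calls `y.values`, so y is expected to be a
+                pandas Series of integer class labels:
+
+                    import numpy as np
+                    import pandas as pd
+                    X = np.random.randn(200, 10).astype('float32')
+                    y = pd.Series(np.random.randint(0, 3, size=200))
+                    config = {'ip_dim': 10, 'op_dim': 3, 'num_nodes': 32, 'num_layers': 2,
+                              'step_size': 1e-2, 'num_epochs': 50, 'batch_size': 32}
+                    bnn = BnnClassification(config, prior='Gaussian', device='cpu')
+                    bnn.fit(X, y)
+                    res = bnn.predict(X, mc_samples=50)
+                    print(res.y_pred[:5], res.y_prob.shape)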
+ + Returns: + self + + """""" + if train_loader is None: + train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long()) + train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True) + + torch.manual_seed(1234) + optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size']) + neg_elbo = torch.zeros([self.config['num_epochs'], 1]) + params_store = {} + for epoch in range(self.config['num_epochs']): + avg_loss = 0.0 + for batch_x, batch_y in train_loader: + loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if hasattr(self.net, 'fixed_point_updates'): + # for hshoe or regularized hshoe nets + self.net.fixed_point_updates() + + avg_loss += loss.item() + + neg_elbo[epoch] = avg_loss / len(train_loader) + + if (epoch + 1) % 10 == 0: + # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0]) + print('Epoch[{}/{}], neg elbo: {:.6f}' + .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item())) + params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all. + best_model_id = neg_elbo.argmin() # loss_val_store.argmin() # + self.net.load_state_dict(params_store[best_model_id.item()]) + + return self + + def predict(self, X, mc_samples=100): + """""" + Obtain calibrated predictions for the test points. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + mc_samples: Number of Monte-Carlo samples. + + Returns: + namedtuple: A namedtupe that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_prob: ndarray of shape (n_samples, n_classes) + Predicted probability scores of the classes. + y_prob_var: ndarray of shape (n_samples,) + Variance of the prediction on the test points. + y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes) + Samples from the predictive distribution. + + """""" + + X = torch.Tensor(X) + y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)] + + y_prob_samples_stacked = np.stack(y_prob_samples) + prob_mean = np.mean(y_prob_samples_stacked, 0) + prob_var = np.std(y_prob_samples_stacked, 0) ** 2 + + if len(np.shape(prob_mean)) == 1: + y_pred_labels = prob_mean > 0.5 + + else: + y_pred_labels = np.argmax(prob_mean, axis=1) + + Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples']) + res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples) + + return res + import inspect +from collections import namedtuple + +import numpy as np +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.exceptions import NotFittedError +from uq360.algorithms.posthocuq import PostHocUQ + + +class BlackboxMetamodelClassification(PostHocUQ): + """""" Extracts confidence scores from black-box classification models using a meta-model [4]_ . + + References: + .. [4] Chen, Tongfei, et al. ""Confidence scoring using whitebox meta-models with linear classifier probes."" + The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019. + """""" + + def _create_named_model(self, mdltype, config): + """""" Instantiates a model by name passed in 'mdltype'. 
+ + Args: + mdltype: string with name (must be supported) + config: dict with args passed in the instantiation call + Returns: + mdl instance + """""" + assert (isinstance(mdltype, str)) + if mdltype == 'lr': + mdl = LogisticRegression(**config) + elif mdltype == 'gbm': + mdl = GradientBoostingClassifier(**config) + else: + raise NotImplementedError(""ERROR: Requested model type unknown: \\""%s\\"""" % mdltype) + return mdl + + def _get_model_instance(self, model, config): + """""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or + (c) passed in instance. + + :param model: string, class, or instance. Class and instance must have certain methods callable. + :param config: dict with args passed in during the instantiation + :return: model instance + """""" + assert (model is not None and config is not None) + if isinstance(model, str): # 'model' is a name, create it + mdl = self._create_named_model(model, config) + elif inspect.isclass(model): # 'model' is a class, instantiate it + mdl = model(**config) + else: # 'model' is an instance, register it + mdl = model + if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]): + raise ValueError(""ERROR: Passed model/method failed the interface test. Methods required: %s"" % + ','.join(self.callable_keys)) + return mdl + + def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42): + """""" + + :param base_model: Base model. Can be: + (1) None (default mdl will be set up), + (2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'), + (3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate. + (4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods. + Note: user-supplied classes and models must have certain callable methods ('predict', 'fit') + and be capable of raising NotFittedError. + :param meta_model: Meta model. 
Same values possible as with 'base_model' + :param base_config: None or a params dict to be passed to 'base_model' at instantiation + :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation + :param random_seed: seed used in the various pipeline steps + """""" + super(BlackboxMetamodelClassification).__init__() + self.random_seed = random_seed + self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in + self.base_model_default = 'gbm' + self.meta_model_default = 'lr' + self.base_config_default = {'n_estimators': 300, 'max_depth': 10, + 'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10, + 'random_state': self.random_seed} + self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed} + self.base_config = base_config if base_config is not None else self.base_config_default + self.meta_config = meta_config if meta_config is not None else self.meta_config_default + self.base_model = None + self.meta_model = None + self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default, + self.base_config) + " +"self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default, + self.meta_config) + + def get_params(self, deep=True): + return {""base_model"": self.base_model, ""meta_model"": self.meta_model, ""base_config"": self.base_config, + ""meta_config"": self.meta_config, ""random_seed"": self.random_seed} + + def _process_pretrained_model(self, X, y_hat_proba): + """""" + Given the original input features and the base output probabilities, generate input features + to train a meta model. Current implementation copies all input features and appends. + + :param X: numpy [nsamples, dim] + :param y_hat_proba: [nsamples, nclasses] + :return: array with new features [nsamples, newdim] + """""" + assert (len(y_hat_proba.shape) == 2) + assert (X.shape[0] == y_hat_proba.shape[0]) + # sort the probs sample by sample + faux1 = np.sort(y_hat_proba, axis=-1) + # add delta between top and second candidate + faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1) + return np.hstack([X, faux1, faux2]) + + def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False, + meta_train_data=(None, None)): + """""" + Fit base and meta models. + + :param X: input to the base model, + array-like of shape (n_samples, n_features). + Features vectors of the training data. + :param y: ground truth for the base model, + array-like of shape (n_samples,) + :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model + (complement will be used to train the base model) + :param randomize_samples: use shuffling when creating partitions + :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been + instantiated outside/by the user and are already fitted. + :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used + with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate. + Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode. 
+ :return: self + """""" + X = np.asarray(X) + y = np.asarray(y) + assert (len(meta_train_data) == 2) + if meta_train_data[0] is None: + X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction, + random_state=self.random_seed) + else: + if not base_is_prefitted: + raise ValueError(""ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option"") + X_base = y_base = None + X_meta = meta_train_data[0] + y_meta = meta_train_data[1] + # fit the base model + if not base_is_prefitted: + self.base_model.fit(X_base, y_base) + # get input for the meta model from the base + try: + y_hat_meta_proba = self.base_model.predict_proba(X_meta) + # determine correct-incorrect outcome - these are targets for the meta model trainer + + # y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8) + y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int) + + except NotFittedError as e: + raise RuntimeError(""ERROR: fit(): The base model appears not pre-fitted (%s)"" % repr(e)) + # get input features for meta training + X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba) + # train meta model to predict 'correct' vs. 'incorrect' of the base + self.meta_model.fit(X_meta_in, y_hat_meta_targets) + return self + + def predict(self, X): + """""" + Generate a base prediction along with uncertainty/confidence for data X. + + :param X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + :return: namedtuple: A namedtuple that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_score: ndarray of shape (n_samples,) + Confidence score the test points. + + """""" + y_hat_proba = self.base_model.predict_proba(X) + y_hat = np.argmax(y_hat_proba, axis=-1) + X_meta_in = self._process_pretrained_model(X, y_hat_proba) + z_hat = self.meta_model.predict_proba(X_meta_in) + index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0] # class 1 corresponds to probab of positive/correct outcome + Result = namedtuple('res', ['y_pred', 'y_score']) + res = Result(y_hat, z_hat[:, index_of_class_1]) + + return res + from .blackbox_metamodel_regression import BlackboxMetamodelRegression +from .blackbox_metamodel_classification import BlackboxMetamodelClassification + import inspect +from collections import namedtuple + +import numpy as np +from sklearn.ensemble import GradientBoostingRegressor +from sklearn.model_selection import train_test_split +from sklearn.exceptions import NotFittedError +from uq360.algorithms.posthocuq import PostHocUQ + + +class BlackboxMetamodelRegression(PostHocUQ): + """""" Extracts confidence scores from black-box regression models using a meta-model [2]_ . + + References: + .. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes. + The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019. 
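+
+    Example:
+        A minimal, illustrative sketch with the default gradient-boosting base and meta
+        models and synthetic data (the default base config uses the 'ls' loss string,
+        which may need to be changed to 'squared_error' on newer scikit-learn releases):
+
+            import numpy as np
+            rng = np.random.RandomState(0)
+            X = rng.randn(500, 5)
+            y = X[:, 0] + 0.1 * rng.randn(500)
+            uq = BlackboxMetamodelRegression()
+            uq.fit(X, y, meta_fraction=0.2)
+            res = uq.predict(X)
+            # y_lower / y_upper are the base prediction minus / plus the meta model's
+            # predicted error band
+            print(res.y_mean[:3], res.y_lower[:3], res.y_upper[:3])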
+ + """""" + + def _create_named_model(self, mdltype, config): + """""" + Instantiates a model by name passed in 'mdltype' + + :param mdltype: string with name (must be supprted) + :param config: dict with args passed in the instantiation call + :return: mdl instance + """""" + assert (isinstance(mdltype, str)) + if mdltype == 'gbr': + mdl = GradientBoostingRegressor(**config) + else: + raise NotImplementedError(""ERROR: Requested model type unknown: \\""%s\\"""" % mdltype) + return mdl + + def _get_model_instance(self, model, config): + """""" + Returns an instance of a model based on (a) a desired name or (b) passed in class, or + (c) passed in instance + + :param model: string, class, or instance. Class and instance must have certain methods callable. + :param config: dict with args passed in during the instantiation + :return: model instance + """""" + assert (model is not None and config is not None) + if isinstance(model, str): # 'model' is a name, create it + mdl = self._create_named_model(model, config) + elif inspect.isclass(model): # 'model' is a class, instantiate it + mdl = model(**config) + else: # 'model' is an instance, register it + mdl = model + if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]): + raise ValueError(""ERROR: Passed model/method failed the interface test. Methods required: %s"" % + ','.join(self.callable_keys)) + return mdl + + def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42): + """""" + + :param base_model: Base model. Can be: + (1) None (default mdl will be set up), + (2) Named model (e.g., 'gbr'), + (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate. + (4) Model instance (instantiated outside). Will be re-used. Must have required callable methods. + Note: user-supplied classes and models must have certain callable methods ('predict', 'fit') + and be capable of raising NotFittedError. + :param meta_model: Meta model. 
Same values possible as with 'base_model' + :param base_config: None or a params dict to be passed to 'base_model' at instantiation + :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation + :param random_seed: seed used in the various pipeline steps + """""" + super(BlackboxMetamodelRegression).__init__() + self.random_seed = random_seed + self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in + self.base_model_default = 'gbr' + self.meta_model_default = 'gbr' + self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001, + 'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed} + self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10, + 'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10, + 'random_state': self.random_seed} + self.base_config = base_config if base_config is not None else self.base_config_default + self.meta_config = meta_config if meta_config is not None else self.meta_config_default + self.base_model = None + self.meta_model = None + self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default, + self.base_config) + self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default, + self.meta_config) + + def get_params(self, deep=True): + return {""base_model"": self.base_model, ""meta_model"": self.meta_model, ""base_config"": self.base_config, + ""meta_config"": self.meta_config, ""random_seed"": self.random_seed} + + def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False, + meta_train_data=(None, None)): + """""" + Fit base and meta models. + + :param X: input to the base model + :param y: ground truth for the base model + :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model + (complement will be used to train the base model) + :param randomize_samples: use shuffling when creating partitions + :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been + instantiated outside/by the user and are already fitted. + :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used + with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate. + Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode. 
+ :return: self + """""" + X = np.asarray(X) + y = np.asarray(y) + assert(len(meta_train_data)==2) + if meta_train_data[0] is None: + X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction, + random_state=self.random_seed) + else: + if not base_is_prefitted: + raise ValueError(""ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option"") + X_base = y_base = None + X_meta = meta_train_data[0] + y_meta = meta_train_data[1] + # fit the base model + if not base_is_prefitted: + self.base_model.fit(X_base, y_base) + # get input for the meta model from the base + try: + y_hat_meta = self.base_model.predict(X_meta) + except NotFittedError as e: + raise RuntimeError(""ERROR: fit(): The base model appears not pre-fitted (%s)"" % repr(e)) + # used base input and output as meta input + X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta) + # train meta model to predict abs diff + self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta)) + return self + + def _process_pretrained_model(self, X, y_hat): + """""" + Given the original input features and the base output probabilities, generate input features + to train a meta model. Current implementation copies all input features and appends. + + :param X: numpy [nsamples, dim] + :param y_hat: [nsamples,] + :return: array with new features [nsamples, newdim] + """""" + y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat + X_meta_in = np.hstack([X, y_hat_meta_prime]) + return X_meta_in + + def predict(self, X): + """""" + Generate prediction and uncertainty bounds for data X. + + :param X: input features + :return: namedtuple: A namedtuple that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + + """""" + y_hat = self.base_model.predict(X) + y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat + X_meta_in = np.h" +"stack([X, y_hat_prime]) + z_hat = self.meta_model.predict(X_meta_in) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_hat, y_hat - z_hat, y_hat + z_hat) + + return res + from .quantile_regression import QuantileRegression + from collections import namedtuple + +from sklearn.ensemble import GradientBoostingRegressor + +from uq360.algorithms.builtinuq import BuiltinUQ + + +class QuantileRegression(BuiltinUQ): + """"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile + to obtain the prediction intervals. + """""" + + def __init__(self, model_type=""gbr"", config=None): + """""" + Args: + model_type: The base model used for predicting a quantile. Currently supported values are [gbr]. + gbr is sklearn GradientBoostingRegressor. + config: dictionary containing the config parameters for the model. 
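+
+        Example:
+            A minimal, illustrative sketch. `alpha` falls back to 0.95 when omitted, but
+            the remaining GBR keys below are required because they are read directly
+            from `config` (values are placeholders; the hard-coded 'ls' loss for the
+            mean model assumes an older, pre-1.2 scikit-learn release):
+
+                import numpy as np
+                config = {'n_estimators': 200, 'max_depth': 3, 'learning_rate': 0.05,
+                          'min_samples_leaf': 5, 'min_samples_split': 10, 'alpha': 0.9}
+                qr = QuantileRegression(model_type='gbr', config=config)
+                rng = np.random.RandomState(0)
+                X = rng.rand(300, 2)
+                y = X[:, 0] + 0.1 * rng.randn(300)
+                qr.fit(X, y)
+                res = qr.predict(X)    # res.y_mean, res.y_lower, res.y_upper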
+ """""" + + super(QuantileRegression).__init__() + if config is not None: + self.config = config + else: + self.config = {} + if ""alpha"" not in self.config: + self.config[""alpha""] = 0.95 + if model_type == ""gbr"": + self.model_type = model_type + self.model_mean = GradientBoostingRegressor( + loss='ls', + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""] + ) + self.model_upper = GradientBoostingRegressor( + loss='quantile', + alpha=self.config[""alpha""], + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""] + ) + self.model_lower = GradientBoostingRegressor( + loss='quantile', + alpha=1.0 - self.config[""alpha""], + n_estimators=self.config[""n_estimators""], + max_depth=self.config[""max_depth""], + learning_rate=self.config[""learning_rate""], + min_samples_leaf=self.config[""min_samples_leaf""], + min_samples_split=self.config[""min_samples_split""]) + + else: + raise NotImplementedError + + def get_params(self, deep=True): + return {""model_type"": self.model_type, ""config"": self.config} + + def fit(self, X, y): + """""" Fit the Quantile Regression model. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + self.model_mean.fit(X, y) + self.model_lower.fit(X, y) + self.model_upper.fit(X, y) + + return self + + def predict(self, X): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + """""" + + y_mean = self.model_mean.predict(X) + y_lower = self.model_lower.predict(X) + y_upper = self.model_upper.predict(X) + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + return res + from collections import namedtuple + +import botorch +import gpytorch +import numpy as np +import torch +from botorch.models import SingleTaskGP +from botorch.utils.transforms import normalize +from gpytorch.constraints import GreaterThan +from scipy.stats import norm +from sklearn.preprocessing import StandardScaler + +from uq360.algorithms.builtinuq import BuiltinUQ + +np.random.seed(42) +torch.manual_seed(42) + + +class HomoscedasticGPRegression(BuiltinUQ): + """""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise. + + References: + .. 
[1] https://botorch.org/api/models.html#singletaskgp + + """""" + + def __init__(self, + kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()), + likelihood=None, + config=None): + """""" + Args: + kernel: gpytorch kernel function with default set to `RBFKernel` with output scale. + likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`. + config: dictionary containing the config parameters for the model. + """""" + + super(HomoscedasticGPRegression).__init__() + self.config = config + self.kernel = kernel + self.likelihood = likelihood + self.model = None + self.scaler = StandardScaler() + self.X_bounds = None + + def get_params(self, deep=True): + return {""kernel"": self.kernel, ""likelihood"": self.likelihood, ""config"": self.config} + + def fit(self, X, y, **kwargs): + """""" + Fit the GP Regression model. + + Additional arguments relevant for SingleTaskGP fitting can be passed to this function. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the training data. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + **kwargs: Additional arguments relevant for SingleTaskGP fitting. + + Returns: + self + + """""" + y = self.scaler.fit_transform(y) + X, y = torch.tensor(X), torch.tensor(y) + self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]), + X.max() * torch.ones(X.shape[1])]) + + X = normalize(X, X_bounds) + + model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs) + model_homo.likelihood.noise_covar.register_constraint(""raw_noise"", GreaterThan(1e-5)) + model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo) + botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik) + + model_homo_marginal_log_lik.eval() + + self.model = model_homo_marginal_log_lik + self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze() + + return self + + def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False): + """""" + Obtain predictions for the test points. + + In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True) + and full predictive distribution (return_dists=True). + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + return_dists: If True, the predictive distribution for each instance using scipy distributions is returned. + return_epistemic: if True, the epistemic upper and lower bounds are returned. + return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions + is returned. + + Returns: + namedtuple: A namedtuple that holds + + y_mean: ndarray of shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. 
+ y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of epistemic component of the predictive distribution of the test points. + Only returned when `return_epistemic` is True. + dists: list of predictive distribution as `scipy.stats` objects with length n_samples. + Only returned when `return_dists` is True. + """""" + X = torch.tensor(X) + + X_test_norm = normalize(X, self.X_bounds) + + self.model.eval() + with torch.no_grad(): + posterior = self.model.model.posterior(X_test_norm) + y_mean = posterior.mean + #y_epi_std = torch.sqrt(posterior.variance) + y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region() + + predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True) + #y_std = torch.sqrt(predictive_posterior.variance) + y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region() + + y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \\ + self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(),\\ + self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze() + + y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0 + y_std = (y_upper_total - y_lower_total) / 4.0 + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_mean, y_lower, y_upper) + + if return_epistemic: + Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',)) + res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic) + + if return_dists: + dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_dists',)) + res = Result(*res, y_dists=dists) + + if return_epistemic_dists: + epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])] + Result = namedtuple('res', Result._fields + ('y_epistemic_dists',)) + res = Result(*res, y_epistemic_dists=epi_dists) + + return res + + from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression from .ucc_recalibration import UCCRecalibration + from collections import namedtuple + +from uq360.algorithms.posthocuq import PostHocUQ +from uq360.utils.misc import form_D_for_auucc +from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve + + +class UCCRecalibration(PostHocUQ): + """""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve. + """""" + + def __init__(self, base_model): + """""" + Args: + base_model: pretrained model to be recalibrated. + """""" + super(UCCRecalibration).__init__() + self.base_model = self._process_pretrained_model(base_model) + self.ucc = None + + def get_params(self, deep=True): + return {""base_model"": self.base_model} + + def _process_pretrained_model(self, base_model): + return base_model + + def fit(self, X, y): + """""" + Fit the Uncertainty Characteristics Curve. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. 
+ y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] + bwu = y_pred_upper - y_pred_mean + bwl = y_pred_mean - y_pred_lower + self.ucc = UncertaintyCharacteristicsCurve() + self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze()) + + return self + + def predict(self, X, missrate=0.05): + """""" + Generate prediction and uncertainty bounds for data X. + + Args: + X: array-like of shape (n_samples, n_features). + Features vectors of the test points. + missrate: desired missrate of the new operating point, set to 0.05 by default. + + Returns: + namedtuple: A namedtupe that holds + + y_mean: ndarray of" +"shape (n_samples, [n_output_dims]) + Mean of predictive distribution of the test points. + y_lower: ndarray of shape (n_samples, [n_output_dims]) + Lower quantile of predictive distribution of the test points. + y_upper: ndarray of shape (n_samples, [n_output_dims]) + Upper quantile of predictive distribution of the test points. + """""" + C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False) + new_scale = C['modvalue'] + + y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] + bwu = y_pred_upper - y_pred_mean + bwl = y_pred_mean - y_pred_lower + + if C['operation'] == 'bias': + calib_y_pred_upper = y_pred_mean + (new_scale + bwu) # lower bound width + calib_y_pred_lower = y_pred_mean - (new_scale + bwl) # Upper bound width + else: + calib_y_pred_upper = y_pred_mean + (new_scale * bwu) # lower bound width + calib_y_pred_lower = y_pred_mean - (new_scale * bwl) # Upper bound width + + Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) + res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper) + + return res + from collections import namedtuple + +import numpy as np + +from sklearn.calibration import CalibratedClassifierCV +from sklearn.preprocessing import LabelEncoder + +from uq360.utils.misc import DummySklearnEstimator +from uq360.algorithms.posthocuq import PostHocUQ + + +class ClassificationCalibration(PostHocUQ): + """"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows + non-sklearn models to be calibrated. + + """""" + def __init__(self, num_classes, fit_mode=""features"", method='isotonic', base_model_prediction_func=None): + """""" + + Args: + num_classes: number of classes. + fit_mode: features or probs. If probs the `fit` and `predict` operate on the base models probability scores, + useful when these are precomputed. + method: isotonic or sigmoid. + base_model_prediction_func: the function that takes in the input features and produces base model's + probability scores. This is ignored when operating in `probs` mode. + """""" + super(ClassificationCalibration).__init__() + if fit_mode == ""probs"": + # In this case, the fit assumes that it receives the probability scores of the base model. 
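+            # (i.e., in probs mode the X passed to fit/predict is expected to already be
+            # the (n_samples, n_classes) matrix of base-model scores)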
+ # create a dummy estimator + self.base_model = DummySklearnEstimator(num_classes, lambda x: x) + else: + self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func) + self.method = method + + def get_params(self, deep=True): + return {""num_classes"": self.num_classes, ""fit_mode"": self.fit_mode, ""method"": self.method, + ""base_model_prediction_func"": self.base_model_prediction_func} + + def _process_pretrained_model(self, base_model): + return base_model + + def fit(self, X, y): + """""" Fits calibration model using the provided calibration set. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + y: array-like of shape (n_samples,) or (n_samples, n_targets) + Target values + + Returns: + self + + """""" + + self.base_model.label_encoder_ = LabelEncoder().fit(y) + self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model, + cv=""prefit"", + method=self.method) + self.calib_model.fit(X, y) + + return self + + def predict(self, X): + """""" + Obtain calibrated predictions for the test points. + + Args: + X: array-like of shape (n_samples, n_features) or (n_samples, n_classes). + Features vectors of the training data or the probability scores from the base model. + + Returns: + namedtuple: A namedtupe that holds + + y_pred: ndarray of shape (n_samples,) + Predicted labels of the test points. + y_prob: ndarray of shape (n_samples, n_classes) + Predicted probability scores of the classes. + + """""" + y_prob = self.calib_model.predict_proba(X) + if len(np.shape(y_prob)) == 1: + y_pred_labels = y_prob > 0.5 + + else: + y_pred_labels = np.argmax(y_prob, axis=1) + + Result = namedtuple('res', ['y_pred', 'y_prob']) + res = Result(y_pred_labels, y_prob) + + return res + from .classification_calibration import ClassificationCalibration + import numpy as np +from scipy.stats import norm +from sklearn.metrics import mean_squared_error, r2_score + +from ..utils.misc import fitted_ucc_w_nullref + + +def picp(y_true, y_lower, y_upper): + """""" + Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the grounds truth lies + within predicted interval. Measures the prediction interval calibration for regression. + + Args: + y_true: Ground truth + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: the fraction of samples for which the grounds truth lies within predicted interval. + """""" + satisfies_upper_bound = y_true <= y_upper + satisfies_lower_bound = y_true >= y_lower + return np.mean(satisfies_upper_bound * satisfies_lower_bound) + + +def mpiw(y_lower, y_upper): + """""" + Mean Prediction Interval Width (MPIW). Computes the average width of the the prediction intervals. Measures the + sharpness of intervals. + + Args: + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: the average width the prediction interval across samples. + """""" + return np.mean(np.abs(y_lower - y_upper)) + + +def auucc_gain(y_true, y_mean, y_lower, y_upper): + """""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference + with constant band. 
+ + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: AUUCC gain + + """""" + u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper) + auucc = u.get_AUUCC() + assert(isinstance(auucc, list) and len(auucc) == 2), ""Failed to calculate auucc gain"" + assert (not np.isclose(auucc[1], 0.)), ""Failed to calculate auucc gain"" + auucc_gain = (auucc[1]-auucc[0])/auucc[0] + return auucc_gain + + +def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper): + """""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean. + + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + + Returns: + float: nll + + """""" + y_std = (y_upper - y_lower) / 4.0 + nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze())) + return nll + + +def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option=""all"", nll_fn=None): + """""" + Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes + the [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] metrics. + + Args: + y_true: Ground truth + y_mean: predicted mean + y_lower: predicted lower bound + y_upper: predicted upper bound + option: string or list of string contained the name of the metrics to be computed. + nll_fn: function that evaluates NLL, if None, then computes Gaussian NLL using y_mean and y_lower. + + Returns: + dict: dictionary containing the computed metrics. + """""" + + assert y_true.shape == y_mean.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_mean.shape) + assert y_true.shape == y_lower.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_lower.shape) + assert y_true.shape == y_upper.shape, ""y_true shape: {}, y_mean shape: {}"".format(y_true.shape, y_upper.shape) + + results = {} + if not isinstance(option, list): + if option == ""all"": + option_list = [""rmse"", ""nll"", ""auucc_gain"", ""picp"", ""mpiw"", ""r2""] + else: + option_list = [option] + + if ""rmse"" in option_list: + results[""rmse""] = mean_squared_error(y_true, y_mean, squared=False) + if ""nll"" in option_list: + if nll_fn is None: + nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper) + results[""nll""] = nll + else: + results[""nll""] = np.mean(nll_fn(y_true)) + if ""auucc_gain"" in option_list: + gain = auucc_gain(y_true, y_mean, y_lower, y_upper) + results[""auucc_gain""] = gain + if ""picp"" in option_list: + results[""picp""] = picp(y_true, y_lower, y_upper) + if ""mpiw"" in option_list: + results[""mpiw""] = mpiw(y_lower, y_upper) + if ""r2"" in option_list: + results[""r2""] = r2_score(y_true, y_mean) + + return results + + +def _check_not_tuple_of_2_elements(obj, obj_name='obj'): + """"""Check object is not tuple or does not have 2 elements."""""" + if not isinstance(obj, tuple) or len(obj) != 2: + raise TypeError('%s must be a tuple of 2 elements.' % obj_name) + + +def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7, + ax=None, figsize=None, dpi=None, + title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs): + """""" + Plot the uncertainty distribution for a single distribution. + + Args: + dist: scipy.stats._continuous_distns. + A scipy distribution object. + show_quantile_dots: boolean. 
+ Whether to show quantil dots on top of the density plot. + qd_sample: int. + Number of dots for the quantile dot plot. + qd_bins: int. + Number of bins for the quantile dot plot. + ax: matplotlib.axes.Axes or None, optional (default=None). + Target axes instance. If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). + Figure size. + dpi : int or None, optional (default=None). + Resolution of the figure. + title : string or None, optional (default=Prediction Distribution) + Axes title. + If None, title is disabled. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + xlabel : string or None, optional (default=Prediction) + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional (default=Density) + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with prediction distribution. + """""" + + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100) + ax.plot(x, dist.pdf(x), **kwargs) + + if show_quantile_dots: + from matplotlib.patches import Circle + from matplotlib.collections import PatchCollection + import matplotlib.ticker as ticker + + data = dist.rvs(size=10000) + p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample) + x_ = np.percentile(data, p_less_than_x * 100) # Inverce CDF (ppf) + # Create bins + hist = np.histogram(x_, bins=qd_bins) + bins, edges = hist + radius = (edges[1] - edges[0]) / 2 + + ax2 = ax.twinx() + patches = [] + max_y = 0 + for i in range(qd_bins): + x_bin = (edges[i + 1] + edges[i]) / 2 + y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])] + + max_y = max(y_bins) if max(y_bins) > max_y else max_y + + for _, y_bin in enumerate(y_bins): + circle = Circle((x_bin, y_bin), radius) + patches.append(circle) + + p = PatchCollection(patches, alpha=0.4) + ax2.add_collection(p) + + # Axis tweek + y_scale = (max_y + radius) / max(dist.pdf(x)) + ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x_ / y_scale)) + ax2.yaxis.set_major_formatter(ticks_y) + ax2.set_yticklabels([]) + if xlims is not None: + ax2.set_xlim(left=xlims[0], right=xlims[1]) + else: + ax2.set_xlim([min(" +"x_) - radius, max(x) + radius]) + ax2.set_ylim([0, max_y + radius]) + ax2.set_aspect(1) + + if title is not None: + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + + return ax + + +def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10, + ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale=""linear"", + title=None, xlabel=None, ylabel=None): + """""" + Plot how prediction uncertainty varies across the entire range of a feature. + + Args: + x_test: One dimensional ndarray. + Feature column of the test dataset. + y_test: One dimensional ndarray. + Ground truth label of the test dataset. + y_test_pred_lower_total: One dimensional ndarray. + Lower bound of the total uncertainty range. + y_test_pred_upper_total: One dimensional ndarray. + Upper bound of the total uncertainty range. + num_bins: int. + Number of bins used to discritize x_test into equal-sample-sized bins. + ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. 
If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). Figure size. + dpi : int or None, optional (default=None). Resolution of the figure. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``. + xscale: Passed to ``ax.set_xscale()``. + title : string or None, optional + Axes title. + If None, title is disabled. + xlabel : string or None, optional + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature. + + """""" + from scipy.stats.mstats import mquantiles + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x_uniques_sorted = np.sort(np.unique(x_test)) + + num_unique = len(x_uniques_sorted) + sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test) + if len(x_uniques_sorted) > 10: # bin the values + q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:]) + q_sample_bin_ids = np.digitize(x_test, q_bins) + picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin], + y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)]) + unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins) + picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)] + picp_replicated = np.array([item for sublist in picp_replicated for item in sublist]) + else: + picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin], + y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)]) + picp_replicated = picps + + ax.plot(x_uniques_sorted, picp_replicated, label='PICP') + ax.axhline(0.95, linestyle='--', label='95%') + ax.set_ylabel('PICP') + + ax.legend(loc='best') + + if title is None: + title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format( + picp(y_test, + y_test_pred_lower_total, + y_test_pred_upper_total), + mpiw(y_test_pred_lower_total, + y_test_pred_upper_total)) + + if xlims is not None: + ax.set_xlim(left=xlims[0], right=xlims[1]) + + if ylims is not None: + ax.set_ylim(bottom=ylims[0], top=ylims[1]) + + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + if xscale is not None: + ax.set_xscale(xscale) + + return ax + + +def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total, + y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None, + ax=None, figsize=None, dpi=None, xlims=None, xscale=""linear"", + title=None, xlabel=None, ylabel=None): + """""" + Plot how prediction uncertainty varies across the entire range of a feature. + + Args: + x_test: one dimensional ndarray. + Feature column of the test dataset. + y_test_pred_mean: One dimensional ndarray. + Model prediction for the test dataset. + y_test_pred_lower_total: One dimensional ndarray. + Lower bound of the total uncertainty range. + y_test_pred_upper_total: One dimensional ndarray. + Upper bound of the total uncertainty range. + y_test_pred_lower_epistemic: One dimensional ndarray. 
+ Lower bound of the epistemic uncertainty range. + y_test_pred_upper_epistemic: One dimensional ndarray. + Upper bound of the epistemic uncertainty range. + ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. + figsize: tuple of 2 elements or None, optional (default=None). Figure size. + dpi : int or None, optional (default=None). Resolution of the figure. + xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. + xscale: Passed to ``ax.set_xscale()``. + title : string or None, optional + Axes title. + If None, title is disabled. + xlabel : string or None, optional + X-axis title label. + If None, title is disabled. + ylabel : string or None, optional + Y-axis title label. + If None, title is disabled. + + Returns: + matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature. + + """""" + import matplotlib.pyplot as plt + + if ax is None: + if figsize is not None: + _check_not_tuple_of_2_elements(figsize, 'figsize') + _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) + + x_uniques_sorted = np.sort(np.unique(x_test)) + + y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2 + agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted]) + agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted]) + + ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction') + ax.fill_between(x_uniques_sorted, + agg_y_mean - 2.0 * agg_y_std, + agg_y_mean + 2.0 * agg_y_std, + alpha=0.3, label='total uncertainty') + + if y_test_pred_lower_epistemic is not None: + y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2 + agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted]) + ax.fill_between(x_uniques_sorted, + agg_y_mean - 2.0 * agg_y_std_epistemic, + agg_y_mean + 2.0 * agg_y_std_epistemic, + alpha=0.3, label='model uncertainty') + + ax.legend(loc='best') + + if xlims is not None: + ax.set_xlim(left=xlims[0], right=xlims[1]) + + if title is not None: + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + if xscale is not None: + ax.set_xscale(xscale) + + return ax + import numpy as np +import pandas as pd +from scipy.stats import entropy +from sklearn.metrics import roc_auc_score, log_loss, accuracy_score + + +def entropy_based_uncertainty_decomposition(y_prob_samples): + """""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components. + + References: + .. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of + uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference + on Machine Learning (pp. 1184-1193). PMLR. + + Args: + y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities + corresponding to samples from the model posterior. + + Returns: + tuple: + - total_uncertainty: entropy of the predictive distribution. + - aleatoric_uncertainty: aleatoric component of the total_uncertainty. + - epistemic_uncertainty: epistemic component of the total_uncertainty. 
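+
+    Example:
+        A minimal sketch; here ``y_prob_samples`` is assumed to be a list of arrays of shape
+        (n_samples, n_classes), one per draw from the model posterior:
+
+        >>> total, aleatoric, epistemic = entropy_based_uncertainty_decomposition(y_prob_samples)
+
+        Each returned array has shape (n_samples,).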
+ + """""" + y_preds_samples_stacked = np.stack(y_prob_samples) + preds_mean = np.mean(y_preds_samples_stacked, 0) + + total_uncertainty = entropy(preds_mean, axis=1) + aleatoric_uncertainty = np.mean( + np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1), + axis=1) + epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty + + return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty + + +def multiclass_brier_score(y_true, y_prob): + """"""Brier score for multi-class. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + + Returns: + float: Brier score. + + """""" + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + + y_target = np.zeros_like(y_prob) + y_target[:, y_true] = 1.0 + return np.mean(np.sum((y_target - y_prob) ** 2, axis=1)) + + +def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score, + attributes=None, num_bins=10, subgroup_ids=None, + return_counts=False): + """""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where + coverage instead of rejection rate is used. + + References: + .. [3] Franc, Vojtech, and Daniel Prusa. ""On discriminative learning of prediction uncertainty."" + In International Conference on Machine Learning, pp. 1963-1971. 2019. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like of shape (n_samples,) + predicted labels. + selection_scores: scores corresponding to certainty in the predicted labels. + risk_func: risk function under consideration. + attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. + num_bins: number of bins. + subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids. + return_counts: set to True to return counts also. + + Returns: + float or tuple: + - aurrrc (float): area under risk rejection rate curve. + - rejection_rates (list): rejection rates for each bin (returned only if return_counts is True). + - selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True). + - risks (list): risk in each bin (returned only if return_counts is True). 
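+
+    Example:
+        A minimal sketch with assumed inputs ``y_true`` of shape (n_samples,) and ``y_prob`` of
+        shape (n_samples, n_classes); with the default arguments the reported risk is 1 - accuracy:
+
+        >>> aurrrc = area_under_risk_rejection_rate_curve(y_true, y_prob)
+        >>> aurrrc, rejection_rates, thresholds, risks = area_under_risk_rejection_rate_curve(
+        ...     y_true, y_prob, num_bins=10, return_counts=True)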
+ + """""" + + if selection_scores is None: + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)] + + if y_pred is None: + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + y_pred = np.argmax(y_prob, axis=1) + + order = np.argsort(selection_scores)[::-1] + + rejection_rates = [] + selection_thresholds = [] + risks = [] + for bin_id in range(num_bins): + samples_in_" +"bin = len(y_true) // num_bins + selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]] + selection_thresholds.append(selection_threshold) + ids = selection_scores >= selection_threshold + if sum(ids) > 0: + if attributes is None: + if isinstance(y_true, pd.Series): + y_true_numpy = y_true.values + else: + y_true_numpy = y_true + if subgroup_ids is None: + risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids]) + else: + if sum(subgroup_ids & ids) > 0: + risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids]) + else: + risk_value = 0.0 + else: + risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes) + else: + risk_value = 0.0 + risks.append(risk_value) + rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true)) + + aurrrc = np.nanmean(risks) + + if not return_counts: + return aurrrc + else: + return aurrrc, rejection_rates, selection_thresholds, risks + + +def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False): + """""" Computes the reliability curve and the expected calibration error [1]_ . + + References: + .. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference + on Machine Learning, PMLR 70:1321-1330, 2017. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like of shape (n_samples,) + predicted labels. + num_bins: number of bins. + return_counts: set to True to return counts also. + + Returns: + float or tuple: + - ece (float): expected calibration error. + - confidences_in_bins: average confidence in each bin (returned only if return_counts is True). + - accuracies_in_bins: accuracy in each bin (returned only if return_counts is True). + - frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True). 
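+
+    Example:
+        A minimal sketch with assumed inputs ``y_true`` of shape (n_samples,) and ``y_prob`` of
+        shape (n_samples, n_classes):
+
+        >>> ece = expected_calibration_error(y_true, y_prob, num_bins=10)
+        >>> ece, conf_bins, acc_bins, frac_bins, bin_centers = expected_calibration_error(
+        ...     y_true, y_prob, num_bins=10, return_counts=True)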
+ + """""" + + assert len(y_prob.shape) > 1, ""y_prob should be array-like of shape (n_samples, n_classes)"" + num_samples, num_classes = y_prob.shape + top_scores = np.max(y_prob, axis=1) + + if y_pred is None: + y_pred = np.argmax(y_prob, axis=1) + + if num_classes == 2: + bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0)) + else: + bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0)) + + non_boundary_bin_edges = bins_edges[1:-1] + bin_centers = (bins_edges[1:] + bins_edges[:-1])/2 + + sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges) + + num_samples_in_bins = np.zeros(num_bins) + accuracies_in_bins = np.zeros(num_bins) + confidences_in_bins = np.zeros(num_bins) + + for bin in range(num_bins): + num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin]) + if num_samples_in_bins[bin] > 0: + accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin] + confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin] + + ece = np.sum( + num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples + ) + frac_samples_in_bins = num_samples_in_bins / num_samples + + if not return_counts: + return ece + else: + return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers + + +def compute_classification_metrics(y_true, y_prob, option='all'): + """""" + Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes + the [aurrrc, ece, auroc, nll, brier, accuracy] metrics. + + Args: + y_true: array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like of shape (n_samples, n_classes). + Probability scores from the base model. + option: string or list of string contained the name of the metrics to be computed. + + Returns: + dict: a dictionary containing the computed metrics. + """""" + results = {} + if not isinstance(option, list): + if option == ""all"": + option_list = [""aurrrc"", ""ece"", ""auroc"", ""nll"", ""brier"", ""accuracy""] + else: + option_list = [option] + + if ""aurrrc"" in option_list: + results[""aurrrc""] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob) + if ""ece"" in option_list: + results[""ece""] = expected_calibration_error(y_true=y_true, y_prob=y_prob) + if ""auroc"" in option_list: + results[""auroc""], _ = roc_auc_score(y_true=y_true, y_score=y_prob) + if ""nll"" in option_list: + results[""nll""] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) + if ""brier"" in option_list: + results[""brier""] = multiclass_brier_score(y_true=y_true, y_prob=y_prob) + if ""accuracy"" in option_list: + results[""accuracy""] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) + + return results + + +def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""""], num_bins=10): + """""" + Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves + can be plot by passing data as lists. + + Args: + y_true: array-like or or a list of array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like or or a list of array-like of shape (n_samples,) + predicted labels. + plot_label: (optional) list of names identifying each curve. + num_bins: number of bins. 
+ + Returns: + tuple: + - ece_list: ece: list containing expected calibration error for each curve. + - accuracies_in_bins_list: list containing binned average accuracies for each curve. + - frac_samples_in_bins_list: list containing binned sample frequencies for each curve. + - confidences_in_bins_list: list containing binned average confidence for each curve. + """""" + import matplotlib.pyplot as plt + + if not isinstance(y_true, list): + y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred] + if len(plot_label) != len(y_true): + raise ValueError('y_true and plot_label should be of same length.') + + ece_list = [] + accuracies_in_bins_list = [] + frac_samples_in_bins_list = [] + confidences_in_bins_list = [] + + for idx in range(len(plot_label)): + ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx], + y_prob[idx], + y_pred[idx], + num_bins=num_bins, + return_counts=True) + ece_list.append(ece) + accuracies_in_bins_list.append(accuracies_in_bins) + frac_samples_in_bins_list.append(frac_samples_in_bins) + confidences_in_bins_list.append(confidences_in_bins) + + fig = plt.figure(figsize=(12, 5)) + + plt.subplot(1, 2, 1) + for idx in range(len(plot_label)): + plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx]) + plt.title(""Confidence Histogram"") + plt.xlabel(""Confidence"") + plt.ylabel(""Fraction of Samples"") + plt.grid() + plt.ylim([0.0, 1.0]) + plt.legend() + + plt.subplot(1, 2, 2) + for idx in range(len(plot_label)): + plt.plot(bins, accuracies_in_bins_list[idx], 'o-', + label=""{} ECE = {:.2f}"".format(plot_label[idx], ece_list[idx])) + plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label=""Perfect Calibration"") + plt.title(""Reliability Plot"") + plt.xlabel(""Confidence"") + plt.ylabel(""Accuracy"") + plt.grid() + plt.legend() + + plt.show() + + return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list + + +def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""""], risk_func=None, + attributes=None, num_bins=10, subgroup_ids=None): + """""" + Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves + can be plot by passing data as lists. + + Args: + y_true: array-like or or a list of array-like of shape (n_samples,) + ground truth labels. + y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). + Probability scores from the base model. + y_pred: array-like or or a list of array-like of shape (n_samples,) + predicted labels. + selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels. + risk_func: risk function under consideration. + attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. + num_bins: number of bins. + subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a + subgroup of the samples specified by subgroup_ids. + + Returns: + tuple: + - aurrrc_list: list containing the area under risk rejection rate curves. + - rejection_rate_list: list containing the binned rejection rates. + - selection_thresholds_list: list containing the binned selection thresholds. + - risk_list: list containing the binned risks. 
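+
+    Example:
+        A minimal sketch for a single model; ``y_true``, ``y_prob`` and ``y_pred`` are assumed to
+        be the test labels, class probabilities and predicted labels, and an explicit risk
+        function is supplied:
+
+        >>> from sklearn.metrics import accuracy_score
+        >>> results = plot_risk_vs_rejection_rate(y_true, y_prob, y_pred,
+        ...                                       risk_func=accuracy_score, num_bins=10)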
+ """""" + import matplotlib.pyplot as plt + + if not isinstance(y_true, list): + y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids] + if len(plot_label) != len(y_true): + raise ValueError('y_true and plot_label should be of same length.') + + aurrrc_list = [] + rejection_rate_list = [] + risk_list = [] + selection_thresholds_list = [] + + for idx in range(len(plot_label)): + aursrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve( + y_true[idx], + y_prob[idx], + y_pred[idx], + selection_scores=selection_scores[idx], + risk_func=risk_func, + attributes=attributes, + num_bins=num_bins, + subgroup_ids=subgroup_ids[idx], + return_counts=True + ) + + aurrrc_list.append(aursrc) + rejection_rate_list.append(rejection_rates) + risk_list.append(risks) + selection_thresholds_list.append(selection_thresholds) + + plt.figure(figsize=(12, 5)) + + plt.subplot(1, 2, 1) + for idx in range(len(plot_label)): + plt.plot(rejection_rate_list[idx], risk_list[idx], label=""{} AURRRC={:.5f}"".format(plot_label[idx], aurrrc_list[idx])) + + plt.legend(loc=""best"") + plt.xlabel(""Rejection Rate"") + if risk_func is None: + ylabel = ""Prediction Error Rate"" + else: + if 'accuracy' in risk_func.__name__: + ylabel = ""1.0 - "" + risk_func.__name__ + else: + ylabel = risk_func.__name__ + + plt.ylabel(ylabel) + plt.title(""Risk vs Rejection Rate Plot"") + plt.grid() + + plt.subplot(1, 2, 2) + for idx in range(len(plot_label)): + plt.plot(selection_thresholds_list[idx], risk_list[idx], label=""{}"".format(plot_label[idx])) + + plt.legend(loc=""best"") + plt.xlabel(""Selection Threshold"") + if risk_func is None: + ylabel = ""Prediction Error Rate"" + else: + if 'accuracy' in risk_func.__name__: + ylabel = ""1.0 - "" + risk_func.__name__ + else: + ylabel = risk_func.__name__ + + plt.ylabel(ylabel" +") + plt.title(""Risk vs Selection Threshold Plot"") + plt.grid() + + plt.show() + + return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list + from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \\ + compute_classification_metrics, entropy_based_uncertainty_decomposition +from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \\ + plot_uncertainty_by_feature, plot_picp_by_feature +from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve + from copy import deepcopy + +import matplotlib.pyplot as plt +import numpy as np +from scipy.integrate import simps, trapz +from sklearn.isotonic import IsotonicRegression + +DEFAULT_X_AXIS_NAME = 'excess' +DEFAULT_Y_AXIS_NAME = 'missrate' + + +class UncertaintyCharacteristicsCurve: + """""" + Class with main functions of the Uncertainty Characteristics Curve (UCC). + + """""" + + def __init__(self, normalize=True, precompute_bias_data=True): + """""" + :param normalize: set initial axes normalization flag (can be changed via set_coordinates()) + :param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based + UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call + if bias-based UCC is not needed. 
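+
+        Example (illustrative usage; ``X`` is assumed to be an [n, 3] array whose columns are the
+        predicted value and the positive lower/upper band deviates, and ``gt`` the corresponding
+        ground truth):
+
+        >>> ucc = UncertaintyCharacteristicsCurve()
+        >>> ucc.fit(X, gt)
+        >>> auucc = ucc.get_AUUCC()
+        >>> op = ucc.get_OP()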
+ + """""" + self.axes_name2idx = {""missrate"": 1, ""bandwidth"": 2, ""excess"": 3, ""deficit"": 4} + self.axes_idx2descr = {1: ""Missrate"", 2: ""Bandwidth"", 3: ""Excess"", 4: ""Deficit""} + self.x_axis_idx = None + self.y_axis_idx = None + self.norm_x_axis = False + self.norm_y_axis = False + self.std_unit = None + self.normalize = normalize + self.d = None + self.gt = None + self.lb = None + self.ub = None + self.precompute_bias_data = precompute_bias_data + self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize) + + def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None): + """""" + Assigns user-specified type to the axes and normalization behavior (sticky). + + :param x_axis_name: None-> unchanged, or name from self.axes_name2idx + :param y_axis_name: ditto + :param normalize: True/False will activate/deactivate norming for specified axes. Behavior for + Axes_name that are None will not be changed. + Value None will leave norm status unchanged. + Note, axis=='missrate' will never get normalized, even with normalize == True + :return: none + """""" + normalize = self.normalize if normalize is None else normalize + if x_axis_name is None and self.x_axis_idx is None: + raise ValueError(""ERROR(UCC): x-axis has not been defined."") + if y_axis_name is None and self.y_axis_idx is None: + raise ValueError(""ERROR(UCC): y-axis has not been defined."") + if x_axis_name is None and y_axis_name is None and normalize is not None: + # just set normalization on/off for both axes and return + self.norm_x_axis = False if x_axis_name == 'missrate' else normalize + self.norm_y_axis = False if y_axis_name == 'missrate' else normalize + return + if x_axis_name is not None: + self.x_axis_idx = self.axes_name2idx[x_axis_name] + self.norm_x_axis = False if x_axis_name == 'missrate' else normalize + if y_axis_name is not None: + self.y_axis_idx = self.axes_name2idx[y_axis_name] + self.norm_y_axis = False if y_axis_name == 'missrate' else normalize + + def set_std_unit(self, std_unit=None): + """""" + Sets the UCC's unit to be used when displaying normalized axes. + + :param std_unit: if None, the unit will be calculated as stddev of the ground truth data + (ValueError raised if data has not been set at this point) + or set to the user-specified value. + :return: + """""" + if std_unit is None: # set it to stddev of data + if self.gt is None: + raise ValueError(""ERROR(UCC): No data specified - cannot set stddev unit."") + self.std_unit = np.std(self.gt) + + if np.isclose(self.std_unit, 0.): + print(""WARN(UCC): data-based stddev is zero - resetting axes unit to 1."") + self.std_unit = 1. + else: + self.std_unit = float(std_unit) + + def fit(self, X, gt): + """""" + Calculates internal arrays necessary for other methods (plotting, auc, cost minimization). + Re-entrant. + + :param X: [numsamples, 3] numpy matrix, or list of numpy matrices. + Col 1: predicted values + Col 2: lower band (deviate) wrt predicted value (always positive) + Col 3: upper band wrt predicted value (always positive) + If list is provided, all methods will output corresponding metrics as lists as well! 
+ :param gt: Ground truth array (i.e.,the 'actual' values corresponding to predictions in X + :return: self + + """""" + if not isinstance(X, list): + X = [X] + newX = [] + for x in X: + assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt)) + newX.append(self._sanitize_input(x)) + self.d = [gt - x[:, 0] for x in newX] + self.lb = [x[:, 1] for x in newX] + self.ub = [x[:, 2] for x in newX] + self.gt = gt + self.set_std_unit() + self.plotdata_for_scale = [] + self.plotdata_for_bias = [] + # precompute plotdata: + for i in range(len(self.d)): + self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False)) + if self.precompute_bias_data: + self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True)) + + return self + + def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True, + search=('scale', 'bias')): + """""" + Find minima of a linear cost function for each component. + Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value. + A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg). + The function returns a 'recipe' how to achieve the corresponding minimum, for each component. + + :param x_axis_cost: weight of one unit on x_axis + :param y_axis_cost: weight of one unit on y_axis + :param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be + pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes. + :param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'. + + :return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are - + 'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to + add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that + minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point). + + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if augment_cost_by_normfactor: + if self.norm_x_axis: + x_axis_cost /= self.std_unit + if self.norm_y_axis: + y_axis_cost /= self.std_unit + print(""INFO(UCC): Pre-norming costs by corresp. 
std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f"" % + (x_axis_cost, y_axis_cost)) + if isinstance(search, tuple): + search = list(search) + if not isinstance(search, list): + search = [search] + + min_costs = [] + for d in range(len(self.d)): + # original OP cost + m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d]) + original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][ + self.y_axis_idx] + + plotdata = self.plotdata_for_scale[d] + cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, + x_axis_cost, y_axis_cost) + mcf_scale_multiplier = plotdata[minidx_scale][0] + mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx] + mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx] + + if 'bias' in search: + if not self.precompute_bias_data: + raise ValueError( + ""ERROR(UCC): Cannot perform minimization - instantiated without bias data computation"") + plotdata = self.plotdata_for_bias[d] + cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, + x_axis_cost, y_axis_cost) + mcf_bias_add = plotdata[minidx_bias][0] + mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx] + mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx] + + if 'bias' in search and 'scale' in search: + if cost_bias < cost_scale: + min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, + 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) + else: + min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, + 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) + elif 'scale' in search: + min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, + 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) + elif 'bias' in search: + min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, + 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) + else: + raise ValueError(""(ERROR): Unknown search element (%s) requested."" % "","".join(search)) + + if len(min_costs) < 2: + return min_costs[0] + else: + return min_costs + + def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None, + req_critical_value=None, vary_bias=False): + """""" + Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns + a list of recipes how to achieve the point (x,y), for each component. If there is only one component, + returns a single recipe dict. + + :param req_x_axis_value: requested x value on UCC (normalization status is taken from current display) + :param req_y_axis_value: requested y value on UCC (normalization status is taken from current display) + :param vary_bias: set to True when referring to bias-induced UCC (scale UCC default) + :return: list of dicts (recipes), or a single dict + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1: + raise ValueError(""ERROR(UCC): exactly one axis value must be requested at a time."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + xnorm = self.std_unit if self.norm_x_axis else 1. 
+ ynorm = self.std_unit if self.norm_y_axis else 1. + recipe = [] + for dc in range(len(self.d)): + plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc] + if req_x_axis_value is not None: + tgtidx = self.x_axis_idx + req_value = req_x_axis_value * xnorm + elif req_y_axis_value is not None: + tgtidx = self.y_axis_idx + req_value = req_y_axis_value * ynorm + elif req_critical_value is not None: + req_value = req_critical_value + tgtidx = 0 # first element in plotdata is always the critical value (scale of bias) + else: + raise RuntimeError(""Unhandled case"") + closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata])) + recipe.append({'operation': ('bias' if vary_bias else 'scale'), + 'modvalue': plotdata[closestidx][0], + 'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm, + 'new_y': plotdata[closestidx][self.y_axis_" +"idx] / ynorm}) + if len(recipe) < 2: + return recipe[0] + else: + return recipe + + + def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2): + """""" + Find s minimum cost function value and corresp. position index in plotdata + + :param plotdata: liste of tuples + :param idx1: idx of x-axis item within the tuple + :param idx2: idx of y-axis item within the tuple + :param cost1: cost factor for x-axis unit + :param cost2: cost factor for y-axis unit + :return: min cost value, index within plotdata where minimum occurs + """""" + raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata] + minidx = np.argmin(raw) + return raw[minidx], minidx + + def _sanitize_input(self, x): + """""" + Replaces problematic values in input data (e.g, zero error bars) + + :param x: single matrix of input data [n, 3] + :return: sanitized version of x + """""" + if np.isclose(np.sum(x[:, 1]), 0.): + raise ValueError(""ERROR(UCC): Provided lower bands are all zero."") + if np.isclose(np.sum(x[:, 2]), 0.): + raise ValueError(""ERROR(UCC): Provided upper bands are all zero."") + for i in [1, 2]: + if any(np.isclose(x[:, i], 0.)): + print(""WARN(UCC): some band values are 0. 
- REPLACING with positive minimum"") + m = np.min(x[x[:, i] > 0, i]) + x = np.where(np.isclose(x, 0.), m, x) + return x + + def _calc_avg_excess(self, d, lb, ub): + """""" + Excess is amount an error bar overshoots actual + + :param d: pred-actual array + :param lb: lower band + :param ub: upper band + :return: average excess over array + """""" + excess = np.zeros(d.shape) + posidx = np.where(d >= 0)[0] + excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx]) + negidx = np.where(d < 0)[0] + excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx]) + return np.mean(excess) + + def _calc_avg_deficit(self, d, lb, ub): + """""" + Deficit is error bar insufficiency: bar falls short of actual + + :param d: pred-actual array + :param lb: lower band + :param ub: upper band + :return: average deficit over array + """""" + deficit = np.zeros(d.shape) + posidx = np.where(d >= 0)[0] + deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx]) + negidx = np.where(d < 0)[0] + deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx]) + return np.mean(deficit) + + def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0): + """""" + Calculates recall at a given scale/bias, average bandwidth and average excess + + :param d: delta + :param lb: lower band + :param ub: upper band + :param scale: scale * (x + bias) + :param bias: + :return: miss rate, average bandwidth, avg excess, avg deficit + """""" + abslband = scale * np.where((lb + bias) < 0., 0., lb + bias) + absuband = scale * np.where((ub + bias) < 0., 0., ub + bias) + recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d) + avgbandwidth = np.mean([absuband, abslband]) + avgexcess = self._calc_avg_excess(d, abslband, absuband) + avgdeficit = self._calc_avg_deficit(d, abslband, absuband) + return 1 - recall, avgbandwidth, avgexcess, avgdeficit + + def _calc_plotdata(self, d, lb, ub, vary_bias=False): + """""" + Generates data necessary for various UCC metrics. + + :param d: delta (predicted - actual) vector + :param ub: upper uncertainty bandwidth (above predicted) + :param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth) + :param vary_bias: True will switch to additive bias instead of scale + :return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit) + """""" + + # step 1: collect critical scale or bias values + critval = [] + for i in range(len(d)): + if not vary_bias: + if d[i] >= 0: + critval.append(d[i] / ub[i]) + else: + critval.append(-d[i] / lb[i]) + else: + if d[i] >= 0: + critval.append(d[i] - ub[i]) + else: + critval.append(-lb[i] - d[i]) + critval = sorted(critval) + plotdata = [] + for i in range(len(critval)): + if not vary_bias: + missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, + scale=critval[i]) + else: + missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, + bias=critval[i]) + plotdata.append((critval[i], missrate, bandwidth, excess, deficit)) + + return plotdata + + def get_AUUCC(self, vary_bias=False, aucfct=""trapz"", partial_x=None, partial_y=None): + """""" + returns approximate area under the curve on current coordinates, for each component. 
+ + :param vary_bias: False == varies scale, True == varies bias + :param aucfct: specifies AUC integrator (can be ""trapz"", ""simps"") + :param partial_x: tuple (x_min, x_max) defining interval on x to calc a a partial AUC. + The interval bounds refer to axes as visualized (ie. potentially normed) + :param partial_y: tuple (y_min, y_max) defining interval on y to calc a a partial AUC. partial_x must be None. + :return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component. + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + if partial_x is not None and partial_y is not None: + raise ValueError(""ERROR(UCC): partial_x and partial_y can not be specified at the same time."") + assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2)) + assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2)) + + # find starting point (where the x axis value starts to actually change) + rv = [] + # do this for individual streams + xind = self.x_axis_idx + aucfct = simps if aucfct == ""simps"" else trapz + for s in range(len(self.d)): + plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s] + prev = plotdata[0][xind] + t = 1 + cval = plotdata[t][xind] + while cval == prev and t < len(plotdata) - 1: + t += 1 + prev = cval + cval = plotdata[t][xind] + startt = t - 1 # from here, it's a valid function + endtt = len(plotdata) + + if startt >= endtt - 2: + rvs = 0. # no area + else: + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. + y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)] + x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)] + if partial_x is not None: + from_i = self._find_closest_index(partial_x[0], x) + to_i = self._find_closest_index(partial_x[1], x) + 1 + elif partial_y is not None: + from_i = self._find_closest_index(partial_y[0], y) + to_i = self._find_closest_index(partial_y[1], y) + if from_i > to_i: # y is in reverse order + from_i, to_i = to_i, from_i + to_i += 1 # as upper bound in array indexing + else: + from_i = 0 + to_i = len(x) + to_i = min(to_i, len(x)) + if to_i < from_i: + raise ValueError(""ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data."") + if to_i - from_i < 2: + raise RuntimeError(""ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified"") + rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i]) + rv.append(rvs) + if len(rv) < 2: + return rv[0] + else: + return rv + + @ staticmethod + def _find_closest_index(value, array): + """""" + Returns an index of the 'array' element closest in value to 'value' + + :param value: + :param array: + :return: + """""" + return np.argmin(np.abs(np.asarray(array)-value)) + + def _get_single_OP(self, d, lb, ub, scale=1., bias=0.): + """""" + Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias. + + :param scale: + :param bias: + :return: single tuple (x point, y point, unit of x, unit of y) + """""" + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. + auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias) + op = [0.] 
+ [i for i in auxop] # mimic plotdata (first element ignored here) + return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm) + + def get_OP(self, scale=1., bias=0.): + """""" + Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias. + + :param scale: + :param bias: + :return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only + 1 component. + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + op = [] + for dc in range(len(self.d)): + op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias)) + if len(op) < 2: + return op[0] + else: + return op + + def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None, + xlim=None, ylim=None, **kwargs): + """""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown + if there are multiple data components (via fit()) + + :param titlestr: Plot title string + :param syslabel: list is label strings to appear in the plot legend. Can be single, if one component. + :param outfn: base name of an image file to be created (will append .png before creating) + :param vary_bias: True will switch to varying additive bias (default is multiplicative scale) + :param markers: None or a list of marker styles to be used for each curve. + List must be same or longer than number of components. + Markers can be one among these ['o', 's', 'v', 'D', '+']. + :param xlim: tuples or lists of specifying the range for the x axis, or None (auto) + :param ylim: tuples or lists of specifying the range for the y axis, or None (auto) + :param `**kwargs`: Additional arguments passed to the main plot call. + + :return: list of areas under the curve (or single area, if one data component) + list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit) + """""" + if self.d is None: + raise ValueError(""ERROR(UCC): call fit() prior to using this method."") + if vary_bias and not self.precompute_bias_data: + raise ValueError(""ERROR(UCC): Cannot vary bias - instantiated without bias data computation"") + if not isinstance(syslabel, list): + syslabel = [syslabel] + assert (len(syslabel) == len(self.d)) + assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d))) + # main plot of (possibly multiple) datasets + plt.figure() + xnorm = self.std_unit if self.norm_x_axis else 1. + ynorm = self.std_unit if self.norm_y_axis else 1. + op_info = [] + auucc = self.get_AUUCC(vary_bias=vary_bias) + auucc = [auucc] if not isinstance(auucc, list) else auucc + for s in range(len(self.d)): + # original operating point + x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s]) + op_info.append((x_op, y_op, x_unit, y_unit)) + # display chart + plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s] + axisX_data" +"= [i[self.x_axis_idx] / xnorm for i in plotdata] + axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata] + marker = None + if markers is not None: marker = markers[s] + p = plt.plot(axisX_data, axisY_data, lab" +" - 0.5 + return kld_weights.sum() + kld_bias.sum() + + +class HorseshoeLayer(BayesianLinearLayer): + """""" + Uses non-centered parametrization. 
w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k + are vectors of all weights incident into the unit + """""" + def __init__(self, in_features, out_features, cuda=False, scale=1.): + super(HorseshoeLayer, self).__init__(in_features, out_features) + self.cuda = cuda + self.in_features = in_features + self.out_features = out_features + self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.) + self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale) + # prior on beta is N(0, I) when employing non centered parameterization + self.prior_stdv = torch.Tensor([1]) + self.prior_mean = torch.Tensor([0.]) + + def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None): + # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample + # sample scales + scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu) + scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2) + scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() + # sample preactivations + mu_activations = F.linear(x, self.weights, self.bias) + var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) + activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) + return scale_sample * activ_sample + + def kl(self): + return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl() + + def fixed_point_updates(self): + self.nodescales.fixed_point_updates() + self.layerscale.fixed_point_updates() + + +class RegularizedHorseshoeLayer(HorseshoeLayer): + """""" + Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. + For all weights w_k incident upon node k in the layer we have: + w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, b). + c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe. + """""" + + def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): + super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) + self.c = InvGammaLayer(a=c_a, b=c_b) + + def forward(self, x, do_sample=True, **kwargs): + # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample + # sample regularized scales + scale_mean = self.nodescales.mu + self.layerscale.mu + scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2 + scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() + c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp() + regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample) + # sample preactivations + mu_activations = F.linear(x, self.weights, self.bias) + var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) + activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) + return torch.sqrt(regularized_scale_sample) * activ_sample + + def kl(self): + return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl() + + +class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer): + """""" + Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. 
+ For all weights w_k incident upon node k in the layer we have: + w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b). + c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe + Note that we now have a per-node c_k. + """""" + + def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): + super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) + self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features) + + + + + import numpy as np +import torch +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision + +def compute_test_ll(y_test, y_pred_samples, std_y=1.): + """""" + Computes test log likelihoods = (1 / Ntest) * \\sum_n p(y_n | x_n, D_train) + :param y_test: True y + :param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples + q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train) + :param std_y: True std of y (assumed known) + """""" + S, _ = y_pred_samples.shape + noise = GaussianNoiseFixedPrecision(std_y=std_y) + ll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False) + ll = torch.logsumexp(ll, dim=0) - np.log(S) # mean over num samples + return torch.mean(ll) # mean over test points + + + from abc import ABC +import torch +from torch import nn +from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision +import numpy as np +td = torch.distributions + + +class BayesianNN(nn.Module, ABC): + """""" + Bayesian neural network with zero mean Gaussian priors over weights. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, + activation_type='relu', num_layers=1): + super(BayesianNN, self).__init__() + self.num_layers = num_layers + if activation_type == 'relu': + # activation + self.activation = nn.ReLU() + elif activation_type == 'tanh': + self.activation = nn.Tanh() + else: + print(""Activation Type not supported"") + self.fc_hidden = [] + self.fc1 = layer(ip_dim, num_nodes,) + for _ in np.arange(self.num_layers - 1): + self.fc_hidden.append(layer(num_nodes, num_nodes, )) + self.fc_out = layer(num_nodes, op_dim, ) + self.noise_layer = None + + def forward(self, x, do_sample=True): + x = self.fc1(x, do_sample=do_sample) + x = self.activation(x) + for layer in self.fc_hidden: + x = layer(x, do_sample=do_sample) + x = self.activation(x) + return self.fc_out(x, do_sample=do_sample, scale_variances=True) + + def kl_divergence_w(self): + kld = self.fc1.kl() + self.fc_out.kl() + for layer in self.fc_hidden: + kld += layer.kl() + return kld + + def prior_predictive_samples(self, n_sample=100): + n_eval = 1000 + x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] + y = np.zeros([n_sample, n_eval]) + for i in np.arange(n_sample): + y[i] = self.forward(x).data.numpy().ravel() + return x.data.numpy(), y + + ### get and set weights ### + def get_weights(self): + assert len(self.fc_hidden) == 0 # only works for one layer networks. 
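+        # The variational parameters of each layer are packed as [weights | bias] matrices,
+        # with separate dictionary entries for the means and the log-variances.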
+ weight_dict = {} + weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + return weight_dict + + def set_weights(self, weight_dict): + assert len(self.fc_hidden) == 0 # only works for one layer networks. + to_param = lambda x: nn.Parameter(torch.Tensor(x)) + self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) + self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) + self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) + self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) + + self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) + self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) + self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) + self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) + + +class BayesianRegressionNet(BayesianNN, ABC): + """""" + Bayesian neural net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1): + super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + ) + self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer.loss(y_pred=out, y_true=y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik + return neg_elbo + + def mse(self, x, y): + """""" + scaled rmse (scaled by 1 / std_y**2) + """""" + E_noise_precision = 1. / self.noise_layer.get_noise_var() + return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() + + def get_noise_var(self): + return self.noise_layer.get_noise_var() + + +class BayesianClassificationNet(BayesianNN, ABC): + """""" + Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. + """""" + def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1): + super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers) + self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer(out, y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. 
+ Elik = self.likelihood(x, y) + neg_elbo = self.kl_divergence_w() / num_batches - Elik + return neg_elbo + + + + + from abc import ABC + +import numpy as np +import torch +from torch import nn + +from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer +from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision +import numpy as np +td = torch.distributions + + +class HshoeB" +"NN(nn.Module, ABC): + """""" + Bayesian neural network with Horseshoe layers. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1, + hshoe_scale=1e-1, use_reg_hshoe=False): + if use_reg_hshoe: + layer = RegularizedHorseshoeLayer + else: + layer = HorseshoeLayer + super(HshoeBNN, self).__init__() + self.num_layers = num_layers + if activation_type == 'relu': + # activation + self.activation = nn.ReLU() + elif activation_type == 'tanh': + self.activation = nn.Tanh() + else: + print(""Activation Type not supported"") + self.fc_hidden = [] + self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale) + for _ in np.arange(self.num_layers - 1): + self.fc_hidden.append(layer(num_nodes, num_nodes)) + self.fc_out = BayesianLinearLayer(num_nodes, op_dim) + self.noise_layer = None + + def forward(self, x, do_sample=True): + x = self.fc1(x, do_sample=do_sample) + x = self.activation(x) + for layer in self.fc_hidden: + x = layer(x, do_sample=do_sample) + x = self.activation(x) + return self.fc_out(x, do_sample=do_sample, scale_variances=True) + + def kl_divergence_w(self): + kld = self.fc1.kl() + self.fc_out.kl() + for layer in self.fc_hidden: + kld += layer.kl() + return kld + + def fixed_point_updates(self): + if hasattr(self.fc1, 'fixed_point_updates'): + self.fc1.fixed_point_updates() + if hasattr(self.fc_out, 'fixed_point_updates'): + self.fc_out.fixed_point_updates() + for layer in self.fc_hidden: + if hasattr(layer, 'fixed_point_updates'): + layer.fixed_point_updates() + + def prior_predictive_samples(self, n_sample=100): + n_eval = 1000 + x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] + y = np.zeros([n_sample, n_eval]) + for i in np.arange(n_sample): + y[i] = self.forward(x).data.numpy().ravel() + return x.data.numpy(), y + + ### get and set weights ### + def get_weights(self): + assert len(self.fc_hidden) == 0 # only works for one layer networks. + weight_dict = {} + weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() + weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() + return weight_dict + + def set_weights(self, weight_dict): + assert len(self.fc_hidden) == 0 # only works for one layer networks. 
+ to_param = lambda x: nn.Parameter(torch.Tensor(x)) + self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) + self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) + self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) + self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) + + self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) + self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) + self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) + self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) + + +class HshoeRegressionNet(HshoeBNN, ABC): + """""" + Horseshoe net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): + super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + hshoe_scale=hshoe_scale, + use_reg_hshoe=use_reg_hshoe) + self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer.loss(y_pred=out, y_true=y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik + return neg_elbo + + def mse(self, x, y): + """""" + scaled rmse (scaled by 1 / std_y**2) + """""" + E_noise_precision = 1. / self.noise_layer.get_noise_var() + return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() + + def get_noise_var(self): + return self.noise_layer.get_noise_var() + + +class HshoeClassificationNet(HshoeBNN, ABC): + """""" + Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. + """""" + def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', + num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): + super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, + num_nodes=num_nodes, activation_type=activation_type, + num_layers=num_layers, + hshoe_scale=hshoe_scale, + use_reg_hshoe=use_reg_hshoe) + self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') + + def likelihood(self, x=None, y=None): + out = self.forward(x) + return -self.noise_layer(out, y) + + def neg_elbo(self, num_batches, x=None, y=None): + # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. + Elik = self.likelihood(x, y) + neg_elbo = (self.kl_divergence_w()) / num_batches - Elik + return neg_elbo + + + + + import abc +import sys + +# Ensure compatibility with Python 2/3 +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta(str('ABC'), (), {}) + + +class AbstractNoiseModel(ABC): + """""" Abstract class. All noise models inherit from here. + """""" + + def __init__(self, *argv, **kwargs): + """""" Initialize an AbstractNoiseModel object. 
+ """""" + + @abc.abstractmethod + def loss(self, *argv, **kwargs): + """""" Compute loss given predictions and groundtruth labels + """""" + raise NotImplementedError + + @abc.abstractmethod + def get_noise_var(self, *argv, **kwargs): + """""" + Return the current estimate of noise variance + """""" + raise NotImplementedError + import math + +import numpy as np +import torch +from scipy.special import gammaln +from uq360.models.noise_models.noisemodel import AbstractNoiseModel +from torch.nn import Parameter + +td = torch.distributions + + +def transform(a): + return torch.log(1 + torch.exp(a)) + + +class GaussianNoise(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f_\\mu(x, w), f_\\sigma^2(x, w)) + """""" + + def __init__(self, cuda=False): + super(GaussianNoise, self).__init__() + self.cuda = cuda + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + + def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True): + """""" + computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred)) + :param y_true: + :param mu_pred: + :param log_var_pred: + + :return: + """""" + var_pred = transform(log_var_pred) + ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2) + if reduce_mean: + return -ll.mean(dim=0) + else: + return -ll.sum(dim=0) + + def get_noise_var(self, log_var_pred): + return transform(log_var_pred) + + + import math + +import numpy as np +import torch +from scipy.special import gammaln +from uq360.models.noise_models.noisemodel import AbstractNoiseModel +from torch.nn import Parameter + +td = torch.distributions + + +def transform(a): + return torch.log(1 + torch.exp(a)) + + +class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b). 
+ Uses a variational approximation; q(lambda) = Gamma(ahat, bhat) + """""" + + def __init__(self, a0=6, b0=6, cuda=False): + super(GaussianNoiseGammaPrecision, self).__init__() + self.cuda = cuda + self.a0 = a0 + self.b0 = b0 + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + # variational parameters + self.ahat = Parameter(torch.FloatTensor([10.])) + self.bhat = Parameter(torch.FloatTensor([3.])) + + def loss(self, y_pred=None, y_true=None): + """""" + computes -1 * E_q(\\lambda)[ln N (y_pred | y_true, \\lambda^-1)], where q(lambda) = Gamma(ahat, bhat) + :param y_pred: + :param y_true: + :return: + """""" + n = y_pred.shape[0] + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \\ + - 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum()) + + def kl(self): + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \\ + self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat + + def get_noise_var(self): + ahat = transform(self.ahat) + bhat = transform(self.bhat) + return (bhat / ahat).data.numpy()[0] + + +class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel): + """""" + N(y_true | f(x, w), sigma_y**2); known sigma_y + """""" + + def __init__(self, std_y=1., cuda=False): + super(GaussianNoiseFixedPrecision, self).__init__() + self.cuda = cuda + self.const = torch.log(torch.FloatTensor([2 * math.pi])) + self.sigma_y = std_y + + def loss(self, y_pred=None, y_true=None): + """""" + computes -1 * ln N (y_pred | y_true, sigma_y**2) + :param y_pred: + :param y_true: + :return: + """""" + ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. 
/ self.sigma_y ** 2) * ((y_pred - y_true) ** 2) + return -ll.sum(dim=0) + + def get_noise_var(self): + return self.sigma_y ** 2 import pandas as pd + +tab = ' ' +VALID_AGGREGATION_METHODS = ['mean','sum'] +VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year'] +VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}} +VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys()) + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v," +"str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +def get_source_delta( data: pd.DataFrame): + MAX_SAMPLE_TRY = 20 + if len( data) <= 1: + return None + time_delta = data.index[-1] - data.index[-2] + count = {} + for i in range(len(data)): + if i == MAX_SAMPLE_TRY or i == data.index[-1]: + break + delta = data.index[i+1] - data.index[i] + if delta not in count.keys(): + count[delta] = 1 + else: + count[delta] += 1 + if count: + return max(count, key=count.get) + else: + return None + + +class timeSeries(): + + def __init__( self, config, datetime, log=None): + self.datetime = datetime + self.validate_config(config) + self.log = log + + def validate_config( self, config): + + if not self.datetime or self.datetime.lower() == 'na': + raise ValueError('date time feature is not provided') + self.config = {} + method = get_one_true_option(config.get('interpolation',None)) + self.config['interpolate'] = {} + self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS + self.config['interpolate']['method'] = method + self.config['rolling'] = {} + self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False)) + self.config['rolling']['size'] = int( config.get('rollingWindowSize',1)) + if self.config['rolling']['size'] < 1: + raise ValueError('Rolling window size should be greater than 0.') + self.config['aggregation'] = {} + aggregation = config.get('aggregation',{}) + agg_method = get_one_true_option(aggregation['type']) + self.config['aggregation'] = {} + self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS + self.config['aggregation']['method'] = agg_method + granularity = aggregation.get('granularity',{}) + granularity_unit = get_one_true_option( granularity.get('unit',None)) + if granularity_unit in VALID_GRANULARITY_UNITS: + granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'} + size = int(granularity.get('size',10)) + granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit) + self.config['aggregation']['granularity'] = {} + self.config['aggregation']['granularity']['unit'] = granularity_unit + self.config['aggregation']['granularity']['size'] = size + + def log_info(self, msg, type='info'): + if self.log: + if type == 'error': + self.log.error( msg) + else: + self.log.info( msg) + else: + print( msg) + + def is_down_sampling(self, data, size, granularity_unit): + down_sampling = False + if granularity_unit in ['M', 'Y']: + return True + else: + target_delta = pd.Timedelta(size , granularity_unit) + source_delta = get_source_delta(data) + if not source_delta: + raise ValueError('Could not find the data frame time frequency') + return source_delta < target_delta + + def run( self, data): 
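+        # Applies the configured preprocessing steps in order:
+        #   1. parse the datetime feature (falling back to utc=True) and set it as the index
+        #   2. optional interpolation of missing values (linear / spline / time based)
+        #   3. optional rolling-window (moving average) smoothing
+        #   4. optional resampling: mean/sum when down sampling, forward fill when up sampling
+        # Illustrative usage (a sketch, not part of the original module; assumes a
+        # 'timestamp' datetime column and config keys as read by validate_config above):
+        #   ts = timeSeries({'interpolation': {'linear': 'True'},
+        #                    'rollingWindow': 'False',
+        #                    'aggregation': {'type': {'mean': 'True'},
+        #                                    'granularity': {'unit': {'hour': 'True'}, 'size': 1}}},
+        #                   datetime='timestamp')
+        #   data = ts.run(data)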
+ if self.datetime not in data.columns: + raise ValueError(f""Date time feature '{self.datetime}' is not present in data"") + try: + # data[self.datetime] = pd.to_datetime( data[self.datetime]) + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. + try: + #for non utc timestamp + data[self.datetime] = pd.to_datetime( data[self.datetime]) + except: + #for utc timestamp + data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True) + data.set_index( self.datetime, inplace=True) + except: + raise ValueError(f""can not convert '{self.datetime}' to dateTime"") + if self.config.get('interpolate',{}).get('enabled',False): + method = self.config['interpolate']['method'] + self.log_info(f""Applying Interpolation using {method}"") + methods_mapping = {'timebased': 'time'} + self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method) + data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method]) + if self.config.get('rolling',{}).get('enabled',False): + if self.config['rolling']['size'] > len( data): + raise ValueError('Rolling window size is greater than dataset size') + self.log_info(f""Applying rolling window( moving avg) with size {self.config['rolling']['size']}"") + data = data.rolling( self.config['rolling']['size']).mean() + data = data.iloc[self.config['rolling']['size'] - 1:] + aggregation = self.config.get('aggregation',{}) + if aggregation.get('enabled',False): + method = aggregation.get('method','mean') + self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit'] + if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']): + self.log_info(f""Applying down sampling( {self.rule})"") + if method == 'mean': + data = data.resample( self.rule).mean() + elif method == 'sum': + data = data.resample( self.rule).sum() + else: + self.log_info(f""Applying up sampling using forward fill method( {self.rule})"") + data = data.resample( self.rule).ffill() + data.reset_index( inplace=True, names=self.datetime) + return data + + def get_code(self, indent=0): + tab = ' ' + code = '' + code += f"""""" + +def preprocess( data): + try: + #for non utc timestamp + data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}']) + except: + data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True) + data.set_index( '{self.datetime}', inplace=True) +"""""" + if self.config.get('interpolate',{}).get('enabled',False): + code += tab + f""data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\\n"" + if self.config.get('rolling',{}).get('enabled',False): + code += tab + f""data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\\n"" + if self.config.get('aggregation',{}).get('enabled',False): + code += tab + f""data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\\n"" + code += tab + f""data.reset_index( inplace=True, names='{self.datetime}')\\n"" + code += tab + ""return data\\n"" + return code + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import numpy as np +import pandas as pd +import sys +import os +import warnings +import logging +from pathlib import Path +import random +from sklearn.model_selection import train_test_split +import operator +import re +import pdfplumber + +class dataReader(): + + def __init__(self): + self.dataDf =None + self.log = logging.getLogger('eion') + + + def readCsv(self,dataPath,featureList,targetColumn): + data=pd.read_csv(dataPath) + dataDf=data[featureList] + predictDf=data[targetColumn] + return dataDf,predictDf + + def rowsfilter(self,filters,dataframe): + self.log.info('\\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479 + filterexpression='' + firstexpressiondone = False + for x in filters: + if firstexpressiondone: + filterexpression += ' ' + if x['combineOperator'].lower() == 'and': + filterexpression += '&' + elif x['combineOperator'].lower() == 'or': + filterexpression += '|' + filterexpression += ' ' + firstexpressiondone = True + filterexpression += x['feature'] + filterexpression += ' ' + if x['condition'].lower() == 'equals': + filterexpression += '==' + elif x['condition'].lower() == 'notequals': + filterexpression += '!=' + elif x['condition'].lower() == 'lessthan': + filterexpression += '<' + elif x['condition'].lower() == 'lessthanequalto': + filterexpression += '<=' + elif x['condition'].lower() == 'greaterthan': + filterexpression += '>' + elif x['condition'].lower() == 'greaterthanequalto': + filterexpression += '>=' + filterexpression += ' ' + if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + filterexpression += x['value'] + else: + filterexpression += '\\''+x['value']+'\\'' + dataframe = dataframe.query(filterexpression) + self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479 + self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0])) + return dataframe,filterexpression + + def grouping(self,grouper,dataframe): + grouperbyjson= {} + groupbyfeatures = grouper['groupby'] + dataframe = dataframe.reset_index() + features = dataframe.columns.tolist() + aggjson = {} + for feature, featureType in zip(features,dataframe.dtypes): + if feature == groupbyfeatures or feature == 'index': + continue + if dataframe[feature].empty == True: + continue + if dataframe[feature].isnull().all() == True: + continue + if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + temp = {} + temp[feature+'_size'] = 'size' + temp[feature+'_sum'] = 'sum' + temp[feature+'_max'] = 'max' + temp[feature+'_min'] = 'min' + temp[feature+'_mean'] = 'mean' + aggjson[feature] = temp + else: + temp = {} + temp[feature+'_size'] = 'size' + temp[feature+'_unique'] = 'nunique' + aggjson[feature] = temp + groupbystring = 'groupby([\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')' + grouperbyjson['groupbystring'] = groupbystring + dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson) + dataframe.columns = dataframe.columns.droplevel(0) + dataframe = dataframe.reset_index() + ''' + if operation.lower() == 'size': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size() + elif operation.lower() == 'mean': + dataframe = 
dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean() + elif operation.lower() == 'max': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max() + elif operation.lower() == 'min': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min() + + dataframe = dataframe.rename(""groupby_value"") + dataframe = dataframe.to_frame() + dataframe = dataframe.reset_index() + ''' + return dataframe,grouperbyjson + + + def timeGrouping(self,timegrouper,dataframe): + grouperbyjson= {} + dateTime = timegrouper['dateTime'] + frequency = timegrouper['freq'] + groupbyfeatures = timegrouper['groupby'] + grouperbyjson['datetime'] = dateTime + if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + dtlenth = dataframe[dateTime].iloc[0] + dtlenth = np.int64(dtlenth) + dtlenth = len(str(dtlenth)) + if dtlenth == 13: + dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms') + grouperbyjson['unit'] = 'ms' + elif dtlenth == 10: + dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s') + grouperbyjson['unit'] = 's' + else: + dataframe['date'] = pd.to_datetime(dataframe[dateTime]) + grouperbyjson['unit'] = '' + else: + dataframe['date'] = pd.to_datetime(dataframe[dateTime]) + grouperbyjson['unit'] = '' + dataframe = dataframe.reset_index() + dataframe.set_index('date',inplace=True) + features = dataframe.columns.tolist() + aggjson = {} + for feature, featureType in zip(features,dataframe.dtypes): + if feature == groupbyfeatures" +"or feature == dateTime or feature == 'index': + continue + if dataframe[feature].empty == True: + continue + if dataframe[feature].isnull().all() == True: + continue + if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + temp = {'size','sum','max','min','mean'} + aggjson[feature] = temp + else: + temp = {'size','nunique'} + aggjson[feature] = temp + + if groupbyfeatures == '': + groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\')]).agg('+str(aggjson)+')' + else: + groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\'),\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')' + + grouperbyjson['groupbystring'] = groupbystring + print(grouperbyjson) + if groupbyfeatures == '': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson) + else: + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson) + + dataframe.columns = ['_'.join(col) for col in dataframe.columns] + dataframe = dataframe.reset_index() + self.log.info(dataframe.head(10)) + ''' + if operation.lower() == 'size': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size() + elif operation.lower() == 'mean': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean() + elif operation.lower() == 'max': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max() + elif operation.lower() == 'min': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min() + + dataframe = dataframe.rename(""groupby_value"") + dataframe = dataframe.to_frame() + dataframe = dataframe.reset_index() + ''' + return dataframe,grouperbyjson + + + def readDf(self,dataF,featureList,targetColumn): + dataDf = dataF[featureList] + predictDf =dataF[targetColumn] + return dataDf,predictDf + + def csvTodf(self,dataPath,delimiter,textqualifier): + ''' + if os.path.splitext(dataPath)[1] == "".tsv"": + 
dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\\t') + else: + dataFrame=pd.read_csv(dataPath,encoding='latin1') + ''' + if os.path.splitext(dataPath)[1] == "".py"": + f = open(dataPath, ""r"") + pythoncode = f.read() + f.close() + ldict = {} + exec(pythoncode, globals(), ldict) + dataFrame = ldict['dfpy'] + else: + dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') + dataFrame.rename(columns=lambda x: x.strip(), inplace=True) + return dataFrame + + def read_file(self, fileName): + fileName = Path(fileName) + if fileName.suffix == '.pdf': + pdf = pdfplumber.open(fileName) + text = '' + for index, page in enumerate(pdf.pages): + if index: + text += ' ' + text += page.extract_text() + else: + with open(fileName, ""r"",encoding=""utf-8"") as f: + text = f.read() + return text + + def documentsTodf(self,folderlocation,labelFilePath): + dataDf = pd.DataFrame() + error_message = """" + dataset_csv_file = os.path.join(folderlocation,labelFilePath) + labels = pd.read_csv(dataset_csv_file) + dataDict = {} + keys = [""File"",""Label""] + for key in keys: + dataDict[key] = [] + for i in range(len(labels)): + filename = os.path.join(folderlocation,labels.loc[i,""File""]) + dataDict[""File""].append(self.read_file(filename)) + dataDict[""Label""].append(labels.loc[i,""Label""]) + dataDf = pd.DataFrame.from_dict(dataDict) + error_message = """" + return dataDf, error_message + + def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature): + self.log.info(""\\n---------- Prepare Features ----------"") + if(str(datetimeFeature).lower() != 'na'): + datetimeFeature = datetimeFeature.split("","") + datetimeFeature = list(map(str.strip, datetimeFeature)) + for dtfeature in datetimeFeature: + if dtfeature in df.columns: + self.log.info(""-------> Remove Date Time Feature: ""+dtfeature) + df = df.drop(columns=dtfeature) + if(str(indexFeature).lower() != 'na'): + indexFeature = indexFeature.split("","") + indexFeature = list(map(str.strip, indexFeature)) + for ifeature in indexFeature: + if ifeature in df.columns: + self.log.info(""-------> Remove Index Feature: ""+ifeature) + df = df.drop(columns=ifeature) + if(str(modelFeatures).lower() != 'na'): + self.log.info(""-------> Model Features: ""+str(modelFeatures)) + modelFeatures = modelFeatures.split("","") + modelFeatures = list(map(str.strip, modelFeatures)) + if(targetFeature != '' and str(targetFeature).lower() != 'na'): + targetFeature = targetFeature.split("","") + targetFeature = list(map(str.strip, targetFeature)) + for ifeature in targetFeature: + if ifeature not in modelFeatures: + modelFeatures.append(ifeature) + if(str(indexFeature).lower() != 'na'): + for ifeature in indexFeature: + if ifeature in modelFeatures: + modelFeatures.remove(ifeature) + if(str(datetimeFeature).lower() != 'na'): + for dtfeature in datetimeFeature: + if dtfeature in modelFeatures: + modelFeatures.remove(dtfeature) + df = df[modelFeatures] + self.log.info(""---------- Prepare Features End ----------"") + return(df) + + def splitImageDataset(self, df, ratio, modelType): + if modelType.lower() == ""objectdetection"": + images = df['File'].unique().tolist() + trainImages = random.sample(images, int(len(images) * ratio)) + mask = [0] * len(df) + for i in range(len(df)): + mask[i] = df.iloc[i]['File'] in trainImages + trainDf = df.iloc[mask] + testDf = df.iloc[[not elem for elem in mask]] + return trainDf, testDf + else: + return 
train_test_split(df, test_size=(1 - ratio)) + def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}): + from transformations import generate_tfrecord + from transformations.imageAug import ImageAugmentation + if isinstance(csv_file, pd.DataFrame): + df = csv_file + else: + df = pd.read_csv(os.path.join(train_image_dir,csv_file)) + labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir) + train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType) + if AugEnabled: + augFile = os.path.join(output_dir,""tempTrainDf.csv"") + train_df.to_csv(augFile) + ia = ImageAugmentation(train_image_dir, augFile) + augFile = ia.augment(modelType, operations,None,augConf) + train_df = pd.read_csv(augFile) + generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path) + if AugEnabled and not keepAugImages: + ia.removeAugmentedImages(train_df) + return train_df, num_classes + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#System imports +import os +import sys +import json +import datetime,time,timeit +import itertools + + +import numpy as np +import pandas as pd +import math +from sklearn.preprocessing import MinMaxScaler,StandardScaler +from sklearn.preprocessing import PowerTransformer +import logging +class dataTransformer(): + + def __init__(self): + self.log = logging.getLogger('eion') + + def startTransformer(self,df,features,target,transType): + scaler ='None' + if target in features: + features.remove(target) + + transFeatures=features + transDfColumns=[] + dataframe=df[transFeatures] + #targetArray=np.array(df[target]) + #targetArray.shape = (len(targetArray), 1) + self.log.info(""Data Normalization has started"") + if transType.lower() =='standardscaler': + scaler = StandardScaler().fit(dataframe) + transDf = scaler.transform(dataframe) + + elif transType.lower() =='minmax': + scaler=MinMaxScaler().fit(dataframe) + transDf = scaler.transform(dataframe) + elif transType.lower() =='lognormal': + print(dataframe) + scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe) + transDf = scaler.transform(dataframe) + else: + self.log.info(""Need to implement"") + #features.append(target) + + #scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features) + return transDf,features,scaler ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import io +import json +import logging +import pandas as pd +import sys +import numpy as np +from pathlib import Path +from word2number import w2n +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OrdinalEncoder +from sklearn.preprocessing import OneHotEncoder +from sklearn.impute import SimpleImputer, KNNImputer +from sklearn.pipeline import Pipeline, FeatureUnion +from sklearn.preprocessing import FunctionTransformer +from sklearn.preprocessing import MinMaxScaler,StandardScaler +from sklearn.preprocessing import PowerTransformer +from sklearn.compose import ColumnTransformer +from sklearn.base import TransformerMixin +from sklearn.ensemble import IsolationForest +from category_encoders import TargetEncoder +try: + import transformations.data_profiler_functions as cs +except: + import data_profiler_functions as cs + +if 'AION' in sys.modules: + try: + from appbe.app_config import DEBUG_ENABLED + except: + DEBUG_ENABLED = False +else: + DEBUG_ENABLED = False +log_suffix = f'[{Path(__file__).stem}] ' + + +class profiler(): + + def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None): + if not isinstance(xtrain, pd.DataFrame): + raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type') + if xtrain.empty: + raise ValueError(f'{log_suffix}Data frame is empty') + if target and target in xtrain.columns: + self.target = xtrain[target] + xtrain.drop(target, axis=1, inplace=True) + self.target_name = target + elif ytrain: + self.target = ytrain + self.target_name = 'target' + else: + self.target = pd.Series() + self.target_name = None + self.data_path = data_path + self.encode_target = encode_target + self.label_encoder = None + self.data = xtrain + self.keep_unprocessed = keep_unprocessed + self.colm_type = {} + for colm, infer_type in zip(self.data.columns, self.data.dtypes): + self.colm_type[colm] = infer_type + self.numeric_feature = [] + self.cat_feature = [] + self.text_feature = [] + self.wordToNumericFeatures = [] + self.added_features = [] + self.pipeline = [] + self.dropped_features =" +"{} + self.train_features_type={} + self.__update_type() + self.config = config + self.featureDict = config.get('featureDict', []) + self.output_columns = [] + self.feature_expender = [] + self.text_to_num = {} + self.force_numeric_conv = [] + if log: + self.log = log + else: + self.log = logging.getLogger('eion') + self.type_conversion = {} + self.log_input_feat_info() + + def log_input_feat_info(self): + if self.featureDict: + feature_df = pd.DataFrame(self.featureDict) + log_text = '\\nPreprocessing options:' + log_text += '\\n\\t'+str(feature_df.head( len(self.featureDict))).replace('\\n','\\n\\t') + self.log.info(log_text) + + def log_dataframe(self, msg=None): + buffer = io.StringIO() + self.data.info(buf=buffer) + if msg: + log_text = f'Data frame after {msg}:' + else: + log_text = 
'Data frame:' + log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t') + log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t')) + self.log.info(log_text) + + def transform(self): + if self.is_target_available(): + if self.target_name: + self.log.info(f""Target feature name: '{self.target_name}'"") + self.log.info(f""Target feature size: {len(self.target)}"") + else: + self.log.info(f""Target feature not present"") + self.log_dataframe() + print(self.data.info()) + try: + self.process() + except Exception as e: + self.log.error(e, exc_info=True) + raise + pipe = FeatureUnion(self.pipeline) + try: + if self.text_feature: + from text.textProfiler import set_pretrained_model + set_pretrained_model(pipe) + conversion_method = self.get_conversion_method() + process_data = pipe.fit_transform(self.data, y=self.target) + # save for testing + if DEBUG_ENABLED: + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + df = pd.DataFrame(process_data) + df.to_csv('debug_preprocessed.csv', index=False) + if self.text_feature and conversion_method == 'latentsemanticanalysis': + n_size = self.get_tf_idf_output_size( pipe) + dimensions = self.get_tf_idf_dimensions() + if n_size < dimensions or n_size > dimensions: + dimensions = n_size + from sklearn.decomposition import TruncatedSVD + reducer = TruncatedSVD( n_components = dimensions) + reduced_data = reducer.fit_transform( process_data[:,-n_size:]) + text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process') + pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer)) + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1) + last_step = self.feature_expender.pop() + self.feature_expender.append({'feature_reducer':list(last_step.values())[0]}) + + except EOFError as e: + if ""Compressed file ended before the end-of-stream marker was reached"" in str(e): + raise EOFError('Pretrained model is not downloaded properly') + + self.update_output_features_names(pipe) + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns) + + if self.is_target_available() and self.target_name: + df[self.target_name] = self.target + if self.keep_unprocessed: + df[self.keep_unprocessed] = self.data[self.keep_unprocessed] + self.log_numerical_fill() + self.log_categorical_fill() + self.log_normalization() + return df, pipe, self.label_encoder + + def log_type_conversion(self): + if self.log: + self.log.info('----------- Inspecting Features -----------') + self.log.info('----------- Type Conversion -----------') + count = 0 + for k, v in self.type_conversion.items(): + if v[0] != v[1]: + self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}') + self.log.info('Status:- |... 
Feature inspection done') + + def check_config(self): + removeDuplicate = self.config.get('removeDuplicate', False) + self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate) + self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio'])) + self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio'])) + self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel'])) + featureDict = self.config.get('featureDict', []) + if isinstance(featureDict, dict): + self.config['featureDict'] = [] + if isinstance(featureDict, str): + self.config['featureDict'] = [] + + def process(self): + #remove duplicate not required at the time of prediction + self.check_config() + self.remove_constant_feature() + self.remove_empty_feature(self.config['misValueRatio']) + self.remove_index_features() + self.dropna() + if self.config['removeDuplicate']: + self.drop_duplicate() + #self.check_categorical_features() + #self.string_to_numeric() + self.process_target() + self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)} + self.parse_process_step_config() + self.process_drop_fillna() + self.log_type_conversion() + self.update_num_fill_dict() + if DEBUG_ENABLED: + print(self.num_fill_method_dict) + self.update_cat_fill_dict() + self.create_pipeline() + self.text_pipeline(self.config) + self.apply_outlier() + if DEBUG_ENABLED: + self.log.info(self.process_method) + self.log.info(self.pipeline) + + def is_target_available(self): + return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target) + + def process_target(self, operation='encode', arg=None): + if self.is_target_available(): + # drop null values + self.__update_index( self.target.notna(), 'target') + if self.encode_target: + self.label_encoder = LabelEncoder() + self.target = self.label_encoder.fit_transform(self.target) + return self.label_encoder + return None + + def is_target_column(self, column): + return column == self.target_name + + def fill_default_steps(self): + + num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{})) + normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none') + for colm in self.numeric_feature: + if num_fill_method: + self.fill_missing_value_method(colm, num_fill_method.lower()) + if normalization_method: + self.fill_normalizer_method(colm, normalization_method.lower()) + + cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{})) + cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{})) + for colm in self.cat_feature: + if cat_fill_method: + self.fill_missing_value_method(colm, cat_fill_method.lower()) + if cat_encode_method: + self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True) + + def parse_process_step_config(self): + self.process_method = {} + user_provided_data_type = {} + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + user_provided_data_type[colm] = feat_conf['type'] + if user_provided_data_type: + self.update_user_provided_type(user_provided_data_type) + + self.fill_default_steps() + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + if feat_conf.get('fillMethod', None): + 
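+                        # Each featureDict entry overrides the defaults chosen in fill_default_steps().
+                        # An entry is assumed to look like (illustrative sketch, keys as read below):
+                        #   {'feature': 'age', 'type': 'numerical', 'fillMethod': 'Median',
+                        #    'normalization': 'MinMax', 'outlier': 'IQR', 'outlierOperation': 'Average'}
+                        # Only the keys present in an entry are applied to that column.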
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower()) + if feat_conf.get('categoryEncoding', None): + self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower()) + if feat_conf.get('normalization', None): + self.fill_normalizer_method(colm, feat_conf['normalization'].lower()) + if feat_conf.get('outlier', None): + self.fill_outlier_method(colm, feat_conf['outlier'].lower()) + if feat_conf.get('outlierOperation', None): + self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower()) + + + def get_tf_idf_dimensions(self): + dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default') + return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim] + + def get_tf_idf_output_size(self, pipe): + start_index = {} + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + if start_index: + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + return len(v) + return 0 + + def update_output_features_names(self, pipe): + columns = self.output_columns + start_index = {} + index_shifter = 0 + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + for key,value in start_index.items(): + for k,v in value.items(): + index_shifter += len(v) + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + #print(start_index) + if start_index: + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + v = [f'{x}_vect' for x in v] + self.output_columns[key:key] = v + self.added_features = [*self.added_features, *v] + + + def text_pipeline(self, conf_json): + + if self.text_feature: + from text.textProfiler import textProfiler + from text.textProfiler import textCombine + pipeList = [] + text_pipe = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", self.text_feature) + ], remainder=""drop"")), + (""text_fillNa"",SimpleImputer(strategy='constant', fill_value='')), + (""merge_text_feature"", textCombine())]) + obj = textProfiler() + pipeList = obj.cleaner(conf_json, pipeList, self.data_path) + pipeList = obj.embedding(conf_json, pipeList) + last_step = ""merge_text_feature"" + for pipe_elem in pipeList: + text_pipe.steps.append((pipe_elem[0], pipe_elem[1])) + last_step = pipe_elem[0] + text_transformer = ('text_process', text_pipe) + self.pipeline.append(text_transformer) + self.feature_expender.append({last_step:len(self.output_columns)}) + + def create_pipeline(self): + num_pipe = {} + for k,v in self.num_fill_method_dict.items(): + for k1,v1 in v.items(): + if k1 and k1 != 'none': + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)), + (k1, self.get_num_scaler(k1)) + ]) + else: + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)) 
+ ]) + self.output_columns.extend(v1) + cat_pipe = {} + for k,v in self.cat_fill_method_dict.items(): + for k1,v1 in v.items(): + cat_pipe[f'{k}_{" +"k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_cat_imputer(k)), + (k1, self.get_cat_encoder(k1)) + ]) + if k1 not in ['onehotencoding']: + self.output_columns.extend(v1) + else: + self.feature_expender.append({k1:len(self.output_columns)}) + for key, pipe in num_pipe.items(): + self.pipeline.append((key, pipe)) + for key, pipe in cat_pipe.items(): + self.pipeline.append((key, pipe)) + + ""Drop: feature during training but replace with zero during prediction "" + def process_drop_fillna(self): + drop_column = [] + if 'numFill' in self.process_method.keys(): + for col, method in self.process_method['numFill'].items(): + if method == 'drop': + self.process_method['numFill'][col] = 'zero' + drop_column.append(col) + if 'catFill' in self.process_method.keys(): + for col, method in self.process_method['catFill'].items(): + if method == 'drop': + self.process_method['catFill'][col] = 'zero' + drop_column.append(col) + if drop_column: + self.data.dropna(subset=drop_column, inplace=True) + + def update_num_fill_dict(self): + self.num_fill_method_dict = {} + if 'numFill' in self.process_method.keys(): + for f in cs.supported_method['fillNa']['numeric']: + self.num_fill_method_dict[f] = {} + for en in cs.supported_method['normalization']: + self.num_fill_method_dict[f][en] = [] + for col in self.numeric_feature: + numFillDict = self.process_method.get('numFill',{}) + normalizationDict = self.process_method.get('normalization',{}) + if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''): + self.num_fill_method_dict[f][en].append(col) + if not self.num_fill_method_dict[f][en] : + del self.num_fill_method_dict[f][en] + if not self.num_fill_method_dict[f]: + del self.num_fill_method_dict[f] + + def update_cat_fill_dict(self): + self.cat_fill_method_dict = {} + if 'catFill' in self.process_method.keys(): + for f in cs.supported_method['fillNa']['categorical']: + self.cat_fill_method_dict[f] = {} + for en in cs.supported_method['categoryEncoding']: + self.cat_fill_method_dict[f][en] = [] + for col in self.cat_feature: + catFillDict = self.process_method.get('catFill',{}) + catEncoderDict = self.process_method.get('catEncoder',{}) + if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''): + self.cat_fill_method_dict[f][en].append(col) + if not self.cat_fill_method_dict[f][en] : + del self.cat_fill_method_dict[f][en] + if not self.cat_fill_method_dict[f]: + del self.cat_fill_method_dict[f] + + + def __update_type(self): + self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed)) + self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed)) + self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed)) + self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed)) + + def update_user_provided_type(self, data_types): + allowed_types = ['numerical','categorical', 'text'] + skipped_types = ['date','index'] + type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),} + mapped_type = 
{k:type_mapping[v] for k,v in data_types.items() if v in allowed_types} + skipped_features = [k for k,v in data_types.items() if v in skipped_types] + if skipped_features: + self.keep_unprocessed.extend( skipped_features) + self.keep_unprocessed = list(set(self.keep_unprocessed)) + self.update_type(mapped_type, 'user provided data type') + + def get_type(self, as_list=False): + if as_list: + return [self.colm_type.values()] + else: + return self.colm_type + + def update_type(self, data_types={}, reason=''): + invalid_features = [x for x in data_types.keys() if x not in self.data.columns] + if invalid_features: + valid_feat = list(set(data_types.keys()) - set(invalid_features)) + valid_feat_type = {k:v for k,v in data_types if k in valid_feat} + else: + valid_feat_type = data_types + for k,v in valid_feat_type.items(): + if v != self.colm_type[k].name: + try: + self.data.astype({k:v}) + self.colm_type.update({k:self.data[k].dtype}) + self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) + except: + self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason) + if v == np.dtype('float64') and self.colm_type[k].name == 'object': + if self.check_numeric( k): + self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce') + self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) + self.force_numeric_conv.append( k) + else: + raise ValueError(f""Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%"") + self.data = self.data.astype(valid_feat_type) + self.__update_type() + + def check_numeric(self, feature): + col_values = self.data[feature].copy() + col_values = pd.to_numeric(col_values, errors='coerce') + if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): + return True + return False + + def string_to_numeric(self): + def to_number(x): + try: + return w2n.word_to_num(x) + except: + return np.nan + for col in self.text_feature: + col_values = self.data[col].copy() + col_values = pd.to_numeric(col_values, errors='coerce') + if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): + self.text_to_num[col] = 'float64' + self.wordToNumericFeatures.append(col) + if self.text_to_num: + columns = list(self.text_to_num.keys()) + self.data[columns] = self.data[columns].apply(lambda x: to_number(x), axis=1, result_type='broadcast') + self.update_type(self.text_to_num) + self.log.info('----------- Inspecting Features -----------') + for col in self.text_feature: + self.log.info(f'-------> Feature : {col}') + if col in self.text_to_num: + self.log.info('----------> Numeric Status :Yes') + self.log.info('----------> Data Type Converting to numeric :Yes') + else: + self.log.info('----------> Numeric Status :No') + self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric') + self.log.info(f'\\nStatus:- |... 
Feature word to numeric treatment done: {self.text_to_num}') + self.log.info('----------- Inspecting Features End -----------') + + def check_categorical_features(self): + num_data = self.data.select_dtypes(include='number') + num_data_unique = num_data.nunique() + num_to_cat_col = {} + for i, value in enumerate(num_data_unique): + if value < self.config['categoryMaxLabel']: + num_to_cat_col[num_data_unique.index[i]] = 'category' + if num_to_cat_col: + self.update_type(num_to_cat_col, 'numerical to categorical') + str_to_cat_col = {} + str_data = self.data.select_dtypes(include='object') + str_data_unique = str_data.nunique() + for i, value in enumerate(str_data_unique): + if value < self.config['categoryMaxLabel']: + str_to_cat_col[str_data_unique.index[i]] = 'category' + for colm in str_data.columns: + if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']: + str_to_cat_col[colm] = 'category' + if str_to_cat_col: + self.update_type(str_to_cat_col, 'text to categorical') + + def drop_features(self, features=[], reason='unspecified'): + if isinstance(features, str): + features = [features] + feat_to_remove = [x for x in features if x in self.data.columns] + if feat_to_remove: + self.data.drop(feat_to_remove, axis=1, inplace=True) + for feat in feat_to_remove: + self.dropped_features[feat] = reason + self.log_drop_feature(feat_to_remove, reason) + self.__update_type() + + def __update_index(self, indices, reason=''): + if isinstance(indices, (bool, pd.core.series.Series)) and len(indices) == len(self.data): + if not indices.all(): + self.data = self.data[indices] + if self.is_target_available(): + self.target = self.target[indices] + self.log_update_index((indices == False).sum(), reason) + + def dropna(self): + self.data.dropna(how='all',inplace=True) + if self.is_target_available(): + self.target = self.target[self.data.index] + + def drop_duplicate(self): + index = self.data.duplicated(keep='first') + self.__update_index( ~index, reason='duplicate') + + def log_drop_feature(self, columns, reason): + self.log.info(f'---------- Dropping {reason} features ----------') + self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found') + self.log.info(f'-------> Drop Features: {columns}') + self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}') + + def log_update_index(self,count, reason): + if count: + if reason == 'target': + self.log.info('-------> Null Target Rows Drop:') + self.log.info(f'-------> Dropped rows count: {count}') + elif reason == 'duplicate': + self.log.info('-------> Duplicate Rows Drop:') + self.log.info(f'-------> Dropped rows count: {count}') + elif reason == 'outlier': + self.log.info(f'-------> Dropped rows count: {count}') + self.log.info('Status:- |... Outlier treatment done') + self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}') + + def log_normalization(self): + if self.process_method.get('normalization', None): + self.log.info(f'\\nStatus:- !... Normalization treatment done') + for method in cs.supported_method['normalization']: + cols = [] + for col, m in self.process_method['normalization'].items(): + if m == method: + cols.append(col) + if cols and method != 'none': + self.log.info(f'Running {method} on features: {cols}') + + def log_numerical_fill(self): + if self.process_method.get('numFill', None): + self.log.info(f'\\nStatus:- !... 
Fillna for numeric feature done') + for method in cs.supported_method['fillNa']['numeric']: + cols = [] + for col, m in self.process_method['numFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + def log_categorical_fill(self): + if self.process_method.get('catFill', None): + self.log.info(f'\\nStatus:- !... FillNa for categorical feature done') + for method in cs.supported_method['fillNa']['categorical']: + cols = [] + for col, m in self.process_method['catFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + + def remove_constant_feature(self): + unique_values = self.data.nunique() + constant_features = [] + for i, value in enumerate(unique_values): + if value == 1: + constant_features.append(unique_values.index[i]) + if constant_features: + self.drop_features(constant_features, ""constant"") + + def remove_empty_feature(self, misval_ratio=1.0): + missing_ratio = self.data.isnull().sum() / len(self.data) + missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)} + empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio] + if empty_features: + self.drop_features(empty_features, ""empty"") + + def remove_index_features(self" +"): + index_feature = [] + + for feat in self.numeric_feature: + if self.data[feat].nunique() == len(self.data): + #if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)): + # index feature can be time based + count = (self.data[feat] - self.data[feat].shift() == 1).sum() + if len(self.data) - count == 1: + index_feature.append(feat) + self.drop_features(index_feature, ""index"") + + def fill_missing_value_method(self, colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['fillNa']['numeric']: + if 'numFill' not in self.process_method.keys(): + self.process_method['numFill'] = {} + if method == 'na' and self.process_method['numFill'].get(colm, None): + pass # don't overwrite + else: + self.process_method['numFill'][colm] = method + if colm in self.cat_feature: + if method in cs.supported_method['fillNa']['categorical']: + if 'catFill' not in self.process_method.keys(): + self.process_method['catFill'] = {} + if method == 'na' and self.process_method['catFill'].get(colm, None): + pass + else: + self.process_method['catFill'][colm] = method + + def check_encoding_method(self, method, colm,default=False): + if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]): + method = cs.target_encoding_method_change[method.lower()] + if default: + self.log.info(f""Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present"") + return method + + def fill_encoder_value_method(self,colm, method, default=False): + if colm in self.cat_feature: + if method.lower() in cs.supported_method['categoryEncoding']: + if 'catEncoder' not in self.process_method.keys(): + self.process_method['catEncoder'] = {} + if method == 'na' and self.process_method['catEncoder'].get(colm, None): + pass + else: + self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) + else: + self.log.info(f""-------> categorical encoding method '{method}' is not supported. 
supported methods are {cs.supported_method['categoryEncoding']}"") + + def fill_normalizer_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['normalization']: + if 'normalization' not in self.process_method.keys(): + self.process_method['normalization'] = {} + if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): + pass + else: + self.process_method['normalization'][colm] = method + else: + self.log.info(f""-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}"") + + def apply_outlier(self): + inlier_indice = np.array([True] * len(self.data)) + if self.process_method.get('outlier', None): + self.log.info('-------> Feature wise outlier detection:') + for k,v in self.process_method['outlier'].items(): + if k in self.numeric_feature: + if v == 'iqr': + index = cs.findiqrOutlier(self.data[k]) + elif v == 'zscore': + index = cs.findzscoreOutlier(self.data[k]) + elif v == 'disable': + index = None + if k in self.process_method['outlierOperation'].keys(): + if self.process_method['outlierOperation'][k] == 'dropdata': + inlier_indice = np.logical_and(inlier_indice, index) + elif self.process_method['outlierOperation'][k] == 'average': + mean = self.data[k].mean() + index = ~index + self.data.loc[index,[k]] = mean + self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') + elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': + self.log.info(f'-------> Total outliers in ""{k}"": {(~index).sum()}') + if self.config.get('outlierDetection',None): + if self.config['outlierDetection'].get('IsolationForest','False') == 'True': + if self.numeric_feature: + index = cs.findiforestOutlier(self.data[self.numeric_feature]) + inlier_indice = np.logical_and(inlier_indice, index) + self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') + if inlier_indice.sum() != len(self.data): + self.__update_index(inlier_indice, 'outlier') + + def fill_outlier_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlier_column_wise']: + if 'outlier' not in self.process_method.keys(): + self.process_method['outlier'] = {} + if method not in ['Disable', 'na']: + self.process_method['outlier'][colm] = method + else: + self.log.info(f""-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}"") + + def fill_outlier_process(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlierOperation']: + if 'outlierOperation' not in self.process_method.keys(): + self.process_method['outlierOperation'] = {} + self.process_method['outlierOperation'][colm] = method + else: + self.log.info(f""-------> outlier process method '{method}' is not supported for column wise. 
supported methods are {cs.supported_method['outlierOperation']}"") + + def get_cat_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_cat_encoder(self,method): + if method == 'labelencoding': + return OrdinalEncoder() + elif method == 'onehotencoding': + return OneHotEncoder(sparse=False,handle_unknown=""ignore"") + elif method == 'targetencoding': + if not self.is_target_available(): + raise ValueError('Can not apply Target Encoding when target feature is not present') + return TargetEncoder() + + def get_num_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'mean': + return SimpleImputer(strategy='mean') + elif method == 'median': + return SimpleImputer(strategy='median') + elif method == 'knnimputer': + return KNNImputer() + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_num_scaler(self,method): + if method == 'minmax': + return MinMaxScaler() + elif method == 'standardscaler': + return StandardScaler() + elif method == 'lognormal': + return PowerTransformer(method='yeo-johnson', standardize=False) + + def recommenderStartProfiler(self,modelFeatures): + return cs.recommenderStartProfiler(self,modelFeatures) + + def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): + return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation) + + def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2) + + def get_conversion_method(self): + return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower() + +def set_features(features,profiler=None): + return cs.set_features(features,profiler) + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import random +from matplotlib import pyplot as plt +import cv2 +import albumentations as A +import os +import pandas as pd +from pathlib import Path + + +class ImageAugmentation(): + + def __init__(self, dataLocation, csvFile): + self.AugmentationOptions = {""Flip"": {""operation"": A.HorizontalFlip, ""suffix"":""_flip""}, + ""Rotate"": {""operation"": A.Rotate, ""suffix"":""_rotate""}, + ""Shift"": {""operation"": A.RGBShift, ""suffix"":""_shift""}, + ""Crop"": {""operation"": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], ""suffix"":""_crop""}, + ""Contrast"": {""operation"": A.RandomContrast, ""suffix"":""_cont""}, + ""Brightness"": {""operation"": A.RandomBrightness, ""suffix"":""_bright""}, + ""Blur"": {""operation"": A.GaussianBlur, ""suffix"":""_blur""} + } + self.dataLocation = dataLocation + self.csvFile = csvFile + + def __applyAugmentationClass(self, image, augmentation,limit): + if augmentation in list(self.AugmentationOptions.keys()): + if augmentation == ""Crop"": + height, width, _ = image.shape + crop_percentage = random.uniform(0.6, 0.9) + transform = self.AugmentationOptions[augmentation][""operation""][0](height=int(height*crop_percentage), width=int(width*crop_percentage) ) + elif augmentation == ""Blur"": + transform = self.AugmentationOptions[augmentation][""operation""](blur_limit = limit) + elif augmentation in [""Contrast"",""Brightness""]: + transform = self.AugmentationOptions[augmentation][""operation""](limit = limit) + else: + transform = self.AugmentationOptions[augmentation][""operation""]() + return transform(image=image) + + def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7): + transformOptions = [] + if bboxes: + bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids']) + else: + bbox_params = None + if augmentation in list(self.AugmentationOptions.keys()): + if augmentation == ""Crop"": + height, width, _ = image.shape + crop_percentage = random.uniform(0.6, 0.9) + transformOptions.append(self.AugmentationOptions[augmentation][""operation""][1](height=int(height*crop_percentage), width=int(width*crop_percentage) )) + elif augmentation == ""Blur"": + transformOptions.append(self.AugmentationOptions[augmentation][""operation""](blur_limit = limit)) + elif augmentation in [""Contrast"",""Brightness""]: + transformOptions.append(self.AugmentationOptions[augmentation][""operation""](limit = limit)) + else: + transformOptions.append(self.AugmentationOptions[augmentation][""operation""]()) + transform = A.Compose( + transformOptions, + bbox_params=bbox_params, + ) + random.seed(seed) + return transform(image=image, bboxes=bboxes, category_ids=category_ids) + else: + return None + def getBBox(self, df, imageLoc, category_name_to_id): + + subDf = df[df['loc']==imageLoc] + boxes = [] + category = [] + for index, row in subDf.iterrows(): + boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']]) + category.append(category_name_to_id[row['Label']]) + return boxes, category + + def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op): + for x in range(numberofImages): + bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id) + image = cv2.imread(imageLoc) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids) + transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR) + count = 1 + row = df[df['loc']==imageLoc].iloc[0] + filename = 
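# --- Illustrative sketch (not part of the original source) ---
# __applyAugmentation below builds an albumentations Compose with pascal_voc BboxParams so
# bounding boxes are transformed together with the image. A minimal standalone version of
# that pattern; the image array and the box coordinates are synthetic.
import numpy as np
import albumentations as A

image = np.zeros((100, 120, 3), dtype=np.uint8)
transform = A.Compose(
    [A.HorizontalFlip(p=1.0)],
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['category_ids']),
)
out = transform(image=image, bboxes=[[10, 20, 50, 60]], category_ids=[1])
print(out['bboxes'])   # the box is mirrored along with the image
# --- end sketch ---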
(Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op][""suffix""] + Path(imageLoc).suffix) + newImage = str(Path(imageLoc).parent/filename) + for index,bbox in enumerate(transformed['bboxes']): + data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True} + count += 1 + df=df.append(data, ignore_index=True) + + cv2.imwrite(newImage, transformed['image']) + return df + + + def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}): + + category_id_to_name = {v+1:k for v,k in enumerate(classes_names)} + category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)} + for i, imageLoc in enumerate(images): + for key in optionDf.columns: + if optionDf.iloc[i][key]: + if key in augConf: + limit = eval(augConf[key].get('limit','0.2')) + numberofImages = int(" +"augConf[key].get('noOfImages',1)) + else: + limit = 0.2 + numberofImages = 1 + + df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key) + return df + + def __augClassificationImage(self, imageLoc, df,limit,imageindex,op): + data = {} + image = cv2.imread(imageLoc) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + transformed = self.__applyAugmentationClass(image, op,limit) + transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR) + location = Path(imageLoc).parent + filename = (Path(imageLoc).stem +'_'+'str(imageindex)'+ self.AugmentationOptions[op][""suffix""] + Path(imageLoc).suffix) + cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image']) + data['File'] = filename + data['Label'] = df[df['File']==Path(imageLoc).name][""Label""].iloc[0] + data['AugmentedImage'] = True + data['loc'] = str(location/filename) + return data + + def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None): + for i, imageLoc in enumerate(images): + for key in optionDf.columns: + if optionDf.iloc[i][key]: + if key in augConf: + limit = eval(augConf[key].get('limit','0.2')) + numberofImages = int(augConf[key].get('noOfImages',1)) + else: + limit = 0.2 + numberofImages = 1 + for x in range(numberofImages): + rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key) + df=df.append(rows, ignore_index=True) + return df + + def removeAugmentedImages(self, df): + removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist() + #df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink()) + for file in removeDf: + if file: + Path(file).unlink() + + def augment(self, modelType=""imageclassification"",params=None,csvSavePath = None,augConf={}): + if isinstance(params, dict) and any(params.values()): + df = pd.read_csv(self.csvFile) + if not self.dataLocation.endswith('/'): + images = self.dataLocation+'/' + else: + images = self.dataLocation + if modelType == ""imageclassification"": + images = images + df['File'] + else: + images = images + df['File'] + df['loc'] = images + images = set(images.tolist()) + option = {} + for key in list(self.AugmentationOptions.keys()): + option[key] = params.get(key, False) + optionDf = pd.DataFrame(columns=list(option.keys())) + for i in range(len(images)): + optionDf = optionDf.append(option, ignore_index=True) + if modelType == ""imageclassification"": + df = self.__classification(images, df, optionDf,augConf) + 
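# --- Illustrative note (not part of the original source) ---
# __objAug and __classification extend the annotation DataFrame with DataFrame.append,
# which was deprecated in pandas 1.4 and removed in pandas 2.0. On current pandas the
# same row-append can be written with pd.concat; 'data' below stands in for the per-bbox
# dict built in __objAug and the columns are illustrative only.
import pandas as pd

df_demo = pd.DataFrame(columns=['File', 'Label'])
data = {'File': 'img_0_flip.jpg', 'Label': 'cat'}            # illustrative row
df_demo = pd.concat([df_demo, pd.DataFrame([data])], ignore_index=True)
print(df_demo)
# --- end note ---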
else: + classes_names = sorted(df['Label'].unique().tolist()) + df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf) + df.to_csv(self.csvFile, index=False) + return self.csvFile ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#System imports +import logging +from distutils.util import strtobool +import pandas as pd +from text import TextProcessing + + +def get_one_true_option(d, default_value): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + +class textProfiler(): + + def __init__(self): + self.log = logging.getLogger('eion') + + def textCleaning(self, textCorpus): + textProcessor = TextProcessing.TextProcessing() + textCorpus = textProcessor.transform(textCorpus) + return(textCorpus) + + def textProfiler(self, textCorpus, conf_json, pipeList, max_features): + cleaning_kwargs = {} + textCleaning = conf_json.get('textCleaning') + self.log.info(""Text Preprocessing config: "",textCleaning) + cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True')) + cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True')) + cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False')) + cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False')) + cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True')) + cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True')) + cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True')) + cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'), + 'lemmatization').capitalize() + + removeNoiseConfig = textCleaning.get('removeNoiseConfig') + if type(removeNoiseConfig) is dict: + cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True')) + cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True')) + cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True')) + cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True')) + cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace' + cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True')) + cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True')) + + acronymConfig = textCleaning.get('acronymConfig') + if type(acronymConfig) is dict: + cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None) + + stopWordsConfig = textCleaning.get('stopWordsConfig') + if type(stopWordsConfig) is dict: + cleaning_kwargs['stopwordsList'] = 
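# --- Hypothetical usage sketch (not part of the original source) ---
# How the ImageAugmentation.augment() driver above might be called. The folder, CSV path,
# option values and augConf limits are assumptions for illustration; they assume the
# File/Label/loc annotation schema this class reads from its CSV.
aug = ImageAugmentation(dataLocation='/data/images', csvFile='/data/annotations.csv')
params = {'Flip': True, 'Blur': True}                     # which augmentations to apply
augConf = {'Blur': {'limit': '3', 'noOfImages': '2'}}     # per-operation limit / copies, as strings
csv_out = aug.augment(modelType='imageclassification', params=params, augConf=augConf)
print(csv_out)   # path of the CSV updated with the augmented rows
# --- end sketch ---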
stopWordsConfig.get('stopwordsList', []) + cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace' + removeNumericConfig = textCleaning.get('removeNumericConfig') + if type(removeNumericConfig) is dict: + cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True')) + + removePunctuationConfig = textCleaning.get('removePunctuationConfig') + if type(removePunctuationConfig) is dict: + cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False')) + + cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False')) + if cleaning_kwargs['fExpandContractions']: + cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH + + libConfig = textCleaning.get('libConfig') + if type(libConfig) is dict: + cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk') + cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk') + cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk') + + textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs) + textCorpus = textProcessor.transform(textCorpus) + pipeList.append((""TextProcessing"",textProcessor)) + + textFeatureExtraction = conf_json.get('textFeatureExtraction') + if strtobool(textFeatureExtraction.get('pos_tags', 'False')): + pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk') + posTagger = TextProcessing.PosTagging( pos_tags_lib) + textCorpus = posTagger.transform(textCorpus) + pipeList.append((""posTagger"",posTagger)) + ngram_min = 1 + ngram_max = 1 + if strtobool(textFeatureExtraction.get('n_grams', 'False')): + n_grams_config = textFeatureExtraction.get(""n_grams_config"") + ngram_min = int(n_grams_config.get('min_n', 1)) + ngram_max = int(n_grams_config.get('max_n', 1)) + if (ngram_min < 1) or ngram_min > ngram_max: + ngram_min = 1 + ngram_max = 1 + invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max) + self.log.info(invalidNgramWarning) + ngram_range_tuple = (ngram_min, ngram_max) + textConversionMethod = conf_json.get('textConversionMethod') + conversion_method = get_one_true_option(textConversionMethod, None) + if conversion_method.lower() == ""countvectors"": + X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features) + pipeList.append((""vectorizer"",vectorizer)) + df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) + df1 = df1.add_suffix('_vect') + self.log.info('----------> Conversion Method: CountVectors') + elif conversion_method.lower() in [""word2vec"",""fasttext"",""glove""]: + embedding_method = conversion_method + wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method) + wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel() + X = wordEmbeddingVecotrizer.transform(textCorpus) + df1 = pd.DataFrame(X) + df1 = df1.add_suffix('_vect') + pipeList.append((""vectorizer"",wordEmbeddingVecotrizer)) + self.log.info('----------> Conversion Method: '+str(conversion_method)) + elif conversion_method.lower() == ""sentencetransformer"": + from sentence_transformers import SentenceTransformer + model = 
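# --- Illustrative sketch (not part of the original source) ---
# The countvectors / tf_idf branches below feed the validated (ngram_min, ngram_max) tuple
# and the max_features cap straight into a scikit-learn vectorizer. A standalone version of
# that step; the corpus is made up, and get_feature_names_out() is the current scikit-learn
# name for the get_feature_names() call used in this file.
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

corpus = ['the cat sat', 'the dog sat', 'the cat ran']
vectorizer = CountVectorizer(ngram_range=(1, 2), max_features=20)
X_demo = vectorizer.fit_transform(corpus)
df_vect = pd.DataFrame(X_demo.toarray(),
                       columns=vectorizer.get_feature_names_out()).add_suffix('_vect')
print(df_vect.columns.tolist())
# --- end sketch ---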
SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2') + X = model.encode(textCorpus) + df1 = pd.DataFrame(X) + df1 = df1.add_suffix('_vect') + pipeList.append((""vectorizer"",model)) + self.log.info('----------> Conversion Method: SentenceTransformer') + elif conversion_method.lower() == 'tf_idf': + X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features) + pipeList.append((""vectorizer"",vectorizer)) + df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) + df1 = df1.add_suffix('_vect') + self.log.info('----------> Conversion Method: TF_IDF') + else: + df1 = pd.DataFrame() + df1['tokenize'] = textCorpus + self.log.info('----------> Conversion Method: NA') + return df1, pipeList,conversion_method + import os +import sys +import numpy as np +import scipy +import pandas as pd +from pathlib import Path + +default_config = { + 'misValueRatio': '1.0', + 'numericFeatureRatio': '1.0', + 'categoryMaxLabel': '20', + 'str_to_cat_len_max': 10 +} + +target_encoding_method_change = {'targetencoding': 'labelencoding'} + +supported_method = { + 'fillNa': + { + 'categorical' : ['mode','zero','na'], + 'numeric' : ['median','mean','knnimputer','zero','drop','na'], + }, + 'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'], + 'normalization': ['standardscaler','minmax','lognormal', 'na','none'], + 'outlier_column_wise': ['iqr','zscore', 'disable', 'na'], + 'outlierOperation': ['dropdata', 'average', 'nochange'] + } + +def findiqrOutlier(df): + Q1 = df.quantile(0.25) + Q3 = df.quantile(0.75) + IQR = Q3 - Q1 + index = ~((df < (Q1 - 1.5 * IQR)) |" +"(df > (Q3 + 1.5 * IQR))) + return index + +def findzscoreOutlier(df): + z = np.abs(scipy.stats.zscore(df)) + index = (z < 3) + return index + +def findiforestOutlier(df): + from sklearn.ensemble import IsolationForest + isolation_forest = IsolationForest(n_estimators=100) + isolation_forest.fit(df) + y_pred_train = isolation_forest.predict(df) + return y_pred_train == 1 + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +def recommenderStartProfiler(self,modelFeatures): + try: + self.log.info('----------> FillNA:0') + self.data = self.data.fillna(value=0) + self.log.info('Status:- !... Missing value treatment done') + self.log.info('----------> Remove Empty Row') + self.data = self.data.dropna(axis=0,how='all') + self.log.info('Status:- !... 
Empty feature treatment done') + userId,itemId,rating = modelFeatures.split(',') + self.data[itemId] = self.data[itemId].astype(np.int32) + self.data[userId] = self.data[userId].astype(np.int32) + self.data[rating] = self.data[rating].astype(np.float32) + return self.data + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + return(self.data) + +def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): + try: + dataset_directory = Path(folderlocation) + dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name'] + tfrecord_directory = Path(deployLocation)/'Video_TFRecord' + from savp import PreprocessSAVP + import csv + csvfile = open(dataset_csv_file, newline='') + csv_reader = csv.DictReader(csvfile) + PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory) + dataColumns = list(self.data.columns) + VideoProcessing = True + return dataColumns,VideoProcessing,tfrecord_directory + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + +def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + import os + try: + features = [doc_col_1, doc_col_2] + pipe = None + dataColumns = list(self.data.columns) + self.numofCols = self.data.shape[1] + self.numOfRows = self.data.shape[0] + from transformations.textProfiler import textProfiler + + self.log.info('-------> Execute Fill NA With Empty String') + self.data = self.data.fillna(value="" "") + self.log.info('Status:- |... Missing value treatment done') + self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1]) + self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2]) + self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2) + self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + from tensorflow.keras.preprocessing.text import Tokenizer + pipe = Tokenizer() + pipe.fit_on_texts(self.data['text'].values) + self.log.info('-------> Tokenizer: Fit on Concatenate Field') + self.log.info('Status:- |... Tokenizer the text') + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + return (self.data, pipe, self.target_name, features) + except Exception as inst: + self.log.info(""StartProfiler failed "" + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + +def set_features(features,profiler=None): + if profiler: + features = [x for x in features if x not in profiler.added_features] + return features + profiler.text_feature + return features ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
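# --- Illustrative sketch (not part of the original source) ---
# textSimilarityStartProfiler below fits a Keras Tokenizer on the concatenated document
# columns and returns it as the preprocessing pipe. A minimal fit/encode round trip of that
# same step; the sentences are made up.
from tensorflow.keras.preprocessing.text import Tokenizer

pipe_demo = Tokenizer()
pipe_demo.fit_on_texts(['how to reset my password', 'password reset steps'])
print(pipe_demo.texts_to_sequences(['reset password']))   # integer ids depend on fit order
# --- end sketch ---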
+* +''' +import sys +from pathlib import Path +import urllib.request +import tarfile +import json +import subprocess +import os +from os.path import expanduser +import platform + + +class ODpretrainedModels(): + + def __init__(self, location=None): + if location: + if isinstance(location, Path): + self.pretrained_models_location = location.as_posix() + else: + self.pretrained_models_location = location + else: + p = subprocess.run([sys.executable, ""-m"", ""pip"",""show"",""AION""],capture_output=True, text=True) + if p.returncode == 0: + Output = p.stdout.split('\\n') + for x in Output: + y = x.split(':',1) + if(y[0]=='Location'): + self.pretrained_models_location = y[1].strip()+""/AION/pretrained_models/object_detection"" + break + if Path(self.pretrained_models_location).is_dir(): + self.config_file_location = self.pretrained_models_location+'/supported_models.json' + with open(self.config_file_location) as json_data: + self.supportedModels = json.load(json_data) + + home = expanduser(""~"") + if platform.system() == 'Windows': + self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection') + else: + self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection') + + if os.path.isdir(self.modelsPath) == False: + os.makedirs(self.modelsPath) + + def __save_config(self): + with open(self.config_file_location, 'w') as json_file: + json.dump(self.supportedModels, json_file) + + + def __download(self, modelName): + try: + url = self.supportedModels[modelName][""url""] + file = self.supportedModels[modelName][""file""] + local_file_path = Path(self.modelsPath)/(file+"".tar.gz"") + urllib.request.urlretrieve(url, local_file_path) + except: + raise ValueError(""{} model download error, check your internet connection"".format(modelName)) + return local_file_path + + + def __extract(self, modelName, file_location, extract_dir): + try: + tarFile = tarfile.open(file_location) + tarFile.extractall(extract_dir) + tarFile.close() + Path.unlink(file_location) + return True + except: + return False + + + def download(self, modelName): + if modelName in list(self.supportedModels.keys()): + p = Path(self.modelsPath).glob('**/*') + modelsDownloaded = [x.name for x in p if x.is_dir()] + if self.supportedModels[modelName]['file'] not in modelsDownloaded: + file = self.__download(modelName) + self.supportedModels[modelName][""downloaded""] = True + if self.__extract(modelName, file, self.modelsPath): + self.supportedModels[modelName][""extracted""] = True + self.__save_config() + else: + self.__save_config() + raise ValueError(""{} model downloaded but extraction failed,please try again"".format(modelName)) + else: + raise ValueError(""{} is not supported for object detection"".format(modelName)) + return self.supportedModels[modelName] + + def get_info(self,modeltype): + models_info = {} + p = Path(self.pretrained_models_location) + downloaded_models = [x.name for x in p.iterdir() if x.is_dir()] + for model in list(self.supportedModels.keys()): + if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''): + models_info[model] = self.supportedModels[model]['extracted'] + return models_info + + def is_model_exist(self, model_name): + models = self.get_info('') + status = ""NOT_SUPPORTED"" + if model_name in models: + if self.supportedModels[model_name]['extracted']: + status = ""READY"" + else: + status = ""NOT_READY"" + return status + + def clear_config(self, model_name): + self.supportedModels[model_name]['extracted'] = 
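# --- Illustrative sketch (not part of the original source) ---
# ODpretrainedModels.download/__extract above fetch a .tar.gz archive and unpack it into the
# local pretrained-models folder. A hedged standalone version of that pattern; the URL and
# target directory are placeholders, not real model locations.
import tarfile
import urllib.request
from pathlib import Path

url = 'https://example.com/ssd_mobilenet.tar.gz'          # placeholder URL
target_dir = Path.home() / 'pretrained_models'
target_dir.mkdir(parents=True, exist_ok=True)
archive = target_dir / 'ssd_mobilenet.tar.gz'
urllib.request.urlretrieve(url, archive)                  # download the archive
with tarfile.open(archive) as tar:                        # unpack next to the other models
    tar.extractall(target_dir)
archive.unlink()                                          # remove the archive, as __extract does
# --- end sketch ---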
False + self.supportedModels[model_name]['downloaded'] = False + self.__save_config() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import numpy as np +import os +import sys +import string +import spacy +#import en_core_web_sm +from spacy.lang.en.stop_words import STOP_WORDS +from spacy.lang.en import English +try: + from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS +except: + from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS +from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer +from sklearn.base import TransformerMixin +from nltk.stem import WordNetLemmatizer +import re +from collections import defaultdict +from nltk.corpus import wordnet as wn +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import LabelBinarizer +from nltk.tokenize import word_tokenize +from nltk import pos_tag +from nltk.corpus import stopwords + +class textDataProfiler(): + def __init__(self): + self.data=None + #self.nlp=en_core_web_sm.load() + self.punctuations = string.punctuation + self.stopwords = list(STOP_WORDS) + + def startTextProfiler(self,df,target): + try: + dataColumns = list(df.columns) + print(' \\n No of rows and columns in dataFrame',df.shape) + print('\\n features in dataFrame',dataColumns) + dataFDtypes=self.dataFramecolType(df) + print('\\n feature types in dataFrame',dataFDtypes) + trainX=df['text'] + trainY=df[target] + return trainX,trainY + except Exception as inst: + print('startTextProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def dataFramecolType(self,dataFrame): + dataFDtypes=[] + try: + dataColumns=list(dataFrame.columns) + for i in dataColumns: + dataType=dataFrame[i].dtypes + dataFDtypes.append(tuple([i,str(dataType)])) + return dataFDtypes + except Exception as e: + print(""error in dataFramecolyType"",e) + return dataFDtypes + + def textTokenizer(self,text): + try: + parser = English() + tokens = parser(text) + tokens = [ word.lemma_.lower().strip() if word.lemma_ != ""-PRON-"" else word.lower_ for word in tokens ] + tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ] + return tokens + except Exception as inst: + print('textDataProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + return {} + + def cleanText(self,text): + try: + text=str(text).strip().lower() + for punctuation in string.punctuation: + text = text.replace(punctuation, '') + return text + except Exception as inst: + print('cleanText code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def textTokenization(self,text): + try: + tokenizedText=word_tokenize(text) + return tokenizedText 
+ except Exception as inst: + print('textDataProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc" +"_tb.tb_lineno) + return {} + + def textLemmitizer(self,text): + try: + tag_map = defaultdict(lambda : wn.NOUN) + tag_map['J'] = wn.ADJ + tag_map['V'] = wn.VERB + tag_map['R'] = wn.ADV + Final_words = [] + word_Lemmatized = WordNetLemmatizer() + for word, tag in pos_tag(text): + if word not in stopwords.words('english') and word.isalpha(): + word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]]) + Final_words.append(word_Final) + return str(Final_words) + except Exception as inst: + print('textLemmitizer code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + return {} + +class TextCleaner(TransformerMixin): + + def clean_text(self,text): + try: + text=str(text).strip().lower() + text = text.replace(""isn't"", ""is not"") + text = text.replace(""aren't"", ""are not"") + text = text.replace(""ain't"", ""am not"") + text = text.replace(""won't"", ""will not"") + text = text.replace(""didn't"", ""did not"") + text = text.replace(""shan't"", ""shall not"") + text = text.replace(""haven't"", ""have not"") + text = text.replace(""hadn't"", ""had not"") + text = text.replace(""hasn't"", ""has not"") + text = text.replace(""don't"", ""do not"") + text = text.replace(""wasn't"", ""was not"") + text = text.replace(""weren't"", ""were not"") + text = text.replace(""doesn't"", ""does not"") + text = text.replace(""'s"", "" is"") + text = text.replace(""'re"", "" are"") + text = text.replace(""'m"", "" am"") + text = text.replace(""'d"", "" would"") + text = text.replace(""'ll"", "" will"") + text = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE) + text = re.sub(r'[\\w\\.-]+@[\\w\\.-]+', ' ', text, flags=re.MULTILINE) + for punctuation in string.punctuation: + text = text.replace(punctuation,' ') + text = re.sub(r'[^A-Za-z0-9\\s]',r' ',text) + text = re.sub(r'\\n',r' ',text) + text = re.sub(r'[0-9]',r' ',text) + wordnet_lemmatizer = WordNetLemmatizer() + text = "" "".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()]) + return text + except Exception as inst: + print('TextCleaner clean_text code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def text_cleaner(self,text): + text = self.clean_text(text) + stop_words = set(stopwords.words('english')) + text_tokens = word_tokenize(text) + out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1)) + return(out) + + def transform(self, X, **transform_params): + # Cleaning Text + return [self.clean_text(text) for text in X] + + def fit(self, X, y=None, **fit_params): + return self + + def get_params(self, deep=True): + return {} ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
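# --- Illustrative sketch (not part of the original source) ---
# textLemmitizer above maps the first letter of each Penn Treebank POS tag to a WordNet POS
# (defaulting to NOUN) so WordNetLemmatizer lemmatizes with the right part of speech.
# A minimal version of that technique; the nltk.download calls are one-time resource fetches
# and resource names vary slightly across nltk versions.
import nltk
from collections import defaultdict
from nltk import pos_tag, word_tokenize
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer

for pkg in ('punkt', 'averaged_perceptron_tagger', 'wordnet'):
    nltk.download(pkg, quiet=True)

tag_map = defaultdict(lambda: wn.NOUN, {'J': wn.ADJ, 'V': wn.VERB, 'R': wn.ADV})
lemmatizer = WordNetLemmatizer()
tokens = word_tokenize('the cats were running faster')
print([lemmatizer.lemmatize(w, tag_map[t[0]]) for w, t in pos_tag(tokens)])
# roughly -> ['the', 'cat', 'be', 'run', 'faster']
# --- end sketch ---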
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import glob +import pandas as pd +import io +import xml.etree.ElementTree as ET +import argparse + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) +import tensorflow as tf +from PIL import Image +from object_detection.utils import dataset_util, label_map_util +from collections import namedtuple +from pathlib import Path + + +def class_text_to_int(row_label, label_map_dict): + return label_map_dict[row_label] + + +def split(df, group): + data = namedtuple('data', ['File', 'object']) + gb = df.groupby(group) + return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)] + + +def create_tf_example(group, path, label_map_dict): + with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = Image.open(encoded_jpg_io) + width, height = image.size + + File = group.File.encode('utf8') + image_format = b'jpg' + xmins = [] + xmaxs = [] + ymins = [] + ymaxs = [] + classes_text = [] + classes = [] + + for index, row in group.object.iterrows(): + xmin_n = min(row['xmin'], row['xmax']) + xmax_n = max(row['xmin'], row['xmax']) + ymin_n = min(row['ymin'], row['ymax']) + ymax_n = max(row['ymin'], row['ymax']) + + xmin_new = min(xmin_n, width) + xmax_new = min(xmax_n, width) + ymin_new = min(ymin_n, height) + ymax_new = min(ymax_n, height) + + xmn = xmin_new / width + xmins.append(xmn) + + xmx = xmax_new / width + xmaxs.append(xmx) + + ymn = ymin_new / height + ymins.append(ymn) + + ymx = ymax_new / height + ymaxs.append(ymx) + classes_text.append(row['Label'].encode('utf8')) + classes.append(class_text_to_int(row['Label'], label_map_dict)) + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(File), + 'image/source_id': dataset_util.bytes_feature(File), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example + +def labelFile(classes_names, label_map_path): + pbtxt_content = """" + + for i, class_name in enumerate(classes_names): + pbtxt_content = ( + pbtxt_content + + ""item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n"".format(i + 1, class_name) + ) + pbtxt_content = pbtxt_content.strip() + with open(label_map_path, ""w"") as f: + f.write(pbtxt_content) + +def createLabelFile(train_df, save_path): + labelmap_path = str(Path(save_path)/ 'label_map.pbtxt') + classes_names = sorted(train_df['Label'].unique().tolist()) + labelFile(classes_names, labelmap_path) + return labelmap_path, len(classes_names) + + +def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path): + + outputPath = str(Path(output_dir)/ 'train.tfrecord') + writer = tf.io.TFRecordWriter( outputPath) + grouped = 
split(train_df, 'File') + label_map = label_map_util.load_labelmap(labelmap_path ) + label_map_dict = label_map_util.get_label_map_dict(label_map) + for group in grouped: + tf_example = create_tf_example(group, image_dir, label_map_dict) + writer.write(tf_example.SerializeToString()) + writer.close() + if len(test_df): + outputPath = str(Path(output_dir)/ 'test.tfrecord') + writer = tf.io.TFRecordWriter( outputPath) + grouped = split(test_df, 'File') + for group in grouped: + tf_example = create_tf_example(group, image_dir, label_map_dict) + writer.write(tf_example.SerializeToString()) + writer.close() + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#from sklearn.externals import joblib +import joblib +# import pyreadstat +# import sys +# import math +import time +import pandas as pd +import numpy as np +from sklearn.metrics import confusion_matrix +from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score +from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import classification_report, confusion_matrix +from sklearn.svm import SVC +from sklearn.linear_model import LinearRegression +import argparse +import json +import os +import pathlib +from tensorflow.keras.models import load_model +# from tensorflow.keras import backend as K +import tensorflow as tf +# from sklearn.decomposition import LatentDirichletAllocation +from pathlib import Path +#from aionUQ import aionUQ +from uq_main import aionUQ +import os +from datetime import datetime +from sklearn.model_selection import train_test_split +parser = argparse.ArgumentParser() +parser.add_argument('savFile') +parser.add_argument('csvFile') +parser.add_argument('features') +parser.add_argument('target') +args = parser.parse_args() +from appbe.dataPath import DEPLOY_LOCATION + +if ',' in args.features: + args.features = [x.strip() for x in args.features.split(',')] +else: + args.features = args.features.split("","") +models = args.savFile +if Path(models).is_file(): +# if Path(args.savFile.is_file()): + model = joblib.load(args.savFile) + # print(model.__class__.__name__) + # print('class:',model.__class__) + # print(type(model).__name__) + + # try: + # print('Classess=',model.classes_) + + # except: + # print(""Classess=N/A"") + + # print('params:',model.get_params()) + + # try: + # print('fea_imp =',model.feature_importances_) + + # 
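# --- Illustrative sketch (not part of the original source) ---
# create_tf_example / generate_TF_record above pack image bytes and normalized box
# coordinates into tf.train.Example records via the Object Detection API's dataset_util.
# A hedged sketch of the same Example / TFRecordWriter round trip using raw TensorFlow only;
# all values are synthetic.
import tensorflow as tf

def _int64(v):  return tf.train.Feature(int64_list=tf.train.Int64List(value=[v]))
def _floats(v): return tf.train.Feature(float_list=tf.train.FloatList(value=v))

example = tf.train.Example(features=tf.train.Features(feature={
    'image/height': _int64(480),
    'image/width': _int64(640),
    'image/object/bbox/xmin': _floats([0.1]),   # normalized, as in create_tf_example
    'image/object/bbox/xmax': _floats([0.4]),
    'image/object/class/label': _int64(1),
}))
with tf.io.TFRecordWriter('train.tfrecord') as writer:
    writer.write(example.SerializeToString())
# --- end sketch ---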
except: + # print(""fea_imp =N/A"") + + ProblemName = model.__class__.__name__ + Params = model.get_params() + # print(""ProblemName: \\n"",ProblemName) + # print(""Params: \\n"",Params) + + # print('ProblemName:',model.__doc__) + # print(type(ProblemName)) + if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighboursClassifier','DecisionTreeClassifier','GradientBoostingClassifier']: + Problemtype = 'Classification' + else : + Problemtype = 'Regression' + + if Problemtype == 'Classification': + + df = pd.read_csv(args.csvFile) + object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object'] + df = df.drop(object_cols, axis=1) + df = df.dropna(axis=1) + df = df.reset_index(drop=True) + modelfeatures = args.features + # dfp = df[modelfeatures] + tar = args.target + # target = df[tar] + y=df[tar] + X = df.drop(tar, axis=1) + #for dummy test,train values pass + X_train, X_test, y_train, y_test = train_test_" +"split(X, y, test_size=0.3, random_state=0) + uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar) + #accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,""uqtest"") + accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification() + # print(""UQ Classification: \\n"",output_jsonobject) + print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per) + print(""End of UQ Classification.\\n"") + + else: + + df = pd.read_csv(args.csvFile) + modelfeatures = args.features + # print(""modelfeatures: \\n"",modelfeatures) + # print(""type modelfeatures: \\n"",type(modelfeatures)) + dfp = df[modelfeatures] + tar = args.target + target = df[tar] + #Not used, just dummy X,y split + y=df[tar] + X = df.drop(tar, axis=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression() + print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject) + print(""End of UQ reg\\n"") + +elif Path(models).is_dir(): + + + os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' + os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + model = load_model(models) + ProblemName = model.__class__.__name__ + Problemtype = 'Classification' + # print('class:',model.__class__) + # print('class1',model.__class__.__name__) + # print(model.summary()) + # print('ProblemName1:',model.get_config()) + + + + + def Params(model: tf.keras.Model): + Params = [] + model.Params(print_fn=lambda x: Params.append(x)) + return '\\n'.join(Params) + + + + df = pd.read_csv(args.csvFile) + modelfeatures = args.features + dfp = df[modelfeatures] + tar = args.target + target = df[tar] + df3 = dfp.astype(np.float32) + predic = model.predict(df3) + if predic.shape[-1] > 1: + predic = np.argmax(predic, axis=-1) + else: + predic = (predic > 0.5).astype(""int32"") + + matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) + matrixconfusion = matrixconfusion.to_json(orient='index') + + classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() + classificationreport = round(classificationreport,2) + classificationreport = classificationreport.to_json(orient='index') + output = {} 
+ output[""Precision""] = ""%.3f"" % precision_score(target, predic,average='weighted') + output[""Recall""] = ""%.3f"" % recall_score(target, predic,average='weighted') + output[""Accuracy""] = ""%.3f"" % accuracy_score(target, predic) + output[""ProblemName""] = ProblemName + output[""Params""] = Params + output[""Problemtype""] = Problemtype + output[""Confusionmatrix""] = matrixconfusion + output[""classificationreport""] = classificationreport + print(json.dumps(output)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from matplotlib import pyplot +import sys +import os +import json +import matplotlib.pyplot as plt +from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression +from uq360.algorithms.ucc_recalibration import UCCRecalibration +from sklearn import datasets +from sklearn.model_selection import train_test_split +import pandas as pd +from uq360.metrics.regression_metrics import compute_regression_metrics +import numpy as np +from sklearn.metrics import accuracy_score +from sklearn.metrics import precision_score +from sklearn.metrics import recall_score +from sklearn.metrics import f1_score +from sklearn.metrics import roc_curve +# from math import sqrt +from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error +# from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature +from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature + +#Added libs from MLTest +import sys +import time +from sklearn.metrics import confusion_matrix +from pathlib import Path +import logging +# import json + +class aionUQ: + # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): + def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation): + # #printprint(""Inside aionUQ \\n"") + try: + #print(""Inside aionUQ init\\n "") + self.data=df + self.dfFeatures=dfp + self.uqconfig_base=Params + self.uqconfig_meta=Params + self.targetFeature=targetfeature + self.target=target + self.selectedfeature=modelfeatures + self.y=self.target + self.X=self.dfFeatures + self.log = logging.getLogger('eion') + self.basemodel=model + self.model_name=ProblemName + self.Deployment = os.path.join(deployLocation,'log','UQ') + os.makedirs(self.Deployment,exist_ok=True) + self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph') + os.makedirs(self.uqgraphlocation,exist_ok=True) + except Exception as e: + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus): + from 
sklearn.model_selection import train_test_split + # To get each class values and uncertainty + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest + # y_val = y_train.append(y_test) + else: + # y_val = self.y + df=self.data + y=df[self.targetFeature] + X = df.drop(self.targetFeature, axis=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + + # from sklearn.tree import DecisionTreeRegressor + # from sklearn.linear_model import LinearRegression,Lasso,Ridge + # from sklearn import linear_model + # from sklearn.ensemble import RandomForestRegressor + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + y_hat_total_mean=np.mean(y_hat) + y_hat_lb_total_mean=np.mean(y_hat_lb) + y_hat_ub_total_mean=np.mean(y_hat_ub) + mpiw_20_per=(y_hat_total_mean*20/100) + mpiw_lower_range = y_hat_total_mean - mpiw_20_per + mpiw_upper_range = y_hat_total_mean + mpiw_20_per + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) + self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) + self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) + self.log.info('Model total picp_percentage : '+str(picp_percentage)) + return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range + + def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): + try: + global x_feature,y_feature + if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): + x_feature=''.join(map(str, self.selectedfeature)) + else: + x_feature= str(self.selectedfeature) + # self.selectedfeature=str(self.selectedfeature) + X_test=np.squeeze(X_test) + y_feature=str(self.targetFeature) + pred_dict = {x_feature: X_test, + 'y': y_test, + 'y_mean': y_mean, + 'y_upper': y_upper, + 'y_lower': y_lower + } + pred_df = pd.DataFrame(data=pred_dict) + pred_df_sorted = pred_df.sort_values(by=x_feature) + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') + plt.legend() + plt.xlabel(x_feature) + 
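# --- Illustrative sketch (not part of the original source) ---
# totalUncertainty above reports picp and mpiw from uq360.metrics. Both have simple
# definitions: PICP is the fraction of observations falling inside [lower, upper], and
# MPIW is the mean interval width. Shown here with plain numpy on synthetic arrays.
import numpy as np

y_true = np.array([10.0, 12.0, 9.0, 15.0])
y_lb   = np.array([ 8.0, 11.0, 9.5, 13.0])
y_ub   = np.array([11.0, 13.0, 10.5, 16.0])

picp = np.mean((y_true >= y_lb) & (y_true <= y_ub))   # 0.75 here: one point misses its interval
mpiw = np.mean(y_ub - y_lb)
print(round(float(picp), 2), round(float(mpiw), 2))
# --- end sketch ---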
plt.ylabel(y_feature) + plt.title('UQ Confidence Interval Plot.') + # plt.savefig('uq_test_plt.png') + if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'): + os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png') + plt.savefig(str(self.Deployment)+'/uq_test_plt.png') + plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png') + plt.clf() + plt.cla() + " +" plt.close() + pltreg=plot_picp_by_feature(X_test, y_test, + y_lower, y_upper, + xlabel=x_feature) + #pltreg.savefig('x.png') + pltr=pltreg.figure + if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'): + os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png') + pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') + pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + except Exception as e: + # #print(""display exception: \\n"",e) + self.log.info(' '+str(e)) + + def classUncertainty(self,pred,score): + try: + outuq = {} + classes = np.unique(pred) + for c in classes: + ids = pred == c + class_score = score[ids] + predc = 'Class_'+str(c) + outuq[predc]=np.mean(class_score) + x = np.mean(class_score) + #Uncertaininty in percentage + x=x*100 + self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x))) + return outuq + except Exception as e: + # #print(""display exception: \\n"",e) + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus): + try: + # print(""Inside uqMain_BBMClassification\\n"") + # print(""lenth of x_train {}, x_test {}, y_train {}, y_test {}"".format(x_train, x_test, y_train, y_test)) + aionstatus = str(aionstatus) + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + else: + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification + from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + from sklearn.ensemble import GradientBoostingClassifier + from sklearn.linear_model import LogisticRegression + from sklearn.linear_model import SGDClassifier + from sklearn.naive_bayes import GaussianNB + from sklearn.tree import DecisionTreeClassifier + from sklearn.ensemble import RandomForestClassifier + from sklearn.svm import SVC + from xgboost import XGBClassifier + from lightgbm import LGBMClassifier + from sklearn.neighbors import KNeighborsClassifier + + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + #print(model_name) + try: + #geting used features + model_used_features=self.basemodel.feature_names_in_ + self.log.info(""Base model used training features are (UQ Testing): \\n""+str(model_used_features)) + except: + pass + model_params=self.basemodel.get_params() + uq_scoring_param='accuracy' + basemodel=None + if (model_name == ""GradientBoostingClassifier""): + basemodel=GradientBoostingClassifier + elif (model_name == ""SGDClassifier""): + basemodel=SGDClassifier + elif (model_name == ""GaussianNB""): + basemodel=GaussianNB + elif (model_name == 
""DecisionTreeClassifier""): + basemodel=DecisionTreeClassifier + elif(model_name == ""RandomForestClassifier""): + basemodel=RandomForestClassifier + elif (model_name == ""SVC""): + basemodel=SVC + elif(model_name == ""KNeighborsClassifier""): + basemodel=KNeighborsClassifier + elif(model_name.lower() == ""logisticregression""): + basemodel=LogisticRegression + elif(model_name == ""XGBClassifier""): + basemodel=XGBClassifier + elif(model_name == ""LGBMClassifier""): + basemodel=LGBMClassifier + else: + basemodel=LogisticRegression + + calibrated_mdl=None + if (model_name == ""SVC""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SVC(**model_params) + calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] + elif (model_name == ""SGDClassifier""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SGDClassifier(**model_params) + calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] + else: + from sklearn.calibration import CalibratedClassifierCV + base_mdl = basemodel(**model_params) + calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3) + basemodelfit = calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base=calibrated_mdl.predict_proba(X_test)[:, :] + cal_model_params=calibrated_mdl.get_params() + + acc_score_base=accuracy_score(y_test, basepredict) + base_estimator_calibrate = cal_model_params['base_estimator'] + + + uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel, + base_config=model_params, meta_config=model_params) + + try: + X_train=X_train[model_used_features] + X_test=X_test[model_used_features] + except: + pass + uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) + # uqmodel_fit = uq_model.fit(X_train, y_train) + y_t_pred, y_t_score = uq_model.predict(X_test) + acc_score=accuracy_score(y_test, y_t_pred) + test_accuracy_perc=round(100*acc_score) + if(aionstatus == ""aionuq""): + test_accuracy_perc=round(test_accuracy_perc,2) + #uq_aurrrc not used for any aion gui configuration, so it initialized as 0. if we use area_under_risk_rejection_rate_curve(), it shows plot in cmd prompt,so code execution interuupted.so we make it 0. + uq_aurrrc=0 + pass + else: + bbm_c_plot = plot_risk_vs_rejection_rate( + y_true=y_test, + y_prob=predprob_base, + selection_scores=y_t_score, + y_pred=y_t_pred, + plot_label=['UQ_risk_vs_rejection'], + risk_func=accuracy_score, + num_bins = 10 ) + # This done by kiran, need to uncomment for GUI integration. + # bbm_c_plot_sub = bbm_c_plot[4] + bbm_c_plot_sub = bbm_c_plot + if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'): + os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') + # bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') + re_plot=plot_reliability_diagram(y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + plot_label=['UQModel reliability_diagram'], + num_bins=10 ) + # This done by kiran, need to uncomment for GUI integration. 
+ # re_plot_sub = re_plot[4] + re_plot_sub = re_plot + if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'): + os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png') + # re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png') + + uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + selection_scores=y_t_score, + attributes=None, + risk_func=accuracy_score,subgroup_ids=None, return_counts=False, + num_bins=10) + uq_aurrrc=uq_aurrrc + test_accuracy_perc=round(test_accuracy_perc) + #metric_all=compute_classification_metrics(y_test, y_prob, option='all') + metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') + #expected_calibration_error + uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False) + # uq_aurrrc=uq_aurrrc + confidence_score=acc_score_base-uq_ece + ece_confidence_score=round(confidence_score,2) + # Model uncertainty using ECE score + # model_uncertainty_ece = 1-ece_confidence_score + #Uncertainty Using model inherent predict probability + + mean_predprob_total=np.mean(y_t_score) + model_confidence=mean_predprob_total + model_uncertainty = 1-mean_predprob_total + + model_confidence = round(model_confidence,2) + # To get each class values and uncertainty + if (aionstatus.lower() == 'aionuq'): + y_val = np.append(y_train,y_test) + else: + y_val = self.y + self.log.info('------------------> Model Confidence Score '+str(model_confidence)) + outuq = self.classUncertainty(y_t_pred,y_t_score) + + # Another way to get conf score + model_uncertainty_per=round((model_uncertainty*100),2) + model_confidence_per=round((model_confidence*100),2) + acc_score_per = round((acc_score*100),2) + uq_ece_per=round((uq_ece*100),2) + output={} + recommendation = """" + if (uq_ece > 0.5): + # RED text + recommendation = 'Model has high ece (expected calibration error) score compare to threshold (0.5),not good to be deploy. need to be add more input data across all feature ranges to train base model, also try with different classification algorithms/" +"ensembling to reduce ECE (ECE~0).' + else: + # self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.') + if (uq_ece <= 0.1 and model_confidence >= 0.9): + # Green Text + recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' + else: + # Orange + recommendation = 'Model has good ECE score (between 0.1-0.5), but less confidence score compare to threshold (90%). If user wants,model can be improve by adding more input data across all feature ranges and could be evaluate with different algorithms/ensembling. 
' + #Adding each class uncertainty value + classoutput = {} + for k,v in outuq.items(): + classoutput[k]=(str(round((v*100),2))) + output['classes'] = classoutput + output['ModelConfidenceScore']=(str(model_confidence_per)) + output['ExpectedCalibrationError']=str(uq_ece_per) + output['ModelUncertainty']=str(model_uncertainty_per) + output['Recommendation']=recommendation + # output['user_msg']='Please check the plot for more understanding of model uncertainty' + #output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4) + output['Accuracy']=str(acc_score_per) + output['Problem']= 'Classification' + #self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %')) + # #print(""Prediction mean for the given model:"",np.mean(y_hat),""\\n"") + #self.log.info(recommendation) + #self.log.info(""Model_confidence_score: "" +str(confidence_score)) + #self.log.info(""Model_uncertainty: "" +str(round(model_uncertainty,2))) + #self.log.info('Please check the plot for more understanding of model uncertainty.\\n.') + uq_jsonobject = json.dumps(output) + with open(str(self.Deployment)+""/uq_classification_log.json"", ""w"") as f: + json.dump(output, f) + return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per + except Exception as inst: + self.log.info('\\n < ---------- UQ Model Execution Failed Start--------->') + self.log.info('\\n<------Model Execution failed!!!.' + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + def aion_confidence_plot(self,df): + df=df + df = df.sort_values(by=self.selectedfeature) + best_values=df.Best_values.to_list() + best_upper=df.Best__upper.to_list() + best_lower=df.Best__lower.to_list() + Total_Upper_PI=df.Total_Upper_PI.to_list() + Total_Low_PI=df.Total_Low_PI.to_list() + Obseved = df.Observed.to_list() + + plt.plot(df[x_feature], df['Observed'], 'o', label='Observed') + plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey') + plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey') + plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') + plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) + plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) + plt.legend() + plt.xlabel(self.selectedfeature) + plt.ylabel(self.targetFeature) + plt.title('UQ Best & Good Area Plot') + if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'): + os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png') + plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png') + plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') + + + def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus): + aionstatus = str(aionstatus) + # if (aionstatus.lower() == 'aionuq'): + # X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + # else: + # X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) + + # modelName = """" + self.log.info(' ') + try: 
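+            # Overview: the block below wraps the already trained base regressor in uq360's
+            # BlackboxMetamodelRegression (a meta model fitted on top of the base model to produce
+            # prediction intervals), predicts lower/upper bounds on the test split, and summarises the
+            # interval quality with PICP and MPIW before writing uq_reg_log.json.
+            # Minimal standalone sketch, kept as a comment and mirroring the calls used below
+            # (the concrete estimator is illustrative):
+            #   uq = BlackboxMetamodelRegression(base_model=LinearRegression, meta_model=LinearRegression,
+            #                                    base_config={}, meta_config={})
+            #   uq.fit(X_train, y_train)
+            #   y_hat, y_hat_lb, y_hat_ub = uq.predict(X_test)
+            #   coverage, width = picp(y_test, y_hat_lb, y_hat_ub), mpiw(y_hat_lb, y_hat_ub)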
+ from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression + import pandas as pd + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + model_params=self.basemodel.get_params() + # #print(""model_params['criterion']: \\n"",model_params['criterion']) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + # modelname='sklearn.linear_model'+'.'+model_name + # X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest + #Geeting trained model name and to use the model in BlackboxMetamodelRegression + from sklearn.tree import DecisionTreeRegressor + from sklearn.linear_model import LinearRegression,Lasso,Ridge + from sklearn.ensemble import RandomForestRegressor + if (model_name == ""DecisionTreeRegressor""): + basemodel=DecisionTreeRegressor + elif (model_name == ""LinearRegression""): + basemodel=LinearRegression + elif (model_name == ""Lasso""): + basemodel=Lasso + elif (model_name == ""Ridge""): + basemodel=Ridge + elif(model_name == ""RandomForestRegressor""): + basemodel=RandomForestRegressor + else: + basemodel=LinearRegression + + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + else: + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus) + + + + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + # #print(""X_train.shape: \\n"",X_train.shape) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + self.log.info(' '+str(observed_alphas_picp)) + self.log.info(' '+str(observed_widths_mpiw)) + # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” + #metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss. 
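+                # PICP (prediction interval coverage probability) is the fraction of test targets that
+                # fall inside [y_hat_lb, y_hat_ub]; MPIW (mean prediction interval width) is the average
+                # width of those intervals. A high PICP with a narrow MPIW indicates useful, well
+                # calibrated intervals; the confidence/uncertainty percentages reported below are
+                # derived directly from PICP.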
+ metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) + metric_used='' + for k,v in metric_all.items(): + metric_used=str(round(v,2)) + self.log.info(' '+str(metric_all)) + # Determine the confidence level and recommentation to the tester + # test_data=y_test + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + #Calculate total uncertainty for all features + # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) + # df1=self.data + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + recommendation="""" + output={} + + if (observed_alphas_picp >= 0.95 and total_picp >= 0.75): + # Add GREEN text + self.log.info('Model has good confidence for the selected feature, ready to deploy.\\n.') + recommendation = ""Model has good confidence score, ready to deploy."" + elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)): + # Orange + recommendation = ""Model has average confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."" + self.log.info('Model has average confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') + + else: + # RED text + recommendation = ""Model has less confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."" + self.log.info('Model has less confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') + + #Build uq json info dict + output['ModelConfidenceScore']=(str(total_picp_percentage)+'%') + output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%') + output['SelectedFeatureConfidence']=(str(picp_percentage)+'%') + output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%') + output['PredictionIntervalCoverageProbability']=ob" +"served_alphas_picp + output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw) + output['DesirableMPIWRange: ']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) + output['Recommendation']=str(recommendation) + output['Metric']=uq_scoring_param + output['Score']=metric_used + output['Problemtype']= 'Regression' + self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %')) + self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %')) + #self.log.info('Please check the plot for more understanding of model uncertainty.\\n.') + #self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub) + uq_jsonobject = json.dumps(output) + with open(str(self.Deployment)+""/uq_reg_log.json"", ""w"") as f: + json.dump(output, f) + #To get best and medium UQ range of values from total predict interval + y_hat_m=y_hat.tolist() + y_hat_lb=y_hat_lb.tolist() + upper_bound=y_hat_ub.tolist() + y_hat_ub=y_hat_ub.tolist() + for x in y_hat_lb: + y_hat_ub.append(x) + total_pi=y_hat_ub + medium_UQ_range = y_hat_ub + best_UQ_range= y_hat.tolist() + ymean_upper=[] + ymean_lower=[] + y_hat_m=y_hat.tolist() + for i in y_hat_m: + y_hat_m_range= (i*20/100) 
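+                    # Each mean prediction is widened by +/-20% of its own value to form the 'best'
+                    # confidence band (ymean_lower/ymean_upper); interval values outside that band are
+                    # collected as the 'medium' UQ range a few lines below, and both ranges are returned
+                    # to the caller (aion_confidence_plot, kept commented out here, shades the same bands).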
+ x=i+y_hat_m_range + y=i-y_hat_m_range + ymean_upper.append(x) + ymean_lower.append(y) + min_best_uq_dist=round(min(best_UQ_range)) + max_best_uq_dist=round(max(best_UQ_range)) + # initializing ranges + list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) + list_best = y_hat_m + X_test = np.squeeze(X_test) + ''' + uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, + 'Best__upper':ymean_upper, + 'Best__lower':ymean_lower, + 'Total_Low_PI': y_hat_lb, + 'Total_Upper_PI': upper_bound, + } + + print(uq_dict) + uq_pred_df = pd.DataFrame(data=uq_dict) + uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') + uq_pred_df_sorted.to_csv(str(self.Deployment)+""/uq_pred_df.csv"",index = False) + csv_path=str(self.Deployment)+""/uq_pred_df.csv"" + df=pd.read_csv(csv_path) + self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.') + #Callconfidence olot fn only for UQTest interface + + if (aionstatus.lower() == 'aionuq'): + #No need to showcase confidence plot for aion main + pass + else: + self.aion_confidence_plot(df) + ''' + return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject) + + except Exception as inst: + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from matplotlib import pyplot +import sys +import os +import matplotlib.pyplot as plt +from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression +from sklearn import datasets +from sklearn.model_selection import train_test_split +import pandas as pd +from uq360.metrics.regression_metrics import compute_regression_metrics +import numpy as np +from sklearn.metrics import accuracy_score +from sklearn.metrics import precision_score +from sklearn.metrics import recall_score +from sklearn.metrics import f1_score +from sklearn.metrics import roc_curve +from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error +from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature +import sys +import time +from sklearn.metrics import confusion_matrix +from pathlib import Path +import logging +import logging.config +from os.path import expanduser +import platform +from sklearn.utils import shuffle + +class aionUQ: + # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): + def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature): + try: + self.data=df + self.dfFeatures=dfp + self.uqconfig_base=Params + self.uqconfig_meta=Params + self.targetFeature=targetfeature + self.log = logging.getLogger('aionUQ') + self.target=target + self.selectedfeature=modelfeatures + self.y=self.target + self.X=self.dfFeatures + from appbe.dataPath import DEPLOY_LOCATION + self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time())))) + os.makedirs(self.Deployment,exist_ok=True) + self.basemodel=model + self.model_name=ProblemName + # self.X, self.y = shuffle(self.X, self.y) + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0) + self.xtrain = X_train + self.xtest = X_test + self.ytrain = y_train + self.ytest = y_test + # self.deployLocation=deployLocation + + + except Exception as e: + # self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + # self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def totalUncertainty(self,df,basemodel,model_params): + try: + # from sklearn.model_selection import train_test_split + # df=self.data + # y=df[self.targetFeature] + # X = df.drop(self.targetFeature, axis=1) + if (isinstance(self.selectedfeature,list)): + selectedfeature=[self.selectedfeature[0]] + selectedfeature=' '.join(map(str,selectedfeature)) + if (isinstance(self.targetFeature,list)): + targetFeature=[self.targetFeature[0]] + targetFeature=' '.join(map(str,targetFeature)) + X = self.data[selectedfeature] + y = self.data[targetFeature] + X = X.values.reshape((-1,1)) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + # from sklearn.tree import DecisionTreeRegressor + # from sklearn.linear_model import 
LinearRegression,Lasso,Ridge + # from sklearn import linear_model + # from sklearn.ensemble import RandomForestRegressor + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + y_hat_total_mean=np.mean(y_hat) + y_hat_lb_total_mean=np.mean(y_hat_lb) + y_hat_ub_total_mean=np.mean(y_hat_ub) + mpiw_20_per=(y_hat_total_mean*20/100) + mpiw_lower_range = y_hat_total_mean - mpiw_20_per + mpiw_upper_range = y_hat_total_mean + mpiw_20_per + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + # self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) + # self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) + # self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) + # self.log.info('Model total picp_percentage : '+str(picp_percentage)) + except Exception as e: + print(""totalUncertainty fn error: \\n"",e) + + return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range + + def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): + try: + global x_feature,y_feature + if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): + x_feature=','.join(map(str, self.selectedfeature)) + else: + x_feature= str(self.selectedfeature) + # self.selectedfeature=str(self.selectedfeature) + + X_test=np.squeeze(X_test) + y_feature=str(self.targetFeature) + pred_dict = {x_feature: X_test, + 'y': y_test, + 'y_mean': y_mean, + 'y_upper': y_upper, + 'y_lower': y_lower + } + pred_df = pd.DataFrame(data=pred_dict) + x_feature1 = x_feature.split(',') + pred_df_sorted = pred_df.sort_values(by=x_feature1) + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['" +"y'], 'o', label='Observed') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') + plt.legend() + plt.xlabel(x_feature1[0]) + plt.ylabel(y_feature) + plt.title('UQ Confidence Interval Plot.') + # plt.savefig('uq_test_plt.png') + ''' + if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'): + os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') + ''' + plt.savefig(str(self.Deployment)+'/uq_test_plt.png') + #plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') + confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + pltreg=plot_picp_by_feature(X_test, y_test, + y_lower, y_upper, + xlabel=x_feature) + #pltreg.savefig('x.png') + pltr=pltreg.figure + ''' + if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'): + os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') + ''' + pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') + 
picpPlot = os.path.join(self.Deployment,'picp_per_feature.png') + #pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + except Exception as e: + print(""display exception: \\n"",e) + # self.log.info(' '+str(e)) + return confidencePlot,picpPlot + + def classUncertainty(self,predprob_base): + # from collections import Counter + predc=""Class_"" + classes = np.unique(self.y) + total = len(self.y) + list_predprob=[] + counter = Counter(self.y) + #for loop for test class purpose + for k,v in counter.items(): + n_samples = len(self.y[self.y==k]) + per = ((v/total) * 100) + prob_c=predprob_base[:,int(k)] + list_predprob.append(prob_c) + # #print(""Class_{} : {}/{} percentage={}% \\n"".format(k,n_samples,total,per )) + outuq={} + for k in classes: + predc += str(k) + mean_predprob_class=np.mean(list_predprob[int(k)]) + uncertainty=1-mean_predprob_class + predc+='_Uncertainty' + outuq[predc]=uncertainty + predc=""Class_"" + return outuq + + + def uqMain_BBMClassification(self): + # self.log.info(' ') + # import matplotlib.pyplot as plt + try: + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification + except: + ##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification. + from uq360.algorithms.blackbox_metamodel import MetamodelClassification + + # from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + # from sklearn import datasets + # from sklearn.model_selection import train_test_split + # from sklearn.metrics import accuracy_score + from sklearn.ensemble import GradientBoostingClassifier + from sklearn.linear_model import LogisticRegression + from sklearn.linear_model import SGDClassifier + from sklearn.naive_bayes import GaussianNB + from sklearn.tree import DecisionTreeClassifier + from sklearn.ensemble import RandomForestClassifier + from sklearn.svm import SVC + from sklearn.neighbors import KNeighborsClassifier + # from sklearn.linear_model import LogisticRegression + + # import pandas as pd + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + model_params=self.basemodel.get_params() + try: + #geting used features + model_used_features=self.basemodel.feature_names_in_ + except: + pass + X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest + uq_scoring_param='accuracy' + basemodel=None + if (model_name == ""GradientBoostingClassifier""): + basemodel=GradientBoostingClassifier + elif (model_name == ""SGDClassifier""): + basemodel=SGDClassifier + elif (model_name == ""GaussianNB""): + basemodel=GaussianNB + elif (model_name == ""DecisionTreeClassifier""): + basemodel=DecisionTreeClassifier + elif(model_name == ""RandomForestClassifier""): + basemodel=RandomForestClassifier + elif (model_name == ""SVC""): + basemodel=SVC + elif(model_name == ""KNeighborsClassifier""): + basemodel=KNeighborsClassifier + elif(model_name == ""LogisticRegression""): + basemodel=LogisticRegression + else: + basemodel=LogisticRegression + + try: + try: + ##Removed meta_config because leave meta model config as default ml model params + uq_model = 
BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) + except: + uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) + except: + ##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification + try: + ##Removed meta_config because leave meta model config as default ml model params + uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) + except: + uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) + + # this will fit both the base and the meta model + try: + X_train=X_train[model_used_features] + X_test=X_test[model_used_features] + except: + pass + + uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) + # uqmodel_fit = uq_model.fit(X_train, y_train) + #Test data pred, score + y_t_pred, y_t_score = uq_model.predict(X_test) + #predict probability + # uq_pred_prob=uq_model.predict_proba(X_test) + # predprob_base=basemodel.predict_proba(X_test)[:, :] + #if (model_name == ""SVC"" or model_name == ""SGDClassifier""): + # if model_name in ['SVC','SGDClassifier']: + if (model_name == ""SVC""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SVC(**model_params) + calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_svc.fit(X_train, y_train) + basepredict = basemodel.predict(X_test) + predprob_base = calibrated_svc.predict_proba(X_test)[:, :] + elif (model_name == ""SGDClassifier""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SGDClassifier(**model_params) + calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_svc.fit(X_train, y_train) + basepredict = basemodel.predict(X_test) + predprob_base = calibrated_svc.predict_proba(X_test)[:, :] + else: + base_mdl = basemodel(**model_params) + basemodelfit = base_mdl.fit(X_train, y_train) + basepredict = base_mdl.predict(X_test) + predprob_base=base_mdl.predict_proba(X_test)[:, :] + + acc_score=accuracy_score(y_test, y_t_pred) + test_accuracy_perc=round(100*acc_score) + + ''' + bbm_c_plot = plot_risk_vs_rejection_rate( + y_true=y_test, + y_prob=predprob_base, + selection_scores=y_t_score, + y_pred=y_t_pred, + plot_label=['UQ_risk_vs_rejection'], + risk_func=accuracy_score, + num_bins = 10 ) + + # This done by kiran, need to uncomment for GUI integration. + try: + bbm_c_plot_sub = bbm_c_plot[4] + bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png') + riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png') + except Exception as e: + print(e) + pass + riskPlot = '' + ''' + riskPlot = '' + ''' + try: + re_plot=plot_reliability_diagram(y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + plot_label=['UQModel reliability_diagram'], + num_bins=10) + # This done by kiran, need to uncomment for GUI integration. 
+ re_plot_sub = re_plot[4] + # re_plot_sub = re_plot + re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png') + reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png') + except Exception as e: + print(e) + pass + reliability_plot = '' + ''' + reliability_plot = '' + uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + selection_scores=y_t_score, + attributes=None, + risk_func=accuracy_score,subgroup_ids=None, return_counts=False, + num_bins=10) + uq_aurrrc=uq_aurrrc + test_accuracy_perc=round(test_accuracy_perc) + + #metric_all=compute_classification_metrics(y_test, y_prob, option='all') + metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') + #expected_calibration_error + uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False) + uq_aurrrc=uq_aurrrc + confidence_score=acc_score-uq_ece + + ece_confidence_score=round(confidence_score,2) + # Model uncertainty using ECE score + # model_uncertainty_ece = 1-ece_confidence_score + # #print(""model_uncertainty1: \\n"",model_uncertainty_ece) + + #Uncertainty Using model inherent predict probability + mean_predprob_total=np.mean(predprob_base) + model_uncertainty = 1-mean_predprob_total + model_confidence=mean_predprob_total + model_confidence = round(model_confidence,2) + + + # To get each class values and uncertainty + outuq = self.classUncertainty(predprob_base) + # Another way to get conf score + model_uncertainty_per" +"=round((model_uncertainty*100),2) + # model_confidence_per=round((model_confidence*100),2) + model_confidence_per=round((ece_confidence_score*100),2) + acc_score_per = round((acc_score*100),2) + uq_ece_per=round((uq_ece*100),2) + + output={} + recommendati" +"() + for x in y_hat_lb: + y_hat_ub.append(x) + total_pi=y_hat_ub + medium_UQ_range = y_hat_ub + best_UQ_range= y_hat.tolist() + ymean_upper=[] + ymean_lower=[] + y_hat_m=y_hat.tolist() + for i in y_hat_m: + y_hat_m_range= (i*20/100) + x=i+y_hat_m_range + y=i-y_hat_m_range + ymean_upper.append(x) + ymean_lower.append(y) + + + min_best_uq_dist=round(min(best_UQ_range)) + max_best_uq_dist=round(max(best_UQ_range)) + # initializing ranges + list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) + list_best = y_hat_m + ''' + print(X_test) + print(X_test) + X_test = np.squeeze(X_test) + print(x_feature) + ''' + uq_dict = pd.DataFrame(X_test) + #print(uq_dict) + uq_dict['Observed'] = y_test + uq_dict['Best_values'] = y_hat_m + uq_dict['Best__upper'] = ymean_upper + uq_dict['Best__lower'] = ymean_lower + uq_dict['Total_Low_PI'] = y_hat_lb + uq_dict['Total_Upper_PI'] = upper_bound + ''' + uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, + 'Best__upper':ymean_upper, + 'Best__lower':ymean_lower, + 'Total_Low_PI': y_hat_lb, + 'Total_Upper_PI': upper_bound, + }''' + uq_pred_df = pd.DataFrame(data=uq_dict) + uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') + uq_pred_df_sorted.to_csv(str(self.Deployment)+""/uq_pred_df.csv"",index = False) + csv_path=str(self.Deployment)+""/uq_pred_df.csv"" + df=pd.read_csv(csv_path) + + # self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.') + # confidenceplot = self.aion_confidence_plot(df) + # output['Confidence Plot']= confidenceplot + uq_jsonobject = json.dumps(output) + print(""UQ regression problem training completed...\\n"") + return 
observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject + + except Exception as inst: + print('-------',inst) + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#System imports +import logging +import os +import sys +import pickle + +#Sci-Tools imports +import numpy as np +import pandas as pd +from sklearn.preprocessing import LabelEncoder +from scipy import stats +from word2number import w2n + + +#river imports +from river.preprocessing import StatImputer +from river import stats, compose, anomaly + + + +class incProfiler(): + + def __init__(self): + self.DtypesDic={} + self.pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.allNumberTypeCols = [] #all number type columns + self.allNumCols = [] #only numerical columns which includes num features and target if it is numerical + self.allCatCols = [] + self.numFtrs = [] + self.catFtrs = [] + self.textFtrs = [] + self.textVectorFtrs = [] + self.numDiscreteCols = [] + self.numContinuousCols = [] + self.wordToNumericFeatures=[] + self.emptyCols=[] + self.missingCols = [] + self.targetColumn = """" + + self.le_dict = {} + self.configDict = {} + self.incFill = None + self.incLabelMapping = None + self.incCatEncoder = None + self.incScaler = None + self.incOutlierRem = None + self.log = logging.getLogger('eion') + + + + def pickleDump(self, model, path): + if model is not None: + with open(path, 'wb') as f: + pickle.dump(model, f) + + + def saveProfilerModels(self, deployLocation): + if isinstance(self.incFill['num_fill'], StatImputer) or isinstance(self.incFill['cat_fill'], StatImputer): + self.pickleDump(self.incFill, os.path.join(deployLocation,'production','profiler','incFill.pkl')) + self.pickleDump(self.incLabelMapping, os.path.join(deployLocation,'production','profiler','incLabelMapping.pkl')) + self.pickleDump(self.incCatEncoder, os.path.join(deployLocation,'production','profiler','incCatEncoder.pkl')) + self.pickleDump(self.incScaler, os.path.join(deployLocation,'production','profiler','incScaler.pkl')) + self.pickleDump(self.incOutlierRem, os.path.join(deployLocation,'production','profiler','incOutlierRem.pkl')) + + + + + def featureAnalysis(self, df, conf_json, targetFeature): + try: + self.log.info('-------> Remove Duplicate Rows') + noofdplicaterows = df.duplicated(keep='first').sum() + df = df.drop_duplicates(keep=""first"") + df = df.reset_index(drop=True) + self.log.info('Status:- |... 
Duplicate row treatment done: '+str(noofdplicaterows)) + self.log.info(df.head(5)) + self.log.info( '\\n----------- Inspecting Features -----------') + ctn_count = 0 + df = df.replace('-', np.nan) + df = df.replace('?', np.nan) + dataFDtypes=self.dataFramecolType(df) + numerical_ratio = float(conf_json['numericFeatureRatio']) + categoricalMaxLabel = int(conf_json['categoryMaxLabel']) + indexFeatures = [] + numOfRows = df.shape[0] + dataCols = df.columns + for item in dataFDtypes: + if(item[1] == 'object'): + filteredDf,checkFlag = self.smartFilter(item[0],df,numerical_ratio) + if(checkFlag): + self.wordToNumericFeatures.append(item[0]) + self.log.info('----------> Data Type Converting to numeric :Yes') + try: + df[item[0]]=filteredDf[item[0]].astype(float) + except: + pass + ctn_count = ctn_count+1 + else: + count = (df[item[0]] - df[item[0]].shift() == 1).sum() + if((numOfRows - count) == 1): + self.log.info( '-------> Feature :'+str(item[0])) + self.log.info('----------> Sequence Feature') + indexFeatures.append(item[0]) + self.configDict['wordToNumCols'] = self.wordToNumericFeatures + self.configDict['emptyFtrs'] = indexFeatures + self.log.info('Status:- |... Feature inspection done for numeric data: '+str(ctn_count)+' feature(s) converted to numeric') + self.log.info('Status:- |... Feature word to numeric treatment done: '+str(self.wordToNumericFeatures)) + self.log.info( '----------- Inspecting Features End -----------\\n') + except Exception as inst: + self.log.info(""Error in Feature inspection: ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + try: + self.log.info('\\n---------- Dropping Index features ----------') + self.log.info('Index Features to remove '+str(indexFeatures)) + if len(indexFeatures) > 0: + dataCols = list(set(dataCols) - set(indexFeatures)) + for empCol in indexFeatures: + self.log.info('-------> Drop Feature: '+empCol) + df = df.drop(columns=[empCol]) + self.log.info('---------- Dropping Index features End----------\\n') + + + dataFDtypes=self.dataFramecolType(df) + categoricalMaxLabel = int(conf_json['categoryMaxLabel']) + for item in dataFDtypes: + self.DtypesDic[item[0]] = item[1] + nUnique=len(df[item[0]].unique().tolist()) + if item[1] in self.pandasNumericDtypes: + self.allNumberTypeCols.append(item[0]) + if nUnique >= categoricalMaxLabel: + self.allNumCols.append(item[0]) #pure numerical + if item[1] in ['int16', 'int32', 'int64']: + self.numDiscreteCols.append(item[0]) + elif item[1] in ['float16', 'float32', 'float64']: + self.numContinuousCols.append(item[0]) + else: + self.allCatCols.append(item[0]) + elif item[1] != 'bool': + if (nUnique >= categoricalMaxLabel) and targetFeature != item[0]: + self.textFtrs.append(item[0]) + else: + col = item[0] + if (max(df[col].astype(str).str.split().str.len()) > 10) and targetFeature != item[0]: + self.textFtrs.append(item[0]) + else: + self.allCatCols.append(item[0]) + else: + self.allCatCols.append(item[0]) + misval_ratio = float(conf_json['misValueRatio']) + self.configDict['misval_ratio'] = misval_ratio + missingCols, emptyCols = self.getMissingVals(df, dataCols, misval_ratio) + if targetFeature in emptyCols: + raise Exception('Target column '+str(targetFeature)+' cannot be empty') + dataCols = list(set(dataCols) - set(emptyCols)) + self.log.info('\\n---------- Dropping empty features ----------') + for empCol in emptyCols: + self.log.info('-------> Drop Feature: 
'+empCol) + df = df.drop(columns=[empCol]) + self.log.info('---------- Dropping empty features End----------\\n') + self.log.info('Status:- |... Empty feature treatment done: '+str(len(emptyCols))+' empty feature(s) found') + self.log.info('-------> Data Frame Shape After Dropping (Rows,Columns): '+str(df.shape)) + self.allNumCols = list(set(self.allNumCols) - set(emptyCols)) + self.allCatCols = list(set(self.allCatCols) - set(emptyCols)) + self.textFtrs = list(set(self.textFtrs) - set(emptyCols)) + missingValFtrs = list(set(missingCols) - set(emptyCols)) + self.log.info(str(len(missingValFtrs))+' feature(s) found with missing value(s)') + self.log.info('\\n-------> Numerical continuous columns :'+(str(self.numContinuousCols))[:500]) + self.log.info('-------> Numerical discrete columns :'+(str(self.numDiscreteCols))[:500]) + self.log.info('-------> Non numerical columns :'+(str(self.allCatCols))[:500]) + self.log.info('-------> Text columns :'+(str(self.textFtrs))[:500]) + except Exception as inst: + self.log.info(""Error in segregating numerical and categorical columns: ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f" +"_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return df, missingValFtrs, emptyCols, dataCols, self.allNumCols, self.allCatCols, self.textFtrs + + + + def createIncProfiler(self, df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs): + self.incLabelMapping = None + catFtrs = allCatCols.copy() + #LabelEncoding + if self.targetColumn in allCatCols: + catFtrs.remove(self.targetColumn) + self.incLabelMapping = LabelEncoder() + df[self.targetColumn] = df[self.targetColumn].apply(str) + self.incLabelMapping.fit(df[self.targetColumn]) + self.le_dict = dict(zip(self.incLabelMapping.classes_, self.incLabelMapping.transform(self.incLabelMapping.classes_))) + self.log.info('----------> Encoded Values of Target Labels: '+(str(self.le_dict))[:500]) + + #self.incFill --> {num_fill:SI/0.0/'drop', cat_fill:SI/0.0/'drop'} + #fill + self.incFill = {} + self.incCatEncoder = None + self.incScaler = None + self.incOutlierRem = None + num_fill_method = 'Mean' + for x in list(conf_json['numericalFillMethod'].keys()): + if conf_json['numericalFillMethod'][x] == 'True': + num_fill_method = x + break + if num_fill_method.lower() =='mean': + num_fill = [(col, stats.Mean()) for col in allNumCols] + self.incFill['num_fill'] = StatImputer(*num_fill) + elif num_fill_method.lower() =='min': + num_fill = [(col, stats.Min()) for col in allNumCols] + self.incFill['num_fill'] = StatImputer(*num_fill) + elif num_fill_method.lower() == 'max': + num_fill = [(col, stats.Max()) for col in allNumCols] + self.incFill['num_fill'] = StatImputer(*num_fill) + elif num_fill_method.lower() =='zero': + self.incFill['num_fill'] = 'zero' + elif num_fill_method.lower() =='drop': + self.incFill['num_fill'] = 'drop' + else: + num_fill = [(col, stats.Mean()) for col in allNumCols] + self.incFill['num_fill'] = StatImputer(*num_fill) + + cat_fill_method = 'Mode' + for x in list(conf_json['categoricalFillMethod'].keys()): + if conf_json['categoricalFillMethod'][x] == 'True': + cat_fill_method = x + break + if cat_fill_method.lower() =='zero': + self.incFill['cat_fill'] = 'zero' + elif cat_fill_method.lower() == 'mode': + cat_fill = [(col, stats.Mode()) for col in allCatCols] + self.incFill['cat_fill'] = StatImputer(*cat_fill) + elif cat_fill_method.lower() =='drop': + self.incFill['cat_fill'] = 'drop' + + 
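+        # The fill strategy above is stored either as a literal ('zero'/'drop') or as a river
+        # StatImputer built from (column, running statistic) pairs, so missing values can be imputed
+        # incrementally as new rows arrive (see apply_river_model, which calls learn_one/transform_one).
+        # A minimal sketch of the same idea, with illustrative column names:
+        #   from river import stats
+        #   from river.preprocessing import StatImputer
+        #   imputer = StatImputer(('age', stats.Mean()), ('salary', stats.Mean()))
+        #   imputer.learn_one({'age': 30.0, 'salary': 5000.0})
+        #   imputer.transform_one({'age': None, 'salary': None})   # gaps filled with the running means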
#CatEncoding + for x in list(conf_json['categoryEncoding'].keys()): + if conf_json['categoryEncoding'][x] == 'True': + catEncoder = x + break + catEncHow = 'Mean' + for x in list(conf_json['targetEncodingParams']['how'].keys()): + if conf_json['targetEncodingParams']['how'][x] == 'True': + catEncHow = x + break + if self.targetColumn in catFtrs: + catFtrs.remove(self.targetColumn) + if len(catFtrs) > 0: + from river.feature_extraction import TargetAgg + if catEncHow.lower() == 'mean': + agg_stat = stats.Mean() + if catEncHow.lower() == 'bayesianmean' or catEncHow.lower() == 'bayesian mean': + agg_stat = stats.BayesianMean(prior=0.5, prior_weight=50) + self.incCatEncoder = TargetAgg( + by=catFtrs[0], how=agg_stat) + for col in catFtrs[1:]: + self.incCatEncoder += TargetAgg( + by=col, how=agg_stat) + self.incCatEncoder|= compose.Discard(*catFtrs) + + #Scaling + normalization_status = 'False' + normalization_method = """" + if 'normalization' in conf_json: + nor_supported_methods = conf_json['normalization'] + for k in nor_supported_methods.keys(): + if conf_json['normalization'][k].lower() == 'true': + normalization_status='True' + normalization_method =k + break + if normalization_status.lower() == ""true"" and len(numFtrs) > 0: + from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler + if self.targetColumn in numFtrs: + numFtrs.remove(self.targetColumn) + if normalization_method.lower() =='standardscaler': + self.incScaler = StandardScaler() + elif normalization_method.lower() =='minmaxscaler' or normalization_method.lower() =='minmax': + self.incScaler = MinMaxScaler() + elif normalization_method.lower() =='maxabsscaler' or normalization_method.lower() =='maxabs': + self.incScaler = MaxAbsScaler() + else: + self.incScaler = None + + #OutlierRemoval + outlier_status = 'False' + outlier_method = 'None' + for x in list(conf_json['outlierDetection'].keys()): + if conf_json['outlierDetection'][x] == 'True': + outlier_method = x + outlier_status = 'True' + break + if outlier_status and numFtrs: + outlierMethodNames = list(conf_json['outlierDetectionParams'].keys()) + if outlier_method.lower() == 'oneclasssvm' or outlier_method.lower() == 'one class svm': + for x in outlierMethodNames: + if x[0].lower() == 'o': + key = x + break + params = conf_json['outlierDetectionParams'][key] + self.log.info('<--- one class SVM with quantile filter --->') + self.incOutlierRem = anomaly.QuantileFilter(anomaly.OneClassSVM(nu=float(params['nu'])),q=float(params['q'])) + elif outlier_method.lower() =='halfspacetrees' or outlier_method.lower() =='half space trees': + for x in outlierMethodNames: + if x[0].lower() == 'h': + key = x + break + params = conf_json['outlierDetectionParams'][key] + self.log.info('<--- Half space trees with quantile filter --->') + self.incOutlierRem = anomaly.QuantileFilter(anomaly.HalfSpaceTrees(n_trees=int(params['n_trees']),height=int(params['height']), window_size=int(params['window_size'])) ,q=float(params['q'])) + else: + self.log.info(""No method is provided for outlier analysis"") + + + + def getMissingVals(self,dataframe,columns,misval_ratio): + try: + self.log.info( '\\n----------- Detecting for Missing Values -----------') + nonNAArray=[] + numOfRows = dataframe.shape[0] + for i in columns: + numNa=dataframe.loc[(pd.isna(dataframe[i])),i ].shape[0] + nonNAArray.append(tuple([i,numNa])) + for item in nonNAArray: + numofMissingVals = item[1] + if(numofMissingVals !=0): + self.log.info('-------> Feature '+str(item[0])) + self.log.info('----------> Number of 
Empty Rows '+str(numofMissingVals)) + self.missingCols.append(item[0]) + + if(numofMissingVals >= numOfRows * misval_ratio): + self.log.info('----------> Empty: Yes') + self.log.info('----------> Permitted Rows: '+str(int(numOfRows * misval_ratio))) + self.emptyCols.append(item[0]) + if(len(self.missingCols) !=0): + self.log.info( '----------- Detecting for Missing Values End -----------\\n') + return self.missingCols, self.emptyCols + else: + self.log.info( '-------> Missing Value Features :Not Any') + self.log.info( '----------- Detecting for Missing Values End -----------\\n') + return self.missingCols, self.emptyCols + except Exception as e: + self.log.info(""getMissingVals failed ==>"" +str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return None, None + + + def startIncProfiler(self,df,conf_json,targetFeature,deployLocation,problemType): + try: + self.targetColumn = targetFeature + df, missingValFtrs, emptyFtrs, dataFtrs, allNumCols, allCatCols, textFtrs = self.featureAnalysis(df, conf_json, self.targetColumn) + if len(textFtrs)>0: + self.log.info('Text Features are not supported. Dropping '+str(textFtrs)[:500]) + df = df.drop(columns=textFtrs) + catFtrs = allCatCols.copy() + numFtrs = allNumCols.copy() + if self.targetColumn in catFtrs: + catFtrs.remove(self.targetColumn) + + if targetFeature in allNumCols: + numFtrs.remove(targetFeature) + + self.configDict['targetCol'] = self.targetColumn + self.configDict['numFtrs'] = numFtrs + self.configDict['catFtrs'] = catFtrs + self.configDict['allNumCols'] = allNumCols + self.configDict['allCatCols'] = allCatCols + self.configDict['allFtrs'] = numFtrs+catFtrs + + + try: + self.log.info('\\n---------- Creating Incremental profiler models ----------') + self.createIncProfiler(df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs) + self.log.info('\\n--------- Incremental profiler models have been created ---------') + except Exception as inst: + self.log.info(""Error in creating Incremental profiler models""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + try: + #mvt + # if missingValFtrs: + if self.incFill['num_fill'] == 'drop': + df = df.dropna(axis = 0, subset=allNumCols) + self.configDict['num_fill'] = 'drop' + elif self.incFill['num_fill'] == 'zero': + df[allNumCols] = df[allNumCols].fillna(value = 0.0) + self.configDict['num_fill'] = 'zero' + else: + df = df.astype(object).where(df.notna(), None) + df[allNumCols]= df[allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill + ['num_fill']), axis='columns') + self.configDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in allNumCols} + + if self.incFill['cat_fill'] == 'drop': + df = df.dropna(axis = 0, subset=allCatCols) + self.configDict['cat_fill'] = 'drop' + elif self.incFill['cat_fill'] == 'zero': + df[allCatCols] = df[allCatCols].fillna(value = 0.0) + self.configDict['cat_fill'] = 'zero' + else: + df = df.astype(object).where(df.notna(), None) + df[allCatCols]= df[allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['cat_fill']), axis='columns') + " +" self.configDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in allCatCols} + + self.log.info('\\nStatus:- |... 
Missing value treatment done') + except Exception as inst: + self.log.info(""Error in Missing value treatment ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + try: + #labelenc + if self.incLabelMapping: + df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) + # self.configDict['labelMapping'] = self.le_dict + except Exception as inst: + self.log.info(""Error in Label mapping ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + try: + #catenc + if self.incCatEncoder: + self.log.info('\\n--------- Converting Non Numerical Categorical Features to Numerical Features ---------') + self.encTarget = targetFeature + if problemType.lower() == 'regression': + from sklearn.preprocessing import StandardScaler + sc = StandardScaler() + self.encTarget = 'scaledTarget' + df['scaledTarget'] = sc.fit_transform(df[targetFeature].to_numpy().reshape(-1,1)) + encCols = catFtrs.copy() + encCols.append(self.encTarget) + self.configDict['encCols'] = encCols + self.configDict['encTarget'] = self.encTarget + transformed_data = df[encCols].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns') + if targetFeature in transformed_data.columns: + transformed_data.drop(targetFeature, inplace=True, axis = 1) + if problemType.lower() == 'regression': + df.drop('scaledTarget', inplace=True, axis = 1) + df[catFtrs] = transformed_data + # self.log.info('Status:- |... Target Encoding state is as follows: ') + self.configDict['catEnc'] = [] + if len(catFtrs) == 1: + col = catFtrs[0] + self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()}) + else: + for i, col in enumerate(catFtrs): + if i==0: + no = '' + else: + no = str(i) + self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()}) + # print(self.incCatEncoder['TransformerUnion']['TargetAgg'].state) + # self.log.info(self.incCatEncoder) + self.log.info('Status:- |... Categorical to numeric feature conversion done: '+str(len(catFtrs))+' features converted') + + except Exception as inst: + self.log.info(""Error in categorical encoding ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + try: + #scaler + if self.incScaler: + self.log.info(""\\n---------- Data Normalization has started ----------"") + self.incScaler = self.incScaler.partial_fit(df[numFtrs]) + df[numFtrs] = self.incScaler.transform(df[numFtrs]) + self.log.info( ""---------- Normalization Done on Following features ----------"") + self.log.info(numFtrs) + self.log.info('Status:- |... 
Normalization treatment done') + except Exception as inst: + self.log.info(""Error in normalization ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + try: + #outlierrem + if self.incOutlierRem: + self.log.info('\\n---------- Performing outlier analysis ----------') + df = df[df[numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)] + self.log.info('\\n <--- dataframe after outlier analysis --->') + df.reset_index(drop=True, inplace=True) + self.log.info(df.head(5)) + self.log.info('Status:- |... Outlier treatment done') + self.log.info('\\n <--- shape of dataframe after outlier analysis --->') + self.log.info(df.shape) + except Exception as inst: + self.log.info(""Error in outlier treatment ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + raise + + #saveprofiler + self.log.info('\\n---------- Saving profiler models ----------') + self.saveProfilerModels(deployLocation) + self.log.info('<--- Profiler models saved at '+deployLocation+' --->') + return df,targetFeature,missingValFtrs,numFtrs,catFtrs,self.le_dict,self.configDict,textFtrs,emptyFtrs,self.wordToNumericFeatures + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + def transformData(self, df, targetFeature, missingValFtrs,numFtrs, catFtrs, textFtrs): + try: + df = df.drop_duplicates(keep=""first"") + df = df.reset_index(drop=True) + df = df.replace('-', np.nan) + df = df.replace('?', np.nan) + text_mv_cols = list(set(missingValFtrs).intersection(set(textFtrs))) + if len(text_mv_cols)>0: + df[text_mv_cols] = df[text_mv_cols].fillna(value = 'NA') + if 'num_fill' in self.configDict: + if self.configDict['num_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allNumCols) + elif self.configDict['num_fill'] == 'zero': + df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0) + else: + for x in self.allNumCols: + df[x] = df[x].fillna(value = self.configDict['num_fill'][x]) + if 'cat_fill' in self.configDict: + if self.configDict['cat_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allCatCols) + elif self.configDict['cat_fill'] == 'zero': + df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0) + else: + for x in self.allCatCols: + df[x] = df[x].fillna(value = self.configDict['cat_fill'][x]) + + if self.incLabelMapping: + df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) + + if self.incCatEncoder: + transformed_data = df[catFtrs].apply(lambda row: self.apply_enc(row.to_dict(), isTrain=False), axis='columns') + df[catFtrs] = transformed_data + + if self.incScaler: + df[numFtrs] = self.incScaler.transform(df[numFtrs]) + + return df + except Exception as inst: + self.log.info(""Error: DataProfiling transformation failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + def checknumStr(self,dataframe,feature): + try: + dataframe[feature]=dataframe[feature].apply(lambda x: self.testStr(x)) + return dataframe + except: + 
self.log.info(""checknumStr failed"") + return dataframe + + #test whether the value is numeric /string + def testStr(self,value): + try: + x=eval(value) + return np.nan + except: + return value + + """""" + Missing values analysis + Detects number of missing values in each column of dataframe + """""" + def checksRows(self,dataframe,target_column,dataColumns): + self.log.info( '\\n----------- Checking Target Feature Empty Rows -----------') + if self.targetColumn != '': + numNa=dataframe.loc[(pd.isna(dataframe[self.targetColumn])),self.targetColumn].shape[0] + self.log.info('------->No of Empty Rows in Target Fields: '+str(numNa)) + if numNa >0: + self.log.info('-------> Remove Empty Target Field Rows') + dataframe = dataframe.dropna(axis=0, subset=[self.targetColumn]) + self.log.info('-------> Remove Duplicate Rows') + dataframe = dataframe.dropna(axis=0,how='all',subset=dataColumns) + noofdplicaterows = dataframe.duplicated(keep='first').sum() + dataframe = dataframe.drop_duplicates(keep=""first"") + dataframe = dataframe.reset_index(drop=True) + return dataframe,noofdplicaterows + + + + def apply_river_model(self, x, profModel): + profModel.learn_one(x) + return pd.Series(profModel.transform_one(x)) + + def apply_enc(self, x, isTrain=True): + if isTrain: + y = x[self.encTarget] + self.incCatEncoder.learn_one(x, y) + return pd.Series(self.incCatEncoder.transform_one(x)) + + def apply_od_pipe(self, x): + score = self.incOutlierRem.score_one(x) + is_anomaly = self.incOutlierRem.classify(score) + self.incOutlierRem.learn_one(x) + return is_anomaly + + #Convert Words To Number + def s2n(self,value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan + + def convertWordToNumeric(self,dataframe,feature): + try: + dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) + return dataframe + except Exception as inst: + self.log.info(""convertWordToNumeric Failed ===>""+str(inst)) + return dataframe + + + #test whether the value is numeric /string + def testNum(self,value): + try: + x=eval(value) + return x + except: + return np.nan + + ##check for numeric values in string column + def checkNumeric(self,dataframe,feature): + try: + dataframe[feature]=dataframe[feature].apply(lambda x: self.testNum(x)) + return dataframe + except Exception as inst: + self.log.info(""checkNumeric Failed ===>""+str(inst)) + return dataframe + + + def smartFilter(self,feature,df,numericRatio): + try: + distinctCount = len(df[feature].unique()) + numOfRows = df.shape[0] + tempDataFrame=df.copy(deep=True) + if(distinctCount != 1): + self.log.info('-------> Feature :'+str(feature)) + testDf = self." 
+"checkNumeric(tempDataFrame,feature) + tempDf = testDf[feature] + tempDf = tempDf.dropna() + numberOfNonNullVals = tempDf.count() + if(numberOfNonNullVals > int(numOfRows * numericRatio)): + tempDataFrame=df.copy(deep=True) + testDf = self.convertWordToNumeric(tempDataFrame,feature) + tempDf = testDf[feature] + tempDf = tempDf.dropna() + self.log.info('----------> Numeric Status :Yes') + return testDf,True + else: + #Wasnt't a numerical feature + self.log.info('----------> Numeric Status :No') + #numDf = self.checknumStr(df,feature) + return df,False + else: + self.log.info( '\\n---> No Numerics found in :' +str(feature)) + return df,False + + except: + self.log.info( '\\n---> No Numerics found in :'+str(feature)) + return df,False + + def dataFramecolType(self,dataFrame): + dataFDtypes=[] + try: + dataColumns=list(dataFrame.columns) + for i in dataColumns: + dataType=dataFrame[i].dtypes + dataFDtypes.append(tuple([i,str(dataType)])) + return dataFDtypes + except: + self.log.info(""error in dataFramecolyType"") + return dataFDtypes + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +warnings.filterwarnings('ignore') +import logging +import sklearn +from random import sample +from numpy.random import uniform +import numpy as np +import math +import pickle +import os +import json +from math import isnan +from sklearn.preprocessing import binarize +from sklearn.preprocessing import LabelEncoder +import pandas as pd +from sklearn.preprocessing import LabelBinarizer +from sklearn.model_selection import train_test_split +from incremental.incClassificationModel import incClassifierModel +from incremental.incRegressionModel import incRegressionModel + + + +class incMachineLearning(object): + def __init__(self,mlobj): + self.features=[] + self.mlobj=mlobj + self.log = logging.getLogger('eion') + + + + + def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps): + model = 'None' + params = 'None' + score = 0xFFFF + estimator = None + model_tried = '' + threshold = -1 + pscore = -1 + rscore = -1 + topics = {} + if(targetColumn != ''): + targetData = dataFrame[targetColumn] + datacolumns=list(dataFrame.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + + + scoreParam = self.mlobj.setScoreParams(scoreParam,modelType,categoryCountList) + + + self.log.info('\\n-------------- Training ML: Start --------------') + model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method,incObj=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps) + self.log.info('-------------- Training ML: End --------------\\n') + + filename = 
os.path.join(deployLocation,'production','model',model+'.pkl') + saved_model = model+'.pkl' + pickle.dump(estimator, open(filename, 'wb')) + df_test = xtest.copy() + df_test.reset_index(inplace = True,drop=True) + trainPredictedData = incObj.bestTrainPredictedData + predictedData = incObj.bestPredictedData + try: + if(model_type == 'Classification'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = self.mlobj.getClassificationPerformaceMatrix(ytrain,trainPredictedData,labelMaps) + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performancematrix = self.mlobj.getClassificationPerformaceMatrix(ytest,predictedData,labelMaps) + ytest.reset_index(inplace=True,drop=True) + df_test['actual'] = ytest + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + + matrix = performancematrix + elif(model_type == 'Regression'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = self.mlobj.get_regression_matrix(ytrain, trainPredictedData) + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + matrix = self.mlobj.get_regression_matrix(ytest, predictedData) + ytest.reset_index(inplace=True, drop=True) + df_test['actual'] = ytest + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + + except Exception as Inst: + self.log.info('--------- Error Performance Matrix ---------\\n') + self.log.info(str(Inst)) + df_test['predict'] = predictedData + matrix = """" + train_matrix = """" + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + df_test.to_csv(predicted_data_file) + return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,self.features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params + + + + + def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps): + matrix = '' + threshold = -1 + pscore = -1 + rscore = -1 + datacolumns=list(xtrain.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + self.features =datacolumns + self.log.info('-------> Features Used For Training the Model: '+(str(self.features))[:500]) + xtrain = xtrain[self.features] + xtest = xtest[self.features] + method = mlconfig['optimizationMethod'] + method = method.lower() + geneticParam = '' + optimizationHyperParameter = mlconfig['optimizationHyperParameter'] + cvSplit = optimizationHyperParameter['trainTestCVSplit'] + nIter = int(optimizationHyperParameter['iterations']) + if(method.lower() == 'genetic'): + geneticParam = optimizationHyperParameter['geneticparams'] + scoreParam = scoreParam + if 'thresholdTunning' in mlconfig: + thresholdTunning = mlconfig['thresholdTunning'] + else: + thresholdTunning = 'NA' + if cvSplit == """": + cvSplit =None + else: + cvSplit =int(cvSplit) + + if modelType == 'classification': + model_type = ""Classification"" + MakeFP0 = False + MakeFN0 = False + if(len(categoryCountList) == 2): + if(thresholdTunning.lower() == 'fp0'): + MakeFP0 = True + elif(thresholdTunning.lower() == 'fn0'): + MakeFN0 = 
True + noOfClasses= len(labelMaps) + incObjClf = incClassifierModel(noOfClasses,modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) + model, params, score, estimator,model_tried,threshold,pscore,rscore = incObjClf.firstFit() + incObj = incObjClf + + elif modelType == 'regression': + model_type = ""Regression"" + incObjReg = incRegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) + model,params,score,estimator,model_tried = incObjReg.firstFit() + incObj = incObjReg + + + return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, incObj ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from learner.optimizetechnique import OptimizationTq +from learner.parameters import parametersDefine +import time +import logging +import os +import sys +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +from learner.aion_matrix import aion_matrix + + +class incRegressionModel(): + def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): + self.modelList =modelList + self.params =params + self.trainX =trainX + self.trainY =trainY + self.testX = testX + self.testY = testY + self.method =method + self.scoreParam=scoreParam + self.cvSplit=cvSplit + self.numIter=numIter + self.geneticParam=geneticParam + self.log = logging.getLogger('eion') + self.deployLocation = deployLocation + self.bestTrainPredictedData = None + self.bestPredictedData = None + self.AlgorithmNames={'Online Linear Regression':'Online Linear Regression', 'Online Decision Tree Regressor':'Online Decision Tree Regressor', 'Online KNN Regressor':'Online KNN Regressor'} + self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} + + + + def firstFit(self): + bestModel='' + bestParams={} + import sys + bestScore=-sys.float_info.max #bugfix 11656 + scoredetails = '' + self.log.info('\\n---------- Regression Model has started ----------') + try: + for modelName in self.modelList: + if modelName not in self.params: + continue + paramSpace=self.params[modelName] + algoName = self.AlgorithmNames[modelName] + from incremental.riverML import riverML + riverMLObj = riverML() + self.log.info(""-------> Model Name: ""+str(modelName)) + start = time.time() + model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('regression',algoName,paramSpace,self.trainX, self.trainY) + modelParams = str(modelParams) + executionTime=time.time() - start + self.log.info('---------> Total Execution: '+str(executionTime)) + predictedData = riverMLObj.getPrediction(estimator,self.testX) + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(self.testY,predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) + score = rootmeanssquatederror + elif 'neg_mean_absolute_error' in self.scoreParam: + meanabsoluteerror=mean_absolute_error(self.testY,predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score=r2_score(self.testY,predictedData) + score = round(r2score*100, 2) + if self.scoreParam == ""r2"": + if score>bestScore: + bestScore =score + " +"bestModel =model + bestParams=modelParams + bestEstimator=estimator + self.bestTrainPredictedData = trainPredictedData + self.bestPredictedData = predictedData + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore =abs(score) + bestModel =model + bestParams=modelParams + bestEstimator=estimator + self.bestTrainPredictedData = trainPredictedData + self.bestPredictedData = predictedData + metrices = {} + metrices[""score""] = score + + + + if(scoredetails != ''): + scoredetails += ',' + + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""Score"":'+str(abs(score))+'}' + + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + self.log.info(""Status:- |... 
Testing Score: ""+str(score)) + self.log.info('---------- Regression Model End ---------- \\n') + self.log.info('\\n------- Best Model and its parameters -------------') + self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) + self.log.info(""-------> Best Name: ""+str(bestModel)) + self.log.info(""-------> Best Score: ""+str(bestScore)) + return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails + except Exception as inst: + self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + +''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import time +import os +import sys +import numpy as np +from sklearn.metrics import confusion_matrix +from sklearn.metrics import recall_score +from sklearn.metrics import precision_score +from sklearn.preprocessing import binarize +from learner.optimizetechnique import OptimizationTq +from learner.parameters import parametersDefine +import logging +from learner.aion_matrix import aion_matrix + + +# apply threshold to positive probabilities to create labels +def to_labels(pos_probs, threshold): + return (pos_probs >= threshold).astype('int') +class incClassifierModel(): + def __init__(self,noOfClasses,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): + self.noOfClasses = noOfClasses + self.modelList =modelList + self.params =params + self.trainX =trainX + self.X =trainX + self.trainY =trainY + self.testX = testX + self.testY = testY + self.method =method + self.scoreParam=scoreParam + self.cvSplit=cvSplit + self.numIter=numIter + self.geneticParam=geneticParam + self.MakeFP0= MakeFP0 + self.MakeFN0=MakeFN0 + self.log = logging.getLogger('eion') + self.modelType = modelType + self.deployLocation = deployLocation + self.isRiverModel = False + self.AlgorithmNames={'Online Logistic Regression':'Online Logistic Regression', 'Online Softmax Regression':'Online Softmax Regression', 'Online Decision Tree Classifier':'Online Decision Tree Classifier', 'Online KNN Classifier':'Online KNN Classifier'} + self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} + + def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): + thresholdx = -1 + for threshold in threshold_range: + predictedData = estimator.predict_proba(testX) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 + p_score = precision_score(testY, predictedData) + r_score = recall_score(testY, predictedData) + tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() + if(checkParameter.lower() == 'fp'): + if fp == 0: + if(p_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + 
self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + + if(checkParameter.lower() == 'fn'): + if fn == 0: + if(r_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + return(thresholdx,p_score,r_score) + + def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): + cmodel = False + if(threshold != -1): + if(bestthreshold == -1): + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fp0: + if rscore > brscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif rscore == brscore: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fn0: + if pscore > bpscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif pscore == bpscore: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + else: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + else: + if(bestthreshold == -1): + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + + return cmodel,btscore,bestthreshold,brscore,bpscore + + + def firstFit(self): + bestModel='None' + bestParams={} + bestScore=-0xFFFF + bestEstimator = 'None' + scoredetails = '' + threshold = -1 + bestthreshold = -1 + precisionscore =-1 + bestprecisionscore=-1 + recallscore = -1 + bestrecallscore=-1 + self.bestTrainPredictedData = None + self.bestPredictedData = None + self.log.info('\\n---------- ClassifierModel has started ----------') + objClf = aion_matrix() + try: + for modelName in self.modelList: + paramSpace=self.params[modelName] + algoName = self.AlgorithmNames[modelName] + from incremental.riverML import riverML + riverMLObj = riverML() + self.log.info(""-------> Model Name: ""+str(modelName)) + start = time.time() + model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('classification',algoName,paramSpace,self.trainX, self.trainY, self.noOfClasses) + modelParams = str(modelParams) + predictedData = riverMLObj.getPrediction(estimator,self.testX) + executionTime=time.time() - start + self.testY.reset_index(inplace=True, drop=True) + score = objClf.get_score(self.scoreParam,self.testY.values.flatten(),predictedData.values.flatten()) + self.log.info(str(score)) + metrices = {} + metrices[""score""] = score + threshold = -1 + precisionscore = precision_score(self.testY, predictedData, average='macro') + recallscore = recall_score(self.testY, predictedData, average='macro') + self.log.info('---------> Total Execution: '+str(executionTime)) + + + if(scoredetails != ''): + scoredetails += ',' + + scoredetails += 
'{""Model"":""'+self.modelToAlgoNames[model]+'"",""Score"":'+str(score)+'}' + + status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) + if status: + bestScore =bscore + bestModel =model + bestParams=modelParams + bestEstimator=estimator + bestthreshold = threshold + bestrecallscore = recallscore + bestprecisionscore = precisionscore + self.bestTrainPredictedData = trainPredictedData + self.bestPredictedData = predictedData + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + self.log.info(""Status:- |... Testing Score: ""+str(score)) + + self.log.info('---------- ClassifierModel End ---------- \\n') + self.log.info('\\n------- Best Model and its parameters -------------') + self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) + self.log.info(""-------> Best Name: ""+str(bestModel)) + self.log.info(""-------> Best Score: ""+str(bestScore)) + + return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore + except Exception as inst: + self.log.info( '\\n-----> ClassifierModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + +import logging +import pickle +import os +import sys +import pandas as pd +from river import stream + +from river.linear_model import LogisticRegression, SoftmaxRegression, LinearRegression +from river.tree import ExtremelyFastDecisionTreeClassifier, HoeffdingAdaptiveTreeRegressor +# from river.ensemble import AdaptiveRandomForestRegressor, AdaptiveRandomForestClassifier +from river.neighbors import KNNClassifier, KNNRegressor +from river.multiclass import OneVsRestClassifier +from river.optim import SGD, Adam, AdaDelta, NesterovMomentum, RMSProp +# from river.optim.losses import CrossEntropy, Log, MultiClassLoss, Poisson, RegressionLoss, BinaryLoss, Huber +# from river.optim.initializers import Normal + + +class riverML(object): + def __init__(self): + self.algoDict={'Online Logistic Regression':LogisticRegression, 'Online Softmax Regression':SoftmaxRegression, 'Online Decision Tree Classifier':ExtremelyFastDecisionTreeClassifier, 'Online KNN Classifier':" +"KNNClassifier,'Online Linear Regression':LinearRegression, 'Online Decision Tree Regressor':HoeffdingAdaptiveTreeRegressor, 'Online KNN Regressor':KNNRegressor} + self.optDict={'sgd': SGD, 'adam':Adam, 'adadelta':AdaDelta, 'nesterovmomentum':NesterovMomentum, 'rmsprop':RMSProp} + self.log = logging.getLogger('eion') + + + + def getPrediction(self, model,X): + testStream = stream.iter_pandas(X) + preds = [] + for (xi,yi) in testStream: + pred = model.predict_one(xi) + preds.append(pred) + return pd.DataFrame(preds) + + + def startLearn(self,problemType,algoName,params,xtrain,ytrain,noOfClasses=None): + try: + model = self.algoDict[algoName] + params = self.parseParams(params, algoName) + if problemType == 'classification': + if noOfClasses>2: + model = OneVsRestClassifier(classifier=model(**params)) + else: + model = model(**params) + else: + model = model(**params) + + trainStream = stream.iter_pandas(xtrain, ytrain) + #head start + for i, (xi, yi) in enumerate(trainStream): + if i>100: + break + if yi!=None: + model.learn_one(xi, yi) + trainPredictedData = [] + 
trainStream = stream.iter_pandas(xtrain, ytrain) + for i, (xi, yi) in enumerate(trainStream): + if yi!=None: + trainPredictedData.append(model.predict_one(xi)) + model.learn_one(xi, yi) + trainPredictedData = pd.DataFrame(trainPredictedData) + return algoName, params, model, trainPredictedData + except Exception as inst: + self.log.info( '\\n-----> '+algoName+' failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def parseParams(self, params, algoName): + try: + from learner.parameters import parametersDefine + paramsObj = parametersDefine() + paramDict =paramsObj.paramDefine(params,method=None) + paramDict = {k:v[0] for k,v in paramDict.items()} + if algoName=='Online Logistic Regression' or algoName=='Online Softmax Regression' or algoName=='Online Linear Regression': + opt = self.optDict[paramDict.pop('optimizer').lower()] + lr = float(paramDict.pop('optimizer_lr')) + paramDict['optimizer'] = opt(lr) + return paramDict + except Exception as inst: + self.log.info( '\\n-----> Parameter parsing failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + + import json +import sys,os +from pathlib import Path, PurePosixPath +from fabric import Connection +import tarfile +import copy + +from hyperscalers.cloudServer import awsGPUTraining +import time +import shutil +import logging +import multiprocessing +from hyperscalers.mergeLogs import mergeLogs + + +class AION(awsGPUTraining): + + def __init__(self, config): + config['AMAZON_EC2']['InstanceIds'] = [] #removing the support for Instance Id + super().__init__(config) + self.remoteUpload = {} + + def copyDataOnServer(self, index): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDeployLocation'])) + client.put(self.remoteUpload['configFile'], self.remoteUpload['remoteConfigLoc']) + if not Path(self.remoteUpload['dataLoc']).exists(): + raise ValueError("" data location {} does not exist"".format(self.remoteUpload['dataLoc'])) + if Path(self.remoteUpload['dataLoc']).is_file(): + client.put(self.remoteUpload['dataLoc'], self.remoteUpload['remoteDataLoc']) + else: + client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDataLoc'])) + p = Path(self.remoteUpload['dataLoc']).glob('**/*') + files = [x for x in p if x.is_file()] + for file in files: + client.put(file, self.remoteUpload['remoteDataLoc']) + if self.remoteUpload.get('imgCsvLoc', None): + client.put(self.remoteUpload['imgCsvLoc'], self.remoteUpload['remoteDataLoc']) + except Exception as e: + raise ValueError(""Error in copying data to cloud server. "" + str(e)) + + def executeCode(self): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + cmd = '{} {} {}'.format(""/home/ubuntu/aws/venv/aion-env/bin/python3.8"", ""/home/ubuntu/aws/venv/aion-env/lib/python3.8/site-packages/AION/aion.py"", self.remoteUpload['remoteConfigLoc']) + output = client.run( cmd, warn=True) + except Exception as e: + raise ValueError(""Error in running code on cloud server. 
"" + str(e)) + + def downloadAndExtractModel(self): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + remote = PurePosixPath(self.remoteUpload['remoteDeployLocation']) + fileName = self.remoteUpload['deployName'] + local = Path(self.remoteUpload['localDeployLocation']) + tarFileName = fileName+"".tar.gz"" + cmd = 'cd {};tar -czvf {} -C {}/ {}'.format(remote, tarFileName, remote, fileName) + client.run( cmd) + extractFile = str(local/tarFileName) + client.get( str(remote/tarFileName), extractFile) + with tarfile.open(extractFile, ""r:gz"") as tar: + tar.extractall(local) + Path(extractFile).unlink() + client.run( 'rm -r {}'.format(remote/fileName)) + client.run( 'rm {}'.format(remote/tarFileName)) + + except Exception as e: + raise ValueError(""Error in downloading file from server. "" + str(e)) + def deleteDataOnServer(self): + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + dataPaths = [self.remoteUpload['remoteDataLoc'], self.remoteUpload['remoteDeployLocation'], self.remoteUpload['remoteConfigLoc']] + for loc in dataPaths: + if Path(loc).is_file(): + client.run( 'rm {}'.format(loc)) + else: + client.run( 'rm -r {}'.format(loc)) + + # only for csv files + def updateConfigGetRemoteLoc(self, config, index=0): + remote_location = '/home/ubuntu/aws/usecase' + remoteInputLoc = PurePosixPath(remote_location)/""input"" + remoteOutputLoc = PurePosixPath(remote_location)/""target"" + if Path(config['basic']['dataLocation']).is_dir(): + if Path(config['basic']['folderSettings']['labelDataFile']).parent !=Path(config['basic']['dataLocation']): + self.remoteUpload['imgCsvLoc'] = config['basic']['folderSettings']['labelDataFile'] + config['basic']['folderSettings']['labelDataFile'] = Path(config['basic']['folderSettings']['labelDataFile']).name + csvFile = Path(config['basic']['dataLocation']).name + + localFile = config['basic']['dataLocation'] + localDeployLoc = config['basic']['deployLocation'] + config['basic']['dataLocation'] = str(remoteInputLoc/csvFile) + config['basic']['deployLocation'] = str(remoteOutputLoc) + jsonFile = Path(__file__).parent/'remote_{}.json'.format(index) + with open(jsonFile,""w"") as f: + json.dump(config, f) + self.remoteUpload['remoteDataLoc'] = config['basic']['dataLocation'] + self.remoteUpload['remoteConfigLoc'] = str(remoteInputLoc)+ ""/temp.json"" + self.remoteUpload['remoteDeployLocation'] = config['basic']['deployLocation'] + self.remoteUpload['dataLoc'] = localFile + self.remoteUpload['configFile'] = str(jsonFile) + self.remoteUpload['localDeployLocation'] = localDeployLoc + self.remoteUpload['deployName'] = ""{}_{}"".format(config['basic']['modelName'],config['basic']['modelVersion']) + + def updateDeployPath(self): + import fileinput + logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/""model_training_logs.log"" + self.remoteUpload['localDeployLocation'] = self.remoteUpload['localDeployLocation'].replace('\\\\','/') + if Path(logFile).exists(): + with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file: + for line in file: + remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName'] + localLoc = self.remoteUpload['localDeployLocation'] +'/'+ ""_"".join(self.remoteUpload['deployName'].split('_')[:-1]) + print(line.replace(remoteLoc, localLoc), 
end='') + logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/""output.json"" + if Path(logFile).exists(): + with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file: + for line in file: + remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName'] + localLoc = self.remoteUpload['localDeployLocation'] +'/'+ ""_"".join(self.remoteUpload['deployName'].split('_')[:-1]) + print(line.replace(remoteLoc, localLoc), end='') + logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/""display.json"" + if Path(logFile).exists(): + with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file: + for line in file: + remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName'] + localLoc = self.remoteUpload['localDeployLocation'] +'/'+ ""_"".join(self.remoteUpload['deployName'].split('_')[:-1]) + print(line.replace(remoteLoc, localLoc), end='') + +def updateUserServerConfig(aws_config): + aws_config['ssh']['keyFilePath'] = str(Path(__file__).parent/""AION_GPU.pem"") + return aws_config + +def getKeyByValue(dictionary, refValue): + for key, value in dictionary.items(): + if value == refValue: + return key + return None + +def getKeysByValue(dictionary, refValue): + keys = [] + for key, value in dictionary.items(): + if value == refValue: + keys.append(key) + return keys + +class openInstancesStatus(): + + def __init__(self): + pass + + def addInstance(self, instanceId, args=None): + fileName = instanceId + '.ec2instance' + data = {} + data[instanceId] = args + with open(fileName, ""w"") as f: + json.dump( data, f, indent=4) #TODO do we need to encrypt + + def removeInstance(self, instanceId): + fileName = instanceId + '.ec2instance' + if Path(fileName).exists(): + Path(fileName).unlink() + + def clearPreviousInstancesState(self): + # check and stop the previous instance + openInstances = Path().glob(""*.ec2instance"") + for file in openInstances: + with open(file, 'r') as f: + data = json.load(f) + prevConfig = list(data.values())[0] + key = Path(file).stem + if prevConfig['AMAZON_EC2']['amiId']: + prevConfig['AMAZON_EC2']['InstanceIds'] = [key] + prevConfig['AMAZON_EC2']['amiId'] = """" # clear amiId + instance = awsGPUTraining(prevConfig) + if len(prevConfig['AMAZON_EC2']['InstanceIds']) > 0: + try: + if instance.is_instance_running(prevConfig['AMAZON_EC2']['InstanceIds'][0]): + instance.stop_server_instance() + except: + pass + self.removeInstance(key) + +class prepareConfig(): + + def __init__(self, config,noOfInstance,ComputeInfrastructure): + if isinstance(config, dict): + self.config = config + self.configDir = Path(__file__).parent + elif isinstance(config, str): + with open(config, 'r') as f: + self.config = json.load(f) + self.configDir = Path(config).parent + else: + raise TypeError(""{} type object is not supported for config"".format(type(config))) + self.problemType = getKeyByValue(self.config['basic']['analysisType'] ,""True"") + self.algorithms = getKeysByValue(self.config['basic']['algorithms'][self.problemType] ,""True"") + self.numInstances = int(noOfInstance) + self.computeInfrastructure = ComputeInfrastructure + self.isMultiInstance = False + self.validateMultiInstance() + self.newConfigs = [] + + def isRemoteTraining(self): + return True if(self.computeInfrastructure == ""True"") else False + + def validateMultiInstance(self): + if self.isRemoteTraining(): + if self.problemType == 'classification' or self.problemType == 
'regression': + if self.numInstances > len(self.algorithms): + self.numInstances = len(self.algorithms) + if len(self.algorithms) > 1 and self.numInstances > 1: + self.isMultiInstance = True + + def createNewConfigs(self): + configs = [] + algos" +"= self.algorithms + if len(algos) <= self.numInstances: + self.numInstances = len(algos) + algosPerInstances = (len(algos)+(self.numInstances - 1))//self.numInstances + remainingAlgos = len(algos) + for i in range(self.nu" +"ances = Path().glob(""*.ec2instance"") + for file in openInstances: + with open(file, 'r') as f: + data = json.load(f) + prevConfig = list(data.values())[0] + key = Path(file).stem + if prevConfig['AMAZON_EC2']['amiId']: + prevConfig['AMAZON_EC2']['InstanceIds'] = [key] + prevConfig['AMAZON_EC2']['amiId'] = """" # clear amiId + instance = awsGPUTraining(prevConfig) + if len(prevConfig['AMAZON_EC2']['InstanceIds']) > 0: + try: + if instance.is_instance_running(prevConfig['AMAZON_EC2']['InstanceIds'][0]): + instance.stop_server_instance() + except: + pass + self.removeInstance(key) + +class prepareConfig(): + + def __init__(self, config,noOfInstance,ComputeInfrastructure): + if isinstance(config, dict): + self.config = config + self.configDir = Path(__file__).parent + elif isinstance(config, str): + with open(config, 'r') as f: + self.config = json.load(f) + self.configDir = Path(config).parent + else: + raise TypeError(""{} type object is not supported for config"".format(type(config))) + self.problemType = getKeyByValue(self.config['basic']['analysisType'] ,""True"") + self.algorithms = getKeysByValue(self.config['basic']['algorithms'][self.problemType] ,""True"") + self.numInstances = int(noOfInstance) + self.computeInfrastructure = ComputeInfrastructure + self.isMultiInstance = False + self.validateMultiInstance() + self.newConfigs = [] + + def isRemoteTraining(self): + return True if(self.computeInfrastructure == ""True"") else False + + def validateMultiInstance(self): + if self.isRemoteTraining(): + if self.problemType == 'classification' or self.problemType == 'regression': + if self.numInstances > len(self.algorithms): + self.numInstances = len(self.algorithms) + if len(self.algorithms) > 1 and self.numInstances > 1: + self.isMultiInstance = True + + def createNewConfigs(self): + configs = [] + algos = self.algorithms + if len(algos) <= self.numInstances: + self.numInstances = len(algos) + algosPerInstances = (len(algos)+(self.numInstances - 1))//self.numInstances + remainingAlgos = len(algos) + for i in range(self.numInstances): + newConfig = copy.deepcopy(self.config) + for k,v in newConfig['basic']['algorithms'][self.problemType].items(): + newConfig['basic']['algorithms'][self.problemType][k] = ""False"" + algosPerInstances = remainingAlgos // (self.numInstances - i) + for j in range(algosPerInstances): + newConfig['basic']['algorithms'][self.problemType][algos[len(algos) - remainingAlgos + j]] = ""True"" + newConfig['basic']['modelVersion'] = newConfig['basic']['modelVersion'] + ""_{}"".format(i) + newFileName = str(self.configDir/""splittedConfig_{}.json"".format(i)) + with open(newFileName, 'w') as jsonFile: + json.dump(newConfig, jsonFile, indent=4) + configs.append(newFileName) + remainingAlgos -= algosPerInstances + return configs + +class Process(multiprocessing.Process): + def __init__(self, aws_config, configFile, index, openInstancesLog): + super(Process, self).__init__() + self.index = index + self.aws_config = aws_config + self.configFile = configFile + self.openInstances = openInstancesLog + + def 
run(self): + log = logging.getLogger('eion') + serverStart = False + try: + server = AION(self.aws_config) + with open(self.configFile,""r"") as f: + config = json.load(f) + server.updateConfigGetRemoteLoc(config, self.index) + instanceId = server.start_instance() + log.info('Status:-|... start instance: {}'.format(instanceId)) + serverStart = True + self.openInstances.addInstance(instanceId, self.aws_config) + time.sleep(40) + log.info('Status:-|... copying data on instance: {}'.format(instanceId)) + server.copyDataOnServer( config) + log.info('Status:-|... Training on instance: {}'.format(instanceId)) + server.executeCode() + log.info('Status:-|... downloading data from instance: {}'.format(instanceId)) + server.downloadAndExtractModel() + server.deleteDataOnServer() + log.info('Status:-|... stopping instance: {}'.format(instanceId)) + server.stop_server_instance() + serverStart = False + self.openInstances.removeInstance(instanceId) + server.updateDeployPath() + except Exception as e: + print(e) + pass + finally: + if serverStart: + log.info('Status:-|... stopping instance: {}'.format(instanceId)) + server.stop_server_instance() + self.openInstances.removeInstance(instanceId) + +def awsTraining(configPath): + try: + # This function responsible for starting the training with AWS + with open(configPath, ""r"") as f: + config = json.load(f) + + ec2 = boto3.resource('ec2',region_name=AWS_Region) + instance_id= instance[0].instance_id + deployFolder = config['basic']['deployLocation'] + iterName = config['basic']['modelName'] + iterVersion = config['basic']['modelVersion'] + dataLocation = config['basic']['dataLocation'] + usecaseLocation = os.path.join(deployFolder,iterName) + if not Path(usecaseLocation).exists(): + os.makedirs(usecaseLocation) + deployLocation = os.path.join(usecaseLocation,iterVersion) + if Path(deployLocation).exists(): + shutil.rmtree(deployLocation) + os.makedirs(deployLocation) + logLocation = os.path.join(deployLocation,'log') + if not Path(logLocation).exists(): + os.makedirs(logLocation) + #read the server config + logFileName=os.path.join(logLocation,'model_training_logs.log') + filehandler = logging.FileHandler(logFileName, 'w','utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('eion') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + log.info('Status:-|... 
Compute Infrastructure:AMAZON EC2') + with open(Path(__file__).parent/""../config/compute.conf"", ""r"") as f: + aws_config = json.load(f) + aws_config = updateUserServerConfig(aws_config) + configSplitter = prepareConfig(sys.argv[1],aws_config['AMAZON_EC2']['NoOfInstance'],aws_config['ComputeInfrastructure']) + newConfigs = configSplitter.createNewConfigs() + print(newConfigs) + openInstances = openInstancesStatus() + openInstances.clearPreviousInstancesState() + folders = [] + processes = [0] * len(newConfigs) + for index, config in enumerate(newConfigs): + processes[index] = Process(aws_config, config, index, openInstances) + processes[index].start() + for index, config in enumerate(newConfigs): + processes[index].join() + folders.append(deployLocation + '_{}'.format(index)) + if Path(deployLocation+'_0').exists(): + filehandler.close() + log.removeHandler(filehandler) + merge = mergeLogs(folders) + merge.mergeFolder() + else: + output = {""status"":""FAIL"",""message"":""Please check cloud server configuration.""} + output = json.dumps(output) + log.info('server code execution failed !....') + log.info('\\n------------- Output JSON ------------') + log.info('-------> Output :'+str(output)) + log.info('------------- Output JSON ------------\\n') + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + except Exception as inst: + output = {""status"":""FAIL"",""message"":str(inst).strip('""')} + output = json.dumps(output) + log.info('server code execution failed !....'+str(inst)) + log.info('\\n------------- Output JSON ------------') + log.info('-------> Output :'+str(output)) + log.info('------------- Output JSON ------------\\n') + print(""\\n"") + print(""aion_learner_status:"",output) + print(""\\n"") + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import boto3 +import json +import time +import requests +import datetime +import uuid +import shutil +from websocket import create_connection +from botocore.exceptions import ClientError +import tarfile +from pathlib import Path, PurePosixPath +from stat import S_ISDIR +from fabric import Connection +import time +import logging + +class awsGPUTraining(): + + + def __init__(self, config): + local_config = {""location"":{""data"":""aion/data/od"", ""code"":"""", ""pretrainedModel"":""aion/pretrainedModels""}, + ""jupyter"":{""header"":{""Authorization"":""Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2""}, ""portNo"":""8888"", + ""notebook_path"":""aion/code/AWS_GPU_OD_Training.ipynb""}} + self.serverConfig = config[""server""] + self.sshConfig = config[""ssh""] + self.log = logging.getLogger('eion') + self.codeLocation = local_config[""location""][""code""] + self.dataLocation = local_config[""location""][""data""] + self.pretrainedModelLocation = local_config[""location""][""pretrainedModel""] + self.jupyterConfig = local_config[""jupyter""] + self.serverIP = """" + if self.serverConfig[""awsAccessKeyId""] == """" or self.serverConfig[""awsSecretAccessKey""] == """": + raise ValueError(""Cloud server configuration is not available."") + if len(self.serverConfig[""InstanceIds""]) == 0 and self.serverConfig[""amiId""] == """": + raise ValueError(""Please provide either InstanceIds or amiId in server config"") + + self.instanceId = [] + self.separate_instance = False + if self.serverConfig[""amiId""] != """": + self.separate_instance = True + else: + if len(self.serverConfig[""InstanceIds""]): + if isinstance(self.serverConfig[""InstanceIds""], list): + self.instanceId = self.serverConfig[""InstanceIds""] + elif isinstance(self.serverConfig[""InstanceIds""], str): + self.instanceId = [self.serverConfig[""InstanceIds""]] + + self.ec2_client = boto3.client(self.serverConfig[""serverName""], region_name=self.serverConfig[""regionName""], aws_access_key_id=self.serverConfig[""awsAccessKeyId""], aws_secret_access_key=self.serverConfig[""awsSecretAccessKey""]) + + def __sftp_exists(self, sftp, path): + try: + sftp.stat(path) + return True + except:# IOError, e: + #if e.errno == errno.ENOENT: + return False + + def __rmtree(self, sftp, remotepath, level=0): + for f in sftp.listdir_attr(remotepath): + rpath = str(PurePosixPath(remotepath)/f.filename) + if S_ISDIR(f.st_mode): + self.__rmtree(sftp, rpath, level=(level + 1)) + sftp.rmdir(rpath) + else: + rpath = str(PurePosixPath(remotepath)/f.filename) + sftp.remove(rpath) + + def copy_files_to_server(self, location): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + client.sudo('rm -rf {}/*'.format(self.dataLocation)) + tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix("".tar.gz"")) + client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord') + client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord') + client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config') + client.put(location+'/label_map.pbtxt', self.dataLocation+'/label_map.pbtxt') + client.put(location+'/model.config', self.dataLocation+'/model.config') + if self.jupyterConfig != """": + client.run(""touch {}"".format(self.dataLocation+'/log.txt')) + except Exception as e: + raise ValueError(""Error in copying data to cloud server. 
"" + str(e)) + + def __myexec(self, ssh, cmd, timeout, want_exitcode=False): + # one channel per command + stdin, stdout, stderr = ssh.exec_command(cmd) + # get the shared channel for stdout/stderr/stdin + channel = stdout.channel + + # we do not need stdin. + stdin.close() + # indicate that we're not going to write to that channel anymore + channel.shutdown_write() + + # read stdout/stderr in order to prevent read block hangs + stdout_chunks = [] + stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer))) + # chunked read to prevent stalls + while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready(): + # stop if channel was closed prematurely, and there is no data in the buffers. + got_chunk = False + readq, _, _ = select.select([stdout.channel], [], [], timeout) + for c in readq: + " +" + if c.recv_ready(): + stdout_chunks.append(stdout.channel.recv(len(c.in_buffer))) + got_chunk = True + if c.recv_stderr_ready(): + # make sure to read stderr to prevent stall + stderr.channel.recv_stderr(len(c.in_stderr_buffer)) + got_chunk = True + ''' + 1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file). + 2) if no data arrived in the last loop, check if we already received the exit code + 3) check if input buffers are empty + 4) exit the loop + ''' + if not got_chunk \\ + and stdout.channel.exit_status_ready() \\ + and not stderr.channel.recv_stderr_ready() \\ + and not stdout.channel.recv_ready(): + # indicate that we're not going to read from this channel anymore + stdout.channel.shutdown_read() + # close the channel + stdout.channel.close() + break # exit as remote side is finished and our bufferes are empty + + # close all the pseudofiles + stdout.close() + stderr.close() + + if want_exitcode: + # exit code is always ready at this point + return (''.join(stdout_chunks), stdout.channel.recv_exit_status()) + return ''.join(stdout_chunks) + + + def __myexec1(self, ssh, cmd, timeout, want_exitcode=False): + # one channel per command + stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True) + for line in iter(stderr.readline, """"): + print(line, end="""") + stdin.close() + stdout.close() + stderr.close() + + def executeCode(self): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation) + client.run( cmd) + except Exception as e: + raise ValueError(""Error in running code on cloud server. 
"" + str(e)) + + def start_executing_notebook(self): + try: + publicIp_Port = self.serverIP + "":"" + self.jupyterConfig[""portNo""] + conURL = ""ws://"" + publicIp_Port + base = 'http://' + publicIp_Port + '' + + headers = self.jupyterConfig[""header""] + url = base + '/api/kernels' + flag = True + while flag: # deadlock need to add timeout + response = requests.post(url, headers=headers) + flag = False + kernel = json.loads(response.text) + + # Load the notebook and get the code of each cell + url = base + '/api/contents/' + self.jupyterConfig[""notebook_path""] + response = requests.get(url, headers=headers) + file = json.loads(response.text) + code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ] + ws = create_connection(conURL + ""/api/kernels/"" + kernel[""id""] + ""/channels"", + header=headers) + + def send_execute_request(code): + msg_type = 'execute_request'; + content = {'code': code, 'silent': False} + hdr = {'msg_id': uuid.uuid1().hex, + 'username': 'test', + 'session': uuid.uuid1().hex, + 'data': datetime.datetime.now().isoformat(), + 'msg_type': msg_type, + 'version': '5.0'} + msg = {'header': hdr, 'parent_header': hdr, + 'metadata': {}, + 'content': content} + return msg + + for c in code: + ws.send(json.dumps(send_execute_request(c))) + + # We ignore all the other messages, we just get the code execution output + # (this needs to be improved for production to take into account errors, large cell output, images, etc.) + error_msg = '' + traceback_msg = '' + for i in range(0, len(code)): + msg_type = ''; + while msg_type != ""stream"": + rsp = json.loads(ws.recv()) + msg_type = rsp[""msg_type""] + if msg_type == 'error': + raise ValueError(""Error on Cloud machine: ""+rsp['content']['evalue']) + ws.close() + self.log.info('Status:- |...Execution Started`') + except ClientError as e: + raise ValueError(e) + + + + def __wait_for_completion(self, sftp, remoteLogFile, localLogFile): + + waiting = True + error_msg = """" + while waiting: + time.sleep(5 * 60) + try: + sftp.get(str(remoteLogFile), str(localLogFile)) + with open(localLogFile, ""r"") as f: + content = f.readlines() + for x in content: + if ""Error"" in x: + waiting = False + error_msg = x + if ""success"" in x: + waiting = False + except: + raise (str(e)) + return error_msg + + def copy_file_from_server(self, localPath): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + + remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt' + localLogFile = Path(localPath)/'remote_log.txt' + client.get(str(remoteLogFile), str(localLogFile)) + tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix("".tar.gz"") + client.get(str(tarFile), str(Path(localPath)/tarFile.name)) + except: + raise + return str(Path(localPath)/tarFile.name) + + def create_instance(self): + instances = self.ec2_client.run_instances( + ImageId=self.serverConfig[""amiId""], + MinCount=1, + MaxCount=1, + InstanceType=""t2.xlarge"", + KeyName=""AION_GPU"", + SecurityGroupIds = [""sg-02c3a6c8dd67edb74""] + ) + self.instanceId = [instances['Instances'][0]['InstanceId']] + + def start_instance(self): + if self.separate_instance: + self.create_instance() + try: + response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=True) + except Exception as e: + if 'DryRunOperation' not in str(e): + raise ValueError(""Error in starting 
the EC2 instance, check server configuration. "" + str(e)) + try: + running_state_code = 16 + response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=False) + instance_status_code = 0 + while instance_status_code != running_state_code: + response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) + instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] + if instance_status_code == running_state_code: + self.serverIP = response['Reservations'][0]['Instances'][0]['PublicIpAddress'] + break + except ClientError as e: + raise ValueError(""Error in starting the EC2 instance. "" + str(e)) + + def terminate_instance(self): + ec2 = boto3.resource(self.serverConfig[""serverName""], region_name=self.serverConfig[""regionName""], aws_access_key_id=self.serverConfig[""awsAccessKeyId""], aws_secret_access_key=self.serverConfig[""awsSecretAccessKey""]) + + ec2.instances.filter(InstanceIds=self.instanceId).terminate() # for terminating an ec2 instance + + def stop_server_instance(self): + try: + self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=True) + except Exception as e: + if 'DryRunOperation' not in str(e): + raise + stopped_state_code = 80 + # Dry run succeeded, call stop_instances without dryrun + try: + response = self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=False) + response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) + instance_status_code = 0 + while instance_status_code != stopped_state_code: + response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) + instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] + if instance_status_code == stopped_state_code: + break + except: + raise ValueError(""Error in stopping the EC2 instance {}.Please stop it manually "".format(self.instanceId[0])) + if self.separate_instance: + try: + self.terminate_instance() + except: + raise ValueError(""Error in terminating the EC2 instance {}.Please terminate it manually "".format(self.instanceId[0])) + + import json +from pathlib import Path +import shutil + +class mergeLogs(): + + def __init__(self, folders, dataLocation=None): + self.folders = [Path(x) for x in folders] + self.dataLocation = dataLocation + self.baseFolder = """" + self.outputData = {} + + def readOutputStr(self, data): + text = ""-------> Output :"" + output = data.find(text) + + def keywithmaxval(self, d): + """""" a) create a list of the dict's keys and values; + b) return the key with the max value"""""" + v=list(d.values()) + k=list(d.keys()) + return k[v.index(max(v))] + + def getBestScore(self, data): + text = ""-------> Output :"" + output = [x[len(text):-1] for x in data if text in x] + self.outputData = json.loads(output[0]) + return self.outputData['data']['BestScore'] + + def getModelParams(self, data): + capture = False + startText = ""---------- ClassifierModel has started ----------"" + endText = ""---------- ClassifierModel End ---------- "" + modelBasedText = ""Models Based Selected Features Start"" + CorrelationBased = ""Top/CorrelationBased Features Start"" + removableText = ""Status:- |... 
Search Optimization Method applied: random\\n"" + + modelsParam = [] + modelcorrelation = None + output = {} + for x in data: + if endText in x: + capture = False + output[modelcorrelation] = ''.join(modelsParam) + modelcorrelation = None + modelsParam = [] + elif capture: + if x != removableText: + modelsParam.append(x) + elif startText in x: + capture = True + elif modelBasedText in x: + modelcorrelation = 'modelBased' + elif CorrelationBased in x: + modelcorrelation = 'correlationBased' + return output + + def mergeConfigFiles(self, bestScoreFolder): + # path is already updated + with open(bestScoreFolder/'etc'/'code_config.json', 'r') as f: + config = json.load(f) + if self.dataLocation: + config['dataLocation'] = self.dataLocation + if 'modelVersion' in config.keys(): + config['modelVersion'] = '_'.join(config['modelVersion'].split('_')[:-1]) + with open(bestScoreFolder/'etc'/'code_config.json', 'w') as f: + json.dump(config, f, indent=4) + with open(bestScoreFolder/'etc'/'display.json', 'r') as f: + config = json.load(f) + if 'version' in config.keys(): + config['version'] = '_'.join(config['version'].split('_')[:-1]) + with open(bestScoreFolder/'etc'/'display.json', 'w') as f: + json.dump(config, f, indent=4) + if len(self.folders) > 1: + with open(bestScoreFolder/'etc'/'output.json', 'r') as f: + config = json.load(f) + evaluated_models = config['data']['EvaluatedModels'] + for folder in self.folders: + if folder != bestScoreFolder: + with open(folder/'etc'/'output.json', 'r') as f: + sub_config = json.load(f) + for evaluated_model in sub_config['data']['EvaluatedModels']: + evaluated_models.append(evaluated_model) + with open(bestScoreFolder/'etc'/'output.json', 'w') as f: + config['data']['EvaluatedModels'] = evaluated_models + json.dump(config, f, indent=4) + + + def mergeLogFiles(self, bestScoreFolder, data): + startText = ""---------- ClassifierModel has started ----------\\n"" + endText = ""---------- ClassifierModel End ---------- \\n"" + modelBasedText = ""Models Based Selected Features Start"" + CorrelationBased = ""Top/CorrelationBased Features Start"" + + with open(bestScoreFolder/'log'/'model_training_logs.log', 'r') as f: + text = f.read() + CorrelationBasedIndex = text.find(CorrelationBased) + modelBasedTextIndex = text.find(modelBasedText) + firstendIndex = text.find(endText) + numOfMethods = 0 + if CorrelationBasedIndex > 0: + numOfMethods += 1 + if modelBasedTextIndex > 0: + numOfMethods += 1 + if numOfMethods == 2: + secondendIndex = text[firstendIndex+ len(endText):].find(endText) +firstendIndex+len(endText) + # assuming correlation is always first + for k,v in data.items(): + if k != bestScoreFolder: + if 'correlationBased' in v.keys(): + text = text[:firstendIndex] + v['correlationBased'] + text[firstendIndex:] + firstendIndex += len(v['correlationBased']) + if numOfMethods == 2: + secondendIndex += len(v['correlationBased']) + if 'modelBased' in v.keys(): + if numOfMethods == 2: + text = text[:secondendIndex] + v['modelBased'] + text[secondendIndex:] + secondendIndex += len(v['modelBased']) + else: + text = text[:firstendIndex] + v['modelBased'] + text[firstendIndex:] + firstendIndex += len(v['modelBased']) + with open(bestScoreFolder/'log'/'model_training_logs.log', 'w') as f: + text = text.replace(str(bestScoreFolder), str(self.baseFolder)) + f.write(text) + + def mergeFolder(self): + bestScoreInFile" +"= {} + modelsTrainOutput = {} + self.baseFolder = self.folders[0].parent/""_"".join(self.folders[0].name.split('_')[:-1]) + if len(self.folders) == 1: + if 
self.baseFolder.exists(): + shutil.rmtree(self.baseFolder) +" +" + predictions (:obj:`list` of :obj:`Prediction\\ + `): + A list of predictions, as returned by the :meth:`test() + ` method. + verbose: If True, will print computed value. Default is ``True``. + + + Returns: + The Mean Squared Error of predictions. + + Raises: + ValueError: When ``predictions`` is empty. + """""" + + if not predictions: + raise ValueError(""Prediction list is empty."") + + mse_ = np.mean( + [float((true_r - est) ** 2) for (_, _, true_r, est, _) in predictions] + ) + + if verbose: + print(f""MSE: {mse_:1.4f}"") + + return mse_ + + +def mae(predictions, verbose=True): + """"""Compute MAE (Mean Absolute Error). + + .. math:: + \\\\text{MAE} = \\\\frac{1}{|\\\\hat{R}|} \\\\sum_{\\\\hat{r}_{ui} \\\\in + \\\\hat{R}}|r_{ui} - \\\\hat{r}_{ui}| + + Args: + predictions (:obj:`list` of :obj:`Prediction\\ + `): + A list of predictions, as returned by the :meth:`test() + ` method. + verbose: If True, will print computed value. Default is ``True``. + + + Returns: + The Mean Absolute Error of predictions. + + Raises: + ValueError: When ``predictions`` is empty. + """""" + + if not predictions: + raise ValueError(""Prediction list is empty."") + + mae_ = np.mean([float(abs(true_r - est)) for (_, _, true_r, est, _) in predictions]) + + if verbose: + print(f""MAE: {mae_:1.4f}"") + + return mae_ + + +def fcp(predictions, verbose=True): + """"""Compute FCP (Fraction of Concordant Pairs). + + Computed as described in paper `Collaborative Filtering on Ordinal User + Feedback `_ by Koren + and Sill, section 5.2. + + Args: + predictions (:obj:`list` of :obj:`Prediction\\ + `): + A list of predictions, as returned by the :meth:`test() + ` method. + verbose: If True, will print computed value. Default is ``True``. + + + Returns: + The Fraction of Concordant Pairs. + + Raises: + ValueError: When ``predictions`` is empty. + """""" + + if not predictions: + raise ValueError(""Prediction list is empty."") + + predictions_u = defaultdict(list) + nc_u = defaultdict(int) + nd_u = defaultdict(int) + + for u0, _, r0, est, _ in predictions: + predictions_u[u0].append((r0, est)) + + for u0, preds in predictions_u.items(): + for r0i, esti in preds: + for r0j, estj in preds: + if esti > estj and r0i > r0j: + nc_u[u0] += 1 + if esti >= estj and r0i < r0j: + nd_u[u0] += 1 + + nc = np.mean(list(nc_u.values())) if nc_u else 0 + nd = np.mean(list(nd_u.values())) if nd_u else 0 + + try: + fcp = nc / (nc + nd) + except ZeroDivisionError: + raise ValueError( + ""cannot compute fcp on this list of prediction. "" + + ""Does every user have at least two predictions?"" + ) + + if verbose: + print(f""FCP: {fcp:1.4f}"") + + return fcp + """""" +The :mod:`dataset ` module defines the :class:`Dataset` class +and other subclasses which are used for managing datasets. + +Users may use both *built-in* and user-defined datasets (see the +:ref:`getting_started` page for examples). Right now, three built-in datasets +are available: + +* The `movielens-100k `_ dataset. +* The `movielens-1m `_ dataset. +* The `Jester `_ dataset 2. + +Built-in datasets can all be loaded (or downloaded if you haven't already) +using the :meth:`Dataset.load_builtin` method. +Summary: + +.. 
autosummary:: + :nosignatures: + + Dataset.load_builtin + Dataset.load_from_file + Dataset.load_from_folds +"""""" + + +import itertools +import os +import sys +from collections import defaultdict + +from .builtin_datasets import BUILTIN_DATASETS, download_builtin_dataset + +from .reader import Reader +from .trainset import Trainset + + +class Dataset: + """"""Base class for loading datasets. + + Note that you should never instantiate the :class:`Dataset` class directly + (same goes for its derived classes), but instead use one of the three + available methods for loading datasets."""""" + + def __init__(self, reader): + + self.reader = reader + + @classmethod + def load_builtin(cls, name=""ml-100k"", prompt=True): + """"""Load a built-in dataset. + + If the dataset has not already been loaded, it will be downloaded and + saved. You will have to split your dataset using the :meth:`split + ` method. See an example in the :ref:`User + Guide `. + + Args: + name(:obj:`string`): The name of the built-in dataset to load. + Accepted values are 'ml-100k', 'ml-1m', and 'jester'. + Default is 'ml-100k'. + prompt(:obj:`bool`): Prompt before downloading if dataset is not + already on disk. + Default is True. + + Returns: + A :obj:`Dataset` object. + + Raises: + ValueError: If the ``name`` parameter is incorrect. + """""" + + try: + dataset = BUILTIN_DATASETS[name] + except KeyError: + raise ValueError( + ""unknown dataset "" + + name + + "". Accepted values are "" + + "", "".join(BUILTIN_DATASETS.keys()) + + ""."" + ) + + # if dataset does not exist, offer to download it + if not os.path.isfile(dataset.path): + answered = not prompt + while not answered: + print( + ""Dataset "" + name + "" could not be found. Do you want "" + ""to download it? [Y/n] "", + end="""", + ) + choice = input().lower() + + if choice in [""yes"", ""y"", """", ""omg this is so nice of you!!""]: + answered = True + elif choice in [""no"", ""n"", ""hell no why would i want that?!""]: + answered = True + print(""Ok then, I'm out!"") + sys.exit() + + download_builtin_dataset(name) + + reader = Reader(**dataset.reader_params) + + return cls.load_from_file(file_path=dataset.path, reader=reader) + + @classmethod + def load_from_file(cls, file_path, reader): + """"""Load a dataset from a (custom) file. + + Use this if you want to use a custom dataset and all of the ratings are + stored in one file. You will have to split your dataset using the + :meth:`split ` method. See an example in the + :ref:`User Guide `. + + + Args: + file_path(:obj:`string`): The path to the file containing ratings. + reader(:obj:`Reader `): A reader to read + the file. + """""" + + return DatasetAutoFolds(ratings_file=file_path, reader=reader) + + @classmethod + def load_from_folds(cls, folds_files, reader): + """"""Load a dataset where folds (for cross-validation) are predefined by + some files. + + The purpose of this method is to cover a common use case where a + dataset is already split into predefined folds, such as the + movielens-100k dataset which defines files u1.base, u1.test, u2.base, + u2.test, etc... It can also be used when you don't want to perform + cross-validation but still want to specify your training and testing + data (which comes down to 1-fold cross-validation anyway). See an + example in the :ref:`User Guide `. + + + Args: + folds_files(:obj:`iterable` of :obj:`tuples`): The list of the + folds. A fold is a tuple of the form ``(path_to_train_file, + path_to_test_file)``. + reader(:obj:`Reader `): A reader to read + the files. 
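+# Usage sketch for the class methods documented above. load_builtin() fetches a
+# built-in dataset (downloading it on first use), while load_from_file() and
+# load_from_folds() read custom files through a Reader. The file paths below are
+# hypothetical placeholders following the movielens-100k layout.
+from surprise import Dataset, Reader
+
+data = Dataset.load_builtin('ml-100k')
+
+reader = Reader(line_format='user item rating timestamp', sep='\t')
+custom_data = Dataset.load_from_file('ratings.tsv', reader=reader)
+
+folds = [('u1.base', 'u1.test'), ('u2.base', 'u2.test')]
+folds_data = Dataset.load_from_folds(folds, reader=reader)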
+ + """""" + + return DatasetUserFolds(folds_files=folds_files, reader=reader) + + @classmethod + def load_from_df(cls, df, reader): + """"""Load a dataset from a pandas dataframe. + + Use this if you want to use a custom dataset that is stored in a pandas + dataframe. See the :ref:`User Guide` for an + example. + + Args: + df(`Dataframe`): The dataframe containing the ratings. It must have + three columns, corresponding to the user (raw) ids, the item + (raw) ids, and the ratings, in this order. + reader(:obj:`Reader `): A reader to read + the file. Only the ``rating_scale`` field needs to be + specified. + """""" + + return DatasetAutoFolds(reader=reader, df=df) + + def read_ratings(self, file_name): + """"""Return a list of ratings (user, item, rating, timestamp) read from + file_name"""""" + + with open(os.path.expanduser(file_name)) as f: + raw_ratings = [ + self.reader.parse_line(line) + for line in itertools.islice(f, self.reader.skip_lines, None) + ] + return raw_ratings + + def construct_trainset(self, raw_trainset): + + raw2inner_id_users = {} + raw2inner_id_items = {} + + current_u_index = 0 + current_i_index = 0 + + ur = defaultdict(list) + ir = defaultdict(list) + + # user raw id, item raw id, translated rating, time stamp + for urid, irid, r, timestamp in raw_trainset: + try: + uid = raw2inner_id_users[urid] + except KeyError: + uid = current_u_index + raw2inner_id_users[urid] = current_u_index + current_u_index += 1 + try: + iid = raw2inner_id_items[irid] + except KeyError: + iid = current_i_index + raw2inner_id_items[irid] = current_i_index + current_i_index += 1 + + ur[uid].append((iid, r)) + ir[iid].append((uid, r)) + + n_users = len(ur) # number of users + n_items = len(ir) # number of items + n_ratings = len(raw_trainset) + + trainset = Trainset( + ur, + ir, + n_users, + n_items, + n_ratings, + self.reader.rating_scale, + raw2inner_id_users, + raw2inner_id_items, + ) + + return trainset + + def construct_testset(self, raw_testset): + + return [(ruid, riid, r_ui_trans) for (ruid, riid, r_ui_trans, _) in raw_testset] + + +class DatasetUserFolds(Dataset): + """"""A derived class from :class:`Dataset` for which folds (for + cross-validation) are predefined."""""" + + def __init__(self, folds_files=None, reader=None): + + Dataset.__init__(self, reader) + self.folds_files = folds_files + + # check that all files actually exist. + for train_test_files in self.folds_files: + for f in train_test_files: + if not os.path.isfile(os.path.expanduser(f)): + raise ValueError(""File "" + str(f) + "" does not exist."") + + +class DatasetAutoFolds(Dataset): + """"""A derived class from :class:`Dataset` for which folds (for + cross-validation) are not predefined. (Or for when there are no folds at + all)."""""" + + def __init__(self, ratings_file=None, reader=None, df=None): + + Dataset.__init__(self, reader) + self.has_been_split = False # flag indicating if split() was called. + + if ratings_file is not None: + self.ratings_file = ratings_file + self.raw_ratings = self.read_ratings(self.ratings_file) + elif df is not None: + self.df = df + self.raw_ratings = [ + (uid, iid, float(r), None) + for (uid, iid, r) in self.df.itertuples(index=False) + ] + else: + raise ValueError(""Must specify ratings file or dataframe."") + + def build_full_trainset(self): + """"""Do not split the dataset into folds and just return a trainset as + is, built from the whole dataset. + + User can then query for predictions, as shown in the :ref:`User Guide + `. + + Returns: + The :class:`Trainset `. 
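+# Usage sketch for load_from_df() and build_full_trainset() described above: ratings
+# held in a pandas DataFrame (user, item, rating columns, in that order) are wrapped
+# into a Dataset and turned into a single Trainset. The values are made up for
+# illustration.
+import pandas as pd
+from surprise import Dataset, Reader
+
+ratings = pd.DataFrame({'user': ['A', 'A', 'B'],
+                        'item': ['x', 'y', 'x'],
+                        'rating': [3.0, 5.0, 4.0]})
+data = Dataset.load_from_df(ratings[['user', 'item', 'rating']],
+                            Reader(rating_scale=(1, 5)))
+trainset = data.build_full_trainset()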
+ """""" + + return self.construct_trainset(self.raw_ratings) + from pkg_resources import get_distribution + +from . import dump, model_selection +from .builtin_datasets import get_dataset_dir + +from .dataset import Dataset + +from .prediction_algorithms import ( + AlgoBase, + BaselineOnly, + CoClustering, + KNNBaseline, + KNNBasic, + KNNWithMeans, + KNNWithZScore, + NMF, + NormalPredictor, + Prediction, + PredictionImpossible, + SlopeOne, + SVD, + SVDpp, +) +from .reader import Reader +from .trainset import Trainset + +__all__ = [ + ""AlgoBase"", + ""NormalPredictor"", + ""BaselineOnly"", + ""KNNBasic"", + ""KNNWithMeans"", + ""KNNBaseline"", + ""SVD"", + ""SVDpp"", + ""NMF"", + ""SlopeOne"", + ""CoClustering"", + ""PredictionImpossible"", + """ +"Prediction"", + ""Dataset"", + ""Reader"", + ""Trainset"", + ""dump"", + ""KNNWithZScore"", + ""get_dataset_dir"", + ""model_selection"", +] + +__version__ = get_distribution(""scikit-surprise"").version + """"""This module contains the Reader class."""""" + + +from .builtin_datasets import BUILTIN_DATASETS + + +class Reader: + """"""The Reader class is used to parse a file containing ratings. + + Such a file is assumed to specify only one rating per line, and each line + needs to respect the following structure: :: + + user ; item ; rating ; [timestamp] + + where the order of the fields and the separator (here ';') may be + arbitrarily defined (see below). brackets indicate that the timestamp + field is optional. + + For each built-in dataset, Surprise also provides predefined readers which + are useful if you want to use a custom dataset that has the same format as + a built-in one (see the ``name`` parameter). + + + Args: + name(:obj:`string`, optional): If specified, a Reader for one of the + built-in datasets is returned and any other parameter is ignored. + Accepted values are 'ml-100k', 'ml-1m', and 'jester'. Default + is ``None``. + line_format(:obj:`string`): The fields names, in the order at which + they are encountered on a line. Please note that ``line_format`` is + always space-separated (use the ``sep`` parameter). Default is + ``'user item rating'``. + sep(char): the separator between fields. Example : ``';'``. + rating_scale(:obj:`tuple`, optional): The rating scale used for every + rating. Default is ``(1, 5)``. + skip_lines(:obj:`int`, optional): Number of lines to skip at the + beginning of the file. Default is ``0``. + + """""" + + def __init__( + self, + name=None, + line_format=""user item rating"", + sep=None, + rating_scale=(1, 5), + skip_lines=0, + ): + + if name: + try: + self.__init__(**BUILTIN_DATASETS[name].reader_params) + except KeyError: + raise ValueError( + ""unknown reader "" + + name + + "". Accepted values are "" + + "", "".join(BUILTIN_DATASETS.keys()) + + ""."" + ) + else: + self.sep = sep + self.skip_lines = skip_lines + self.rating_scale = rating_scale + + lower_bound, higher_bound = rating_scale + + splitted_format = line_format.split() + + entities = [""user"", ""item"", ""rating""] + if ""timestamp"" in splitted_format: + self.with_timestamp = True + entities.append(""timestamp"") + else: + self.with_timestamp = False + + # check that all fields are correct + if any(field not in entities for field in splitted_format): + raise ValueError(""line_format parameter is incorrect."") + + self.indexes = [splitted_format.index(entity) for entity in entities] + + def parse_line(self, line): + """"""Parse a line. + + Ratings are translated so that they are all strictly positive. 
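+# A small sketch of the Reader configuration described above: the field order and
+# separator are arbitrary, and ratings are parsed as floats. The sample line is made
+# up for illustration.
+from surprise import Reader
+
+reader = Reader(line_format='item user rating', sep=';', rating_scale=(1, 10))
+uid, iid, rating, ts = reader.parse_line('42;alice;7')
+# -> uid='alice', iid='42', rating=7.0, ts=None (no timestamp field was declared)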
+ + Args: + line(str): The line to parse + + Returns: + tuple: User id, item id, rating and timestamp. The timestamp is set + to ``None`` if it does no exist. + """""" + + line = line.split(self.sep) + try: + if self.with_timestamp: + uid, iid, r, timestamp = (line[i].strip() for i in self.indexes) + else: + uid, iid, r = (line[i].strip() for i in self.indexes) + timestamp = None + + except IndexError: + raise ValueError( + ""Impossible to parse line. Check the line_format"" "" and sep parameters."" + ) + + return uid, iid, float(r), timestamp + #!/usr/bin/env python + +import argparse +import os +import random as rd +import shutil +import sys + +import numpy as np + +import surprise.dataset as dataset +from surprise import __version__ +from surprise.builtin_datasets import get_dataset_dir +from surprise.dataset import Dataset +from surprise.model_selection import cross_validate, KFold, PredefinedKFold + +from surprise.prediction_algorithms import ( + BaselineOnly, + CoClustering, + KNNBaseline, + KNNBasic, + KNNWithMeans, + NMF, + NormalPredictor, + SlopeOne, + SVD, + SVDpp, +) +from surprise.reader import Reader # noqa + + +def main(): + class MyParser(argparse.ArgumentParser): + """"""A parser which prints the help message when an error occurs. Taken from + https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu."""""" # noqa + + def error(self, message): + sys.stderr.write(""error: %s\\n"" % message) + self.print_help() + sys.exit(2) + + parser = MyParser( + description=""Evaluate the performance of a rating prediction "" + + ""algorithm "" + + ""on a given dataset using cross validation. You can use a built-in "" + + ""or a custom dataset, and you can choose to automatically split the "" + + ""dataset into folds, or manually specify train and test files. "" + + ""Please refer to the documentation page "" + + ""(https://surprise.readthedocs.io/) for more details."", + epilog=""""""Example:\\n + surprise -algo SVD -params ""{'n_epochs': 5, 'verbose': True}"" + -load-builtin ml-100k -n-folds 3"""""", + ) + + algo_choices = { + ""NormalPredictor"": NormalPredictor, + ""BaselineOnly"": BaselineOnly, + ""KNNBasic"": KNNBasic, + ""KNNBaseline"": KNNBaseline, + ""KNNWithMeans"": KNNWithMeans, + ""SVD"": SVD, + ""SVDpp"": SVDpp, + ""NMF"": NMF, + ""SlopeOne"": SlopeOne, + ""CoClustering"": CoClustering, + } + + parser.add_argument( + ""-algo"", + type=str, + choices=algo_choices, + help=""The prediction algorithm to use. "" + + ""Allowed values are "" + + "", "".join(algo_choices.keys()) + + ""."", + metavar="""", + ) + + parser.add_argument( + ""-params"", + type=str, + metavar="""", + default=""{}"", + help=""A kwargs dictionary that contains all the "" + + ""algorithm parameters."" + + ""Example: \\""{'n_epochs': 10}\\""."", + ) + + parser.add_argument( + ""-load-builtin"", + type=str, + dest=""load_builtin"", + metavar="""", + default=""ml-100k"", + help=""The name of the built-in dataset to use."" + + ""Allowed values are "" + + "", "".join(dataset.BUILTIN_DATASETS.keys()) + + "". Default is ml-100k."", + ) + + parser.add_argument( + ""-load-custom"", + type=str, + dest=""load_custom"", + metavar="""", + default=None, + help=""A file path to custom dataset to use. "" + + ""Ignored if "" + + ""-loadbuiltin is set. 
The -reader parameter needs "" + + ""to be set."", + ) + + parser.add_argument( + ""-folds-files"", + type=str, + dest=""folds_files"", + metavar="""", + default=None, + help=""A list of custom train and test files. "" + + ""Ignored if -load-builtin or -load-custom is set. "" + ""The -reader parameter needs to be set."", + ) + + parser.add_argument( + ""-reader"", + type=str, + metavar="""", + default=None, + help=""A Reader to read the custom dataset. Example: "" + + ""\\""Reader(line_format='user item rating timestamp',"" + + "" sep='\\\\t')\\"""", + ) + + parser.add_argument( + ""-n-folds"", + type=int, + dest=""n_folds"", + metavar="""", + default=5, + help=""The number of folds for cross-validation. "" + ""Default is 5."", + ) + + parser.add_argument( + ""-seed"", + type=int, + metavar="""", + default=None, + help=""The seed to use for RNG. "" + ""Default is the current system time."", + ) + + parser.add_argument( + ""--with-dump"", + dest=""with_dump"", + action=""store_true"", + help=""Dump the algorithm "" + + ""results in a file (one file per fold). "" + + ""Default is False."", + ) + + parser.add_argument( + ""-dump-dir"", + dest=""dump_dir"", + type=str, + metavar="""", + default=None, + help=""Where to dump the files. Ignored if "" + + ""with-dump is not set. Default is "" + + os.path.join(get_dataset_dir(), ""dumps/""), + ) + + parser.add_argument( + ""--clean"", + dest=""clean"", + action=""store_true"", + help=""Remove the "" + get_dataset_dir() + "" directory and exit."", + ) + + parser.add_argument(""-v"", ""--version"", action=""version"", version=__version__) + + args = parser.parse_args() + + if args.clean: + folder = get_dataset_dir() + shutil.rmtree(folder) + print(""Removed"", folder) + exit() + + # setup RNG + rd.seed(args.seed) + np.random.seed(args.seed) + + # setup algorithm + params = eval(args.params) + if args.algo is None: + parser.error(""No algorithm was specified."") + algo = algo_choices[args.algo](**params) + + # setup dataset + if args.load_custom is not None: # load custom and split + if args.reader is None: + parser.error(""-reader parameter is needed."") + reader = eval(args.reader) + data = Dataset.load_from_file(args.load_custom, reader=reader) + cv = KFold(n_splits=args.n_folds, random_state=args.seed) + + elif args.folds_files is not None: # load from files + if args.reader is None: + parser.error(""-reader parameter is needed."") + reader = eval(args.reader) + folds_files = args.folds_files.split() + folds_files = [ + (folds_files[i], folds_files[i + 1]) + for i in range(0, len(folds_files) - 1, 2) + ] + data = Dataset.load_from_folds(folds_files=folds_files, reader=reader) + cv = PredefinedKFold() + + else: # load builtin dataset and split + data = Dataset.load_builtin(args.load_builtin) + cv = KFold(n_splits=args.n_folds, random_state=args.seed) + + cross_validate(algo, data, cv=cv, verbose=True) + + +if __name__ == ""__main__"": + main() + """"""This module contains the Trainset class."""""" + + +import numpy as np + + +class Trainset: + """"""A trainset contains all useful data that constitute a training set. + + It is used by the :meth:`fit() + ` method of every + prediction algorithm. You should not try to build such an object on your + own but rather use the :meth:`Dataset.folds() + ` method or the + :meth:`DatasetAutoFolds.build_full_trainset() + ` method. + + Trainsets are different from :class:`Datasets `. + You can think of a :class:`Dataset ` as the raw + data, and Trainsets as higher-level data where useful methods are defined. 
+ Also, a :class:`Dataset ` may be comprised of + multiple Trainsets (e.g. when doing cross validation). + + + Attributes: + ur(:obj:`defaultdict` of :obj:`list`): The users ratings. This is a + dictionary containing lists of tuples of the form ``(item_inner_id, + rating)``. The keys are user inner ids. + ir(:obj:`defaultdict` of :obj:`list`): The items ratings. This is a + dictionary containing lists of tuples of the form ``(user_inner_id, + rating)``. The keys are item inner ids. + n_users: Total number of users :math:`|U|`. + n_items: Total number of items :math:`|I|`. + n_ratings: Total number of ratings :math:`|R_{train}|`. + rating_scale(tuple): The minimum and maximal rating of the rating + scale. + global_mean: The mean of all ratings :math:`\\\\mu`. + """""" + + def __init__( + self, + ur, + ir, + n_users, + n_items, + n_ratings, + rating_scale, + raw2inner_id_users, + raw2inner_id_items, + ): + + self.ur = ur + self.ir = ir + self.n_users = n_users + self.n_items = n_items + self.n_ratings = n_ratings + self.rating_scale = rating_scale + self._raw2inner_id_users = raw2inner_id_users + self._raw2inner_id_items = raw2inner_id_items + self._global_mean = None + # inner2raw dicts could be built right now (or even before) but they + # are not always useful so we wait until we need them. + self._inner2raw_id_users = None + self._inner2raw_id_items = None + + def knows_user(self, uid): + """"""Indicate if the user is part of the trainset. + + A user is part of the trainset if the user has at least one rating. + + Args: + uid(int): The (inner) user id. See :ref:`this + note`. + Returns: + ``True`` if user is part of the trainset, else ``False``. + """""" + + return uid in self.ur + + def knows_item(self, iid): + """"""Indicate if the item is part of the trainset. + + An item is part of the trainset if the item was rated at least once. + + Args: + iid(int): The (inner) item id. See :ref:`this + note`. + Returns: + ``True`` if item is part of the trainset, else ``False``. + """""" + + return iid in self.ir + + def to_inner_uid(self, ruid): + """"""Convert a **user** raw id to an inner id. + + " +"See :ref:`this note`. + + Args: + ruid(str): The user raw id. + + Returns: + int: The user inner id. + + Raises: + ValueError: When user is not part of the trainset. + """""" + + try: + return self._raw2inner_id_users[ruid] + except KeyError: + raise ValueError(""User "" + str(ruid) + "" is not part of the trainset."") + + def to_raw_uid(self, iuid): + """"""Convert a **user** inner id to a raw id. + + See :ref:`this note`. + + Args: + iuid(int): The user inner id. + + Returns: + str: The user raw id. + + Raises: + ValueError: When ``iuid`` is not an inner id. + """""" + + if self._inner2raw_id_users is None: + self._inner2raw_id_users = { + inner: raw for (raw, inner) in self._raw2inner_id_users.items() + } + + try: + return self._inner2raw_id_users[iuid] + except KeyError: + raise ValueError(str(iuid) + "" is not a valid inner id."") + + def to_inner_iid(self, riid): + """"""Convert an **item** raw id to an inner id. + + See :ref:`this note`. + + Args: + riid(str): The item raw id. + + Returns: + int: The item inner id. + + Raises: + ValueError: When item is not part of the trainset. + """""" + + try: + return self._raw2inner_id_items[riid] + except KeyError: + raise ValueError(""Item "" + str(riid) + "" is not part of the trainset."") + + def to_raw_iid(self, iiid): + """"""Convert an **item** inner id to a raw id. + + See :ref:`this note`. + + Args: + iiid(int): The item inner id. 
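+# A short sketch of the raw-id / inner-id bookkeeping described above, assuming the
+# `trainset` built from the small DataFrame sketch earlier in this file (raw user
+# ids 'A' and 'B').
+inner_uid = trainset.to_inner_uid('A')      # raw id -> inner id
+assert trainset.knows_user(inner_uid)       # knows_user()/knows_item() take inner ids
+print(trainset.n_users, trainset.n_items, trainset.global_mean)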
+ + Returns: + str: The item raw id. + + Raises: + ValueError: When ``iiid`` is not an inner id. + """""" + + if self._inner2raw_id_items is None: + self._inner2raw_id_items = { + inner: raw for (raw, inner) in self._raw2inner_id_items.items() + } + + try: + return self._inner2raw_id_items[iiid] + except KeyError: + raise ValueError(str(iiid) + "" is not a valid inner id."") + + def all_ratings(self): + """"""Generator function to iterate over all ratings. + + Yields: + A tuple ``(uid, iid, rating)`` where ids are inner ids (see + :ref:`this note `). + """""" + + for u, u_ratings in self.ur.items(): + for i, r in u_ratings: + yield u, i, r + + def build_testset(self): + """"""Return a list of ratings that can be used as a testset in the + :meth:`test() ` + method. + + The ratings are all the ratings that are in the trainset, i.e. all the + ratings returned by the :meth:`all_ratings() + ` generator. This is useful in + cases where you want to to test your algorithm on the trainset. + """""" + + return [ + (self.to_raw_uid(u), self.to_raw_iid(i), r) + for (u, i, r) in self.all_ratings() + ] + + def build_anti_testset(self, fill=None): + """"""Return a list of ratings that can be used as a testset in the + :meth:`test() ` + method. + + The ratings are all the ratings that are **not** in the trainset, i.e. + all the ratings :math:`r_{ui}` where the user :math:`u` is known, the + item :math:`i` is known, but the rating :math:`r_{ui}` is not in the + trainset. As :math:`r_{ui}` is unknown, it is either replaced by the + :code:`fill` value or assumed to be equal to the mean of all ratings + :meth:`global_mean `. + + Args: + fill(float): The value to fill unknown ratings. If :code:`None` the + global mean of all ratings :meth:`global_mean + ` will be used. + + Returns: + A list of tuples ``(uid, iid, fill)`` where ids are raw ids. + """""" + fill = self.global_mean if fill is None else float(fill) + + anti_testset = [] + for u in self.all_users(): + user_items = {j for (j, _) in self.ur[u]} + anti_testset += [ + (self.to_raw_uid(u), self.to_raw_iid(i), fill) + for i in self.all_items() + if i not in user_items + ] + return anti_testset + + def all_users(self): + """"""Generator function to iterate over all users. + + Yields: + Inner id of users. + """""" + return range(self.n_users) + + def all_items(self): + """"""Generator function to iterate over all items. + + Yields: + Inner id of items. + """""" + return range(self.n_items) + + @property + def global_mean(self): + if self._global_mean is None: + self._global_mean = np.mean([r for (_, _, r) in self.all_ratings()]) + + return self._global_mean + """"""The utils module contains the get_rng function."""""" + + +import numbers + +import numpy as np + + +def get_rng(random_state): + """"""Return a 'validated' RNG. + + If random_state is None, use RandomState singleton from numpy. Else if + it's an integer, consider it's a seed and initialized an rng with that + seed. If it's already an rng, return it. + """""" + if random_state is None: + return np.random.mtrand._rand + elif isinstance(random_state, (numbers.Integral, np.integer)): + return np.random.RandomState(random_state) + if isinstance(random_state, np.random.RandomState): + return random_state + raise ValueError( + ""Wrong random state. Expecting None, an int or a numpy "" + ""RandomState instance, got a "" + ""{}"".format(type(random_state)) + ) + """""" +the :mod:`knns` module includes some k-NN inspired algorithms. 
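+# Sketch of the Trainset helpers above, again assuming the trainset from the earlier
+# DataFrame sketch: build_anti_testset() lists every (user, item) pair the user has
+# not rated, filled with the global mean by default.
+anti_testset = trainset.build_anti_testset()   # -> [('B', 'y', 4.0)] for that sketch
+first_item_raw = trainset.to_raw_iid(0)        # inner item id 0 maps back to 'x' there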
+"""""" + +import heapq + +import numpy as np + +from .algo_base import AlgoBase + +from .predictions import PredictionImpossible + + +# Important note: as soon as an algorithm uses a similarity measure, it should +# also allow the bsl_options parameter because of the pearson_baseline +# similarity. It can be done explicitly (e.g. KNNBaseline), or implicetely +# using kwargs (e.g. KNNBasic). + + +class SymmetricAlgo(AlgoBase): + """"""This is an abstract class aimed to ease the use of symmetric algorithms. + + A symmetric algorithm is an algorithm that can can be based on users or on + items indifferently, e.g. all the algorithms in this module. + + When the algo is user-based x denotes a user and y an item. Else, it's + reversed. + """""" + + def __init__(self, sim_options={}, verbose=True, **kwargs): + + AlgoBase.__init__(self, sim_options=sim_options, **kwargs) + self.verbose = verbose + + def fit(self, trainset): + + AlgoBase.fit(self, trainset) + + ub = self.sim_options[""user_based""] + self.n_x = self.trainset.n_users if ub else self.trainset.n_items + self.n_y = self.trainset.n_items if ub else self.trainset.n_users + self.xr = self.trainset.ur if ub else self.trainset.ir + self.yr = self.trainset.ir if ub else self.trainset.ur + + return self + + def switch(self, u_stuff, i_stuff): + """"""Return x_stuff and y_stuff depending on the user_based field."""""" + + if self.sim_options[""user_based""]: + return u_stuff, i_stuff + else: + return i_stuff, u_stuff + + +class KNNBasic(SymmetricAlgo): + """"""A basic collaborative filtering algorithm. + + The prediction :math:`\\\\hat{r}_{ui}` is set as: + + .. math:: + \\\\hat{r}_{ui} = \\\\frac{ + \\\\sum\\\\limits_{v \\\\in N^k_i(u)} \\\\text{sim}(u, v) \\\\cdot r_{vi}} + {\\\\sum\\\\limits_{v \\\\in N^k_i(u)} \\\\text{sim}(u, v)} + + or + + .. math:: + \\\\hat{r}_{ui} = \\\\frac{ + \\\\sum\\\\limits_{j \\\\in N^k_u(i)} \\\\text{sim}(i, j) \\\\cdot r_{uj}} + {\\\\sum\\\\limits_{j \\\\in N^k_u(i)} \\\\text{sim}(i, j)} + + depending on the ``user_based`` field of the ``sim_options`` parameter. + + Args: + k(int): The (max) number of neighbors to take into account for + aggregation (see :ref:`this note `). Default is + ``40``. + min_k(int): The minimum number of neighbors to take into account for + aggregation. If there are not enough neighbors, the prediction is + set to the global mean of all ratings. Default is ``1``. + sim_options(dict): A dictionary of options for the similarity + measure. See :ref:`similarity_measures_configuration` for accepted + options. + verbose(bool): Whether to print trace messages of bias estimation, + similarity, etc. Default is True. 
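+# Usage sketch for KNNBasic as parameterised above, assuming `data` was loaded as in
+# the earlier Dataset examples: sim_options picks the similarity measure and whether
+# neighbours are users or items.
+from surprise import KNNBasic
+from surprise.model_selection import train_test_split
+
+trainset, testset = train_test_split(data, test_size=0.25)
+algo = KNNBasic(k=40, min_k=1, sim_options={'name': 'cosine', 'user_based': False})
+algo.fit(trainset)
+predictions = algo.test(testset)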
+ """""" + + def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs): + + SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs) + self.k = k + self.min_k = min_k + + def fit(self, trainset): + + SymmetricAlgo.fit(self, trainset) + self.sim = self.compute_similarities() + + return self + + def estimate(self, u, i): + + if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)): + raise PredictionImpossible(""User and/or item is unknown."") + + x, y = self.switch(u, i) + + neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]] + k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0]) + + # compute weighted average + sum_sim = sum_ratings = actual_k = 0 + for (sim, r) in k_neighbors: + if sim > 0: + sum_sim += sim + sum_ratings += sim * r + actual_k += 1 + + if actual_k < self.min_k: + raise PredictionImpossible(""Not enough neighbors."") + + est = sum_ratings / sum_sim + + details = {""actual_k"": actual_k} + return est, details + + +class KNNWithMeans(SymmetricAlgo): + """"""A basic collaborative filtering algorithm, taking into account the mean + ratings of each user. + + The prediction :math:`\\\\hat{r}_{ui}` is set as: + + .. math:: + \\\\hat{r}_{ui} = \\\\mu_u + \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)} + \\\\text{sim}(u, v) \\\\cdot (r_{vi} - \\\\mu_v)} {\\\\sum\\\\limits_{v \\\\in + N^k_i(u)} \\\\text{sim}(u, v)} + + or + + .. math:: + \\\\hat{r}_{ui} = \\\\mu_i + \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)} + \\\\text{sim}(i, j) \\\\cdot (r_{uj} - \\\\mu_j)} {\\\\sum\\\\limits_{j \\\\in + N^k_u(i)} \\\\text{sim}(i, j)} + + depending on the ``user_based`` field of the ``sim_options`` parameter. + + + Args: + k(int): The (max) number of neighbors to take into account for + aggregation (see :ref:`this note `). Default is + ``40``. + min_k(int): The minimum number of neighbors to take into account for + aggregation. If there are not enough neighbors, the neighbor + aggregation is set to zero (so the prediction ends up being + equivalent to the mean :math:`\\\\mu_u` or :math:`\\\\mu_i`). Default is + ``1``. + sim_options(dict): A dictionary of options for the similarity + measure. See :ref:`similarity_measures_configuration` for accepted + options. + verbose(bool): Whether to print trace messages of bias estimation, + similarity, etc. Default is True. 
+ """""" + + def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs): + + SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs) + + self.k = k + self.min_k = min_k + + def fit(self, trainset): + + SymmetricAlgo.fit(self, trainset) + self.sim = self.compute_similarities() + + self.means = np.zeros(self.n_x) + for x, ratings in self.xr.items(): + self.means[x] = np.mean([r for (_, r) in ratings]) + + return self + + def estimate(self, u, i): + + if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)): + raise PredictionImpossible(""User and/or item is unknown."") + + x, y = self.switch(u, i) + + neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]] + k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1]) + + est = self.means[x] + + # compute weighted average + sum_sim = sum_ratings = actual_k = 0 + for (nb, sim, r) in k_neighbors: + if sim > 0: + sum_sim += sim + sum_ratings += sim * (r - self.means[nb]) + actual_k += 1 + + if actual_k < self.min_k: + sum_ratings = 0 + + try: + est += sum_ratings / sum_sim + except ZeroDivisionError: + pass # return mean + + details = {""actual_k"": actual_k} + return est, details + + +class KNNBaseline(SymmetricAlgo): + """"""A basic collaborative filtering algorithm taking into account a + *bas" +"eline* rating. + + + The prediction :math:`\\\\hat{r}_{ui}` is set as: + + .. math:: + \\\\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)} + \\\\text{sim}(u, v) \\\\cdot (r_{vi} - b_{vi})} {\\\\sum\\\\limits_{v \\\\in + N^k_i(u)} \\\\text{sim}(u, v)} + + or + + + .. math:: + \\\\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)} + \\\\text{sim}(i, j) \\\\cdot (r_{uj} - b_{uj})} {\\\\sum\\\\limits_{j \\\\in + N^k_u(i)} \\\\text{sim}(i, j)} + + depending on the ``user_based`` field of the ``sim_options`` parameter. For + the best predictions, use the :func:`pearson_baseline + ` similarity measure. + + This algorithm corresponds to formula (3), section 2.2 of + :cite:`Koren:2010`. + + Args: + k(int): The (max) number of neighbors to take into account for + aggregation (see :ref:`this note `). Default is + ``40``. + min_k(int): The minimum number of neighbors to take into account for + aggregation. If there are not enough neighbors, the neighbor + aggregation is set to zero (so the prediction ends up being + equivalent to the baseline). Default is ``1``. + sim_options(dict): A dictionary of options for the similarity + measure. See :ref:`similarity_measures_configuration` for accepted + options. It is recommended to use the :func:`pearson_baseline + ` similarity measure. + + bsl_options(dict): A dictionary of options for the baseline estimates + computation. See :ref:`baseline_estimates_configuration` for + accepted options. + verbose(bool): Whether to print trace messages of bias estimation, + similarity, etc. Default is True. 
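+# Usage sketch for KNNBaseline as documented above: pearson_baseline is the
+# recommended similarity, and bsl_options controls how the baselines b_u and b_i are
+# fitted (ALS here). Assumes the `trainset` and `testset` from the earlier split
+# sketch.
+from surprise import KNNBaseline
+
+algo = KNNBaseline(k=40,
+                   sim_options={'name': 'pearson_baseline', 'user_based': True},
+                   bsl_options={'method': 'als', 'n_epochs': 10})
+algo.fit(trainset)
+predictions = algo.test(testset)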
+ + """""" + + def __init__( + self, k=40, min_k=1, sim_options={}, bsl_options={}, verbose=True, **kwargs + ): + + SymmetricAlgo.__init__( + self, + sim_options=sim_options, + bsl_options=bsl_options, + verbose=verbose, + **kwargs + ) + + self.k = k + self.min_k = min_k + + def fit(self, trainset): + + SymmetricAlgo.fit(self, trainset) + self.bu, self.bi = self.compute_baselines() + self.bx, self.by = self.switch(self.bu, self.bi) + self.sim = self.compute_similarities() + + return self + + def estimate(self, u, i): + + est = self.trainset.global_mean + if self.trainset.knows_user(u): + est += self.bu[u] + if self.trainset.knows_item(i): + est += self.bi[i] + + x, y = self.switch(u, i) + + if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)): + return est + + neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]] + k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1]) + + # compute weighted average + sum_sim = sum_ratings = actual_k = 0 + for (nb, sim, r) in k_neighbors: + if sim > 0: + sum_sim += sim + nb_bsl = self.trainset.global_mean + self.bx[nb] + self.by[y] + sum_ratings += sim * (r - nb_bsl) + actual_k += 1 + + if actual_k < self.min_k: + sum_ratings = 0 + + try: + est += sum_ratings / sum_sim + except ZeroDivisionError: + pass # just baseline again + + details = {""actual_k"": actual_k} + return est, details + + +class KNNWithZScore(SymmetricAlgo): + """"""A basic collaborative filtering algorithm, taking into account + the z-score normalization of each user. + + The prediction :math:`\\\\hat{r}_{ui}` is set as: + + .. math:: + \\\\hat{r}_{ui} = \\\\mu_u + \\\\sigma_u \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)} + \\\\text{sim}(u, v) \\\\cdot (r_{vi} - \\\\mu_v) / \\\\sigma_v} {\\\\sum\\\\limits_{v + \\\\in N^k_i(u)} \\\\text{sim}(u, v)} + + or + + .. math:: + \\\\hat{r}_{ui} = \\\\mu_i + \\\\sigma_i \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)} + \\\\text{sim}(i, j) \\\\cdot (r_{uj} - \\\\mu_j) / \\\\sigma_j} {\\\\sum\\\\limits_{j + \\\\in N^k_u(i)} \\\\text{sim}(i, j)} + + depending on the ``user_based`` field of the ``sim_options`` parameter. + + If :math:`\\\\sigma` is 0, than the overall sigma is used in that case. + + Args: + k(int): The (max) number of neighbors to take into account for + aggregation (see :ref:`this note `). Default is + ``40``. + min_k(int): The minimum number of neighbors to take into account for + aggregation. If there are not enough neighbors, the neighbor + aggregation is set to zero (so the prediction ends up being + equivalent to the mean :math:`\\\\mu_u` or :math:`\\\\mu_i`). Default is + ``1``. + sim_options(dict): A dictionary of options for the similarity + measure. See :ref:`similarity_measures_configuration` for accepted + options. + verbose(bool): Whether to print trace messages of bias estimation, + similarity, etc. Default is True. 
+ """""" + + def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs): + + SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs) + + self.k = k + self.min_k = min_k + + def fit(self, trainset): + + SymmetricAlgo.fit(self, trainset) + + self.means = np.zeros(self.n_x) + self.sigmas = np.zeros(self.n_x) + # when certain sigma is 0, use overall sigma + self.overall_sigma = np.std([r for (_, _, r) in self.trainset.all_ratings()]) + + for x, ratings in self.xr.items(): + self.means[x] = np.mean([r for (_, r) in ratings]) + sigma = np.std([r for (_, r) in ratings]) + self.sigmas[x] = self.overall_sigma if sigma == 0.0 else sigma + + self.sim = self.compute_similarities() + + return self + + def estimate(self, u, i): + + if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)): + raise PredictionImpossible(""User and/or item is unknown."") + + x, y = self.switch(u, i) + + neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]] + k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1]) + + est = self.means[x] + + # compute weighted average + sum_sim = sum_ratings = actual_k = 0 + for (nb, sim, r) in k_neighbors: + if sim > 0: + sum_sim += sim + sum_ratings += sim * (r - self.means[nb]) / self.sigmas[nb] + actual_k += 1 + + if actual_k < self.min_k: + sum_ratings = 0 + + try: + est += sum_ratings / sum_sim * self.sigmas[x] + except ZeroDivisionError: + pass # return mean + + details = {""actual_k"": actual_k} + return est, details + """""" +This class implements the baseline estimation. +"""""" + +from .algo_base import AlgoBase + + +class BaselineOnly(AlgoBase): + r""""""Algorithm predicting the baseline estimate for given user and item. + + :math:`\\hat{r}_{ui} = b_{ui} = \\mu + b_u + b_i` + + If user :math:`u` is unknown, then the bias :math:`b_u` is assumed to be + zero. The same applies for item :math:`i` with :math:`b_i`. + + See section 2.1 of :cite:`Koren:2010` for details. + + Args: + bsl_options(dict): A dictionary of options for the baseline estimates + computation. See :ref:`baseline_estimates_configuration` for + accepted options. + verbose(bool): Whether to print trace messages of bias estimation, + similarity, etc. Default is True. + """""" + + def __init__(self, bsl_options={}, verbose=True): + + AlgoBase.__init__(self, bsl_options=bsl_options) + self.verbose = verbose + + def fit(self, trainset): + + AlgoBase.fit(self, trainset) + self.bu, self.bi = self.compute_baselines() + + return self + + def estimate(self, u, i): + + est = self.trainset.global_mean + if self.trainset.knows_user(u): + est += self.bu[u] + if self.trainset.knows_item(i): + est += self.bi[i] + + return est + """""" +The :mod:`prediction_algorithms` package includes the prediction algorithms +available for recommendation. + +The available prediction algorithms are: + +.. 
autosummary:: + :nosignatures: + + random_pred.NormalPredictor + baseline_only.BaselineOnly + knns.KNNBasic + knns.KNNWithMeans + knns.KNNWithZScore + knns.KNNBaseline + matrix_factorization.SVD + matrix_factorization.SVDpp + matrix_factorization.NMF + slope_one.SlopeOne + co_clustering.CoClustering +"""""" + +from .algo_base import AlgoBase +from .baseline_only import BaselineOnly +from .co_clustering import CoClustering +from .knns import KNNBaseline, KNNBasic, KNNWithMeans, KNNWithZScore +from .matrix_factorization import NMF, SVD, SVDpp + +from .predictions import Prediction, PredictionImpossible +from .random_pred import NormalPredictor +from .slope_one import SlopeOne + +__all__ = [ + ""AlgoBase"", + ""NormalPredictor"", + ""BaselineOnly"", + ""KNNBasic"", + ""KNNBaseline"", + ""KNNWithMeans"", + ""SVD"", + ""SVDpp"", + ""NMF"", + ""SlopeOne"", + ""CoClustering"", + ""PredictionImpossible"", + ""Prediction"", + ""KNNWithZScore"", +] + """""" Algorithm predicting a random rating. +"""""" + + +import numpy as np + +from .algo_base import AlgoBase + + +class NormalPredictor(AlgoBase): + """"""Algorithm predicting a random rating based on the distribution of the + training set, which is assumed to be normal. + + The prediction :math:`\\\\hat{r}_{ui}` is generated from a normal distribution + :math:`\\\\mathcal{N}(\\\\hat{\\\\mu}, \\\\hat{\\\\sigma}^2)` where :math:`\\\\hat{\\\\mu}` and + :math:`\\\\hat{\\\\sigma}` are estimated from the training data using Maximum + Likelihood Estimation: + + .. math:: + \\\\hat{\\\\mu} &= \\\\frac{1}{|R_{train}|} \\\\sum_{r_{ui} \\\\in R_{train}} + r_{ui}\\\\\\\\\\\\\\\\\\ + \\\\hat{\\\\sigma} &= \\\\sqrt{\\\\sum_{r_{ui} \\\\in R_{train}} + \\\\frac{(r_{ui} - \\\\hat{\\\\mu})^2}{|R_{train}|}} + """""" + + def __init__(self): + + AlgoBase.__init__(self) + + def fit(self, trainset): + + AlgoBase.fit(self, trainset) + + num = sum( + (r - self.trainset.global_mean) ** 2 + for (_, _, r) in self.trainset.all_ratings() + ) + denum = self.trainset.n_ratings + self.sigma = np.sqrt(num / denum) + + return self + + def estimate(self, *_): + + return np.random.normal(self.trainset.global_mean, self.sigma) + """""" +The :mod:`surprise.prediction_algorithms.algo_base` module defines the base +class :class:`AlgoBase` from which every single prediction algorithm has to +inherit. +"""""" +import heapq + +from .. import similarities as sims +from .optimize_baselines import baseline_als, baseline_sgd +from .predictions import Prediction, PredictionImpossible + + +class AlgoBase: + """"""Abstract class where is defined the basic behavior of a prediction + algorithm. + + Keyword Args: + baseline_options(dict, optional): If the algorithm needs to compute a + baseline estimate, the ``baseline_options`` parameter is used to + configure how they are computed. See + :ref:`baseline_estimates_configuration` for usage. + """""" + + def __init__(self, **kwargs): + + self.bsl_options = kwargs.get(""bsl_options"", {}) + self.sim_options = kwargs.get(""sim_options"", {}) + if ""user_based"" not in self.sim_options: + self.sim_options[""user_based""] = True + + def fit(self, trainset): + """"""Train an algorithm on a given training set. + + This method is called by every derived class as the first basic step + for training an algorithm. It basically just initializes some internal + structures and set the self.trainset attribute. + + Args: + trainset(:obj:`Trainset `) : A training + set, as returned by the :meth:`folds + ` method. 
+ + Returns: + self + """""" + + self.trainset = trainset + + # (re) Initialise baselines + self.bu = self.bi = None + + return self + + def predict(self, uid, iid, r_ui=None, clip=True, verbose=False): + """"""Compute the rating prediction for given user and item. + + The ``predict`` method converts raw ids to inner ids and then calls the + ``estimate`` method which is defined in every derived class. If the + prediction is impossible (e.g. because the user and/or the item is + unknown), the prediction is set according to + :meth:`default_prediction() + `. + + Args: + uid: (Raw) id" +"of the user. See :ref:`this note`. + iid: (Raw) id of the item. See :ref:`this note`. + r_ui(float): The true rating :math:`r_{ui}`. Optional, default is + ``None``. + clip(bool): Whether to clip the estimation into the rating scale. + For example, if :math:`\\\\hat{r}_{ui}` is :math:`5.5` while the + rating scale is :math:`[1, 5]`, then :math:`\\\\hat{r}_{ui}` is + set to :math:`5`. Same goes if :math:`\\\\hat{r}_{ui} < 1`. + Default is ``True``. + verbose(bool): Whether to print details of the prediction. Default + is False. + + Returns: + A :obj:`Prediction\\ + ` object + containing: + + - The (raw) user id ``uid``. + - The (raw) item id ``iid``. + - The true rating ``r_ui`` (:math:`r_{ui}`). + - The estimated rating (:math:`\\\\hat{r}_{ui}`). + - Some additional details about the prediction that might be useful + for later analysis. + """""" + + # Convert raw ids to inner ids + try: + iuid = self.trainset.to_inner_uid(uid) + except ValueError: + iuid = ""UKN__"" + str(uid) + try: + iiid = self.trainset.to_inner_iid(iid) + except ValueError: + iiid = ""UKN__"" + str(iid) + + details = {} + try: + est = self.estimate(iuid, iiid) + + # If the details dict was also returned + if isinstance(est, tuple): + est, details = est + + details[""was_impossible""] = False + + except PredictionImpossible as e: + est = self.default_prediction() + details[""was_impossible""] = True + details[""reason""] = str(e) + + # clip estimate into [lower_bound, higher_bound] + if clip: + lower_bound, higher_bound = self.trainset.rating_scale + est = min(higher_bound, est) + est = max(lower_bound, est) + + pred = Prediction(uid, iid, r_ui, est, details) + + if verbose: + print(pred) + + return pred + + def default_prediction(self): + """"""Used when the ``PredictionImpossible`` exception is raised during a + call to :meth:`predict() + `. By + default, return the global mean of all ratings (can be overridden in + child classes). + + Returns: + (float): The mean of all ratings in the trainset. + """""" + + return self.trainset.global_mean + + def test(self, testset, verbose=False): + """"""Test the algorithm on given testset, i.e. estimate all the ratings + in the given testset. + + Args: + testset: A test set, as returned by a :ref:`cross-validation + itertor` or by the + :meth:`build_testset() ` + method. + verbose(bool): Whether to print details for each predictions. + Default is False. + + Returns: + A list of :class:`Prediction\\ + ` objects + that contains all the estimated ratings. + """""" + + # The ratings are translated back to their original scale. + predictions = [ + self.predict(uid, iid, r_ui_trans, verbose=verbose) + for (uid, iid, r_ui_trans) in testset + ] + return predictions + + def compute_baselines(self): + """"""Compute users and items baselines. + + The way baselines are computed depends on the ``bsl_options`` parameter + passed at the creation of the algorithm (see + :ref:`baseline_estimates_configuration`). 
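+# Sketch of the predict()/test() flow documented above: predict() takes *raw* ids,
+# clips the estimate to the rating scale, and flags impossible predictions in the
+# details dict; accuracy.mae() then summarises a list of predictions. Assumes the
+# fitted `algo` and `testset` from the earlier k-NN sketches; the raw ids are made-up
+# placeholders.
+from surprise import accuracy
+
+pred = algo.predict('alice', 'some_item', r_ui=4.0, verbose=True)
+if pred.details['was_impossible']:
+    print('fell back to default_prediction():', pred.est)
+
+accuracy.mae(algo.test(testset))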
+ + This method is only relevant for algorithms using :func:`Pearson + baseline similarity` or the + :class:`BaselineOnly + ` algorithm. + + Returns: + A tuple ``(bu, bi)``, which are users and items baselines."""""" + + # Firt of, if this method has already been called before on the same + # trainset, then just return. Indeed, compute_baselines may be called + # more than one time, for example when a similarity metric (e.g. + # pearson_baseline) uses baseline estimates. + if self.bu is not None: + return self.bu, self.bi + + method = dict(als=baseline_als, sgd=baseline_sgd) + + method_name = self.bsl_options.get(""method"", ""als"") + + try: + if getattr(self, ""verbose"", False): + print(""Estimating biases using"", method_name + ""..."") + self.bu, self.bi = method[method_name](self) + return self.bu, self.bi + except KeyError: + raise ValueError( + ""Invalid method "" + + method_name + + "" for baseline computation."" + + "" Available methods are als and sgd."" + ) + + def compute_similarities(self): + """"""Build the similarity matrix. + + The way the similarity matrix is computed depends on the + ``sim_options`` parameter passed at the creation of the algorithm (see + :ref:`similarity_measures_configuration`). + + This method is only relevant for algorithms using a similarity measure, + such as the :ref:`k-NN algorithms `. + + Returns: + The similarity matrix."""""" + + construction_func = { + ""cosine"": sims.cosine, + ""msd"": sims.msd, + ""pearson"": sims.pearson, + ""pearson_baseline"": sims.pearson_baseline, + } + + if self.sim_options[""user_based""]: + n_x, yr = self.trainset.n_users, self.trainset.ir + else: + n_x, yr = self.trainset.n_items, self.trainset.ur + + min_support = self.sim_options.get(""min_support"", 1) + + args = [n_x, yr, min_support] + + name = self.sim_options.get(""name"", ""msd"").lower() + if name == ""pearson_baseline"": + shrinkage = self.sim_options.get(""shrinkage"", 100) + bu, bi = self.compute_baselines() + if self.sim_options[""user_based""]: + bx, by = bu, bi + else: + bx, by = bi, bu + + args += [self.trainset.global_mean, bx, by, shrinkage] + + try: + if getattr(self, ""verbose"", False): + print(f""Computing the {name} similarity matrix..."") + sim = construction_func[name](*args) + if getattr(self, ""verbose"", False): + print(""Done computing similarity matrix."") + return sim + except KeyError: + raise NameError( + ""Wrong sim name "" + + name + + "". Allowed values "" + + ""are "" + + "", "".join(construction_func.keys()) + + ""."" + ) + + def get_neighbors(self, iid, k): + """"""Return the ``k`` nearest neighbors of ``iid``, which is the inner id + of a user or an item, depending on the ``user_based`` field of + ``sim_options`` (see :ref:`similarity_measures_configuration`). + + As the similarities are computed on the basis of a similarity measure, + this method is only relevant for algorithms using a similarity measure, + such as the :ref:`k-NN algorithms `. + + For a usage example, see the :ref:`FAQ `. + + Args: + iid(int): The (inner) id of the user (or item) for which we want + the nearest neighbors. See :ref:`this note`. + + k(int): The number of neighbors to retrieve. + + Returns: + The list of the ``k`` (inner) ids of the closest users (or items) + to ``iid``. 
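+# Sketch of get_neighbors() as documented above: it works on *inner* ids, so raw ids
+# are converted on the way in and back on the way out. Assumes a fitted k-NN `algo`
+# from the earlier sketches; 'some_item' stands in for a raw item id that is present
+# in the trainset.
+inner_id = algo.trainset.to_inner_iid('some_item')
+neighbor_inner_ids = algo.get_neighbors(inner_id, k=5)
+neighbor_raw_ids = [algo.trainset.to_raw_iid(i) for i in neighbor_inner_ids]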
+ """""" + + if self.sim_options[""user_based""]: + all_instances = self.trainset.all_users + else: + all_instances = self.trainset.all_items + others = [(x, self.sim[iid, x]) for x in all_instances() if x != iid] + others = heapq.nlargest(k, others, key=lambda tple: tple[1]) + k_nearest_neighbors = [j for (j, _) in others] + + return k_nearest_neighbors + """""" +The :mod:`surprise.prediction_algorithms.predictions` module defines the +:class:`Prediction` named tuple and the :class:`PredictionImpossible` +exception. +"""""" + + +from collections import namedtuple + + +class PredictionImpossible(Exception): + r""""""Exception raised when a prediction is impossible. + + When raised, the estimation :math:`\\hat{r}_{ui}` is set to the global mean + of all ratings :math:`\\mu`. + """""" + + pass + + +class Prediction(namedtuple(""Prediction"", [""uid"", ""iid"", ""r_ui"", ""est"", ""details""])): + """"""A named tuple for storing the results of a prediction. + + It's wrapped in a class, but only for documentation and printing purposes. + + Args: + uid: The (raw) user id. See :ref:`this note`. + iid: The (raw) item id. See :ref:`this note`. + r_ui(float): The true rating :math:`r_{ui}`. + est(float): The estimated rating :math:`\\\\hat{r}_{ui}`. + details (dict): Stores additional details about the prediction that + might be useful for later analysis. + """""" + + __slots__ = () # for memory saving purpose. + + def __str__(self): + s = f""user: {self.uid:<10} "" + s += f""item: {self.iid:<10} "" + if self.r_ui is not None: + s += f""r_ui = {self.r_ui:1.2f} "" + else: + s += ""r_ui = None "" + s += f""est = {self.est:1.2f} "" + s += str(self.details) + + return s + from .search import GridSearchCV, RandomizedSearchCV +from .split import ( + KFold, + LeaveOneOut, + PredefinedKFold, + RepeatedKFold, + ShuffleSplit, + train_test_split, +) + +from .validation import cross_validate + +__all__ = [ + ""KFold"", + ""ShuffleSplit"", + ""train_test_split"", + ""RepeatedKFold"", + ""LeaveOneOut"", + ""PredefinedKFold"", + ""cross_validate"", + ""GridSearchCV"", + ""RandomizedSearchCV"", +] + """""" +The :mod:`model_selection.split` module +contains various cross-validation iterators. Design and tools are inspired from +the mighty scikit learn. + +The available iterators are: + +.. autosummary:: + :nosignatures: + + KFold + RepeatedKFold + ShuffleSplit + LeaveOneOut + PredefinedKFold + +This module also contains a function for splitting datasets into trainset and +testset: + +.. autosummary:: + :nosignatures: + + train_test_split + +"""""" + +import numbers +from collections import defaultdict +from itertools import chain +from math import ceil, floor + +import numpy as np + +from ..utils import get_rng + + +def get_cv(cv): + """"""Return a 'validated' CV iterator."""""" + + if cv is None: + return KFold(n_splits=5) + if isinstance(cv, numbers.Integral): + return KFold(n_splits=cv) + if hasattr(cv, ""split"") and not isinstance(cv, str): + return cv # str have split + + raise ValueError( + ""Wrong CV object. Expecting None, an int or CV iterator, "" + ""got a {}"".format(type(cv)) + ) + + +class KFold: + """"""A basic cross-validation iterator. + + Each fold is used once as a testset while the k - 1 remaining folds are + used for training. + + See an example in the :ref:`User Guide `. + + Args: + n_splits(int): The number of folds. + random_state(int, RandomState instance from numpy, or ``None``): + Determines the RNG that will be used for determining the folds. 
If + int, ``random_state`` will be used as a seed for a new RNG. This is + useful to get the same splits over multiple calls to ``split()``. + If RandomState instance, this same instance is used as RNG. If + ``None``, the current RNG from numpy is used. ``random_state`` is + only used if ``shuffle`` is ``True``. Default is ``None``. + shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter + of the ``split()`` method. Shuffling is not done in-place. Default + is ``True``. + """""" + + def __init__(self, n_splits=5, random_state=None, shuffle=True): + + self.n_splits = n_splits + self.shuffle = shuffle + self.random_state = random_state + + def split(self, data): + """"""Generator function to iterate over trainsets and testsets. + + Args: + data(:obj:`Dataset`): The data containing + ratings that will be divided into trainsets and testsets. + + Yields: + tuple of (trainset, testset) + """""" + + if self.n_splits > len(data.raw_ratings) or self.n_splits < 2: + raise ValueError( + ""Incorrect value for n_splits={}. "" + ""Must be >=2 and less than the number "" + ""of ratings"".format(len(data.raw_ratings)) + ) + + # We use indices to avoid shuffling the original data.raw_ratings list. + indices = np.arange(len(data.raw_ratings)) + + if self.shuffle: + get_rng(self.random_state).shuffle(indices) + + start, stop = 0, 0 + for fold_i in range(self.n_splits): + start = stop + stop += len(indices) // self.n_splits + if fold_i < len(indices) % self.n_splits: + stop += 1 + + raw_trainset = [ + data.raw_ratings[i] for i in chain(indices[:start], indices[stop:]) + ] + raw_testset = [data.raw_ratings[i] for i in indices[start:stop]] + + trainset = data.construct_trainset(raw_trainset) + testset = data.construct_testset(raw_testset) + + yield trainset, testset + + def get_n_folds(self):" +" + + return self.n_splits + + +class RepeatedKFold: + """""" + Repeated :class:`KFold` cross validator. + + Repeats :class:`KFold` n times with different randomization in each + repetition. + + See an example in the :ref:`User Guide `. + + Args: + n_splits(int): The number of folds. + n_repeats(int): The number of repetitions. + random_state(int, RandomState instance from numpy, or ``None``): + Determines the RNG that will be used for determining the folds. If + int, ``random_state`` will be used as a seed for a new RNG. This is + useful to get the same splits over multiple calls to ``split()``. + If RandomState instance, this same instance is used as RNG. If + ``None``, the current RNG from numpy is used. ``random_state`` is + only used if ``shuffle`` is ``True``. Default is ``None``. + shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter + of the ``split()`` method. Shuffling is not done in-place. Default + is ``True``. + """""" + + def __init__(self, n_splits=5, n_repeats=10, random_state=None): + + self.n_repeats = n_repeats + self.random_state = random_state + self.n_splits = n_splits + + def split(self, data): + """"""Generator function to iterate over trainsets and testsets. + + Args: + data(:obj:`Dataset`): The data containing + ratings that will be divided into trainsets and testsets. + + Yields: + tuple of (trainset, testset) + """""" + + rng = get_rng(self.random_state) + + for _ in range(self.n_repeats): + cv = KFold(n_splits=self.n_splits, random_state=rng, shuffle=True) + yield from cv.split(data) + + def get_n_folds(self): + + return self.n_repeats * self.n_splits + + +class ShuffleSplit: + """"""A basic cross-validation iterator with random trainsets and testsets. 
+ + Contrary to other cross-validation strategies, random splits do not + guarantee that all folds will be different, although this is still very + likely for sizeable datasets. + + See an example in the :ref:`User Guide `. + + Args: + n_splits(int): The number of folds. + test_size(float or int ``None``): If float, it represents the + proportion of ratings to include in the testset. If int, + represents the absolute number of ratings in the testset. If + ``None``, the value is set to the complement of the trainset size. + Default is ``.2``. + train_size(float or int or ``None``): If float, it represents the + proportion of ratings to include in the trainset. If int, + represents the absolute number of ratings in the trainset. If + ``None``, the value is set to the complement of the testset size. + Default is ``None``. + random_state(int, RandomState instance from numpy, or ``None``): + Determines the RNG that will be used for determining the folds. If + int, ``random_state`` will be used as a seed for a new RNG. This is + useful to get the same splits over multiple calls to ``split()``. + If RandomState instance, this same instance is used as RNG. If + ``None``, the current RNG from numpy is used. ``random_state`` is + only used if ``shuffle`` is ``True``. Default is ``None``. + shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter + of the ``split()`` method. Shuffling is not done in-place. Setting + this to `False` defeats the purpose of this iterator, but it's + useful for the implementation of :func:`train_test_split`. Default + is ``True``. + """""" + + def __init__( + self, + n_splits=5, + test_size=0.2, + train_size=None, + random_state=None, + shuffle=True, + ): + + if n_splits <= 0: + raise ValueError( + ""n_splits = {} should be strictly greater than "" ""0."".format(n_splits) + ) + if test_size is not None and test_size <= 0: + raise ValueError( + ""test_size={} should be strictly greater than "" ""0"".format(test_size) + ) + + if train_size is not None and train_size <= 0: + raise ValueError( + ""train_size={} should be strictly greater than "" ""0"".format(train_size) + ) + + self.n_splits = n_splits + self.test_size = test_size + self.train_size = train_size + self.random_state = random_state + self.shuffle = shuffle + + def validate_train_test_sizes(self, test_size, train_size, n_ratings): + + if test_size is not None and test_size >= n_ratings: + raise ValueError( + ""test_size={} should be less than the number of "" + ""ratings {}"".format(test_size, n_ratings) + ) + + if train_size is not None and train_size >= n_ratings: + raise ValueError( + ""train_size={} should be less than the number of"" + "" ratings {}"".format(train_size, n_ratings) + ) + + if np.asarray(test_size).dtype.kind == ""f"": + test_size = ceil(test_size * n_ratings) + + if train_size is None: + train_size = n_ratings - test_size + elif np.asarray(train_size).dtype.kind == ""f"": + train_size = floor(train_size * n_ratings) + + if test_size is None: + test_size = n_ratings - train_size + + if train_size + test_size > n_ratings: + raise ValueError( + ""The sum of train_size and test_size ({}) "" + ""should be smaller than the number of "" + ""ratings {}."".format(train_size + test_size, n_ratings) + ) + + return int(train_size), int(test_size) + + def split(self, data): + """"""Generator function to iterate over trainsets and testsets. + + Args: + data(:obj:`Dataset`): The data containing + ratings that will be divided into trainsets and testsets. 
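+# Iteration sketch for the cross-validation iterators documented above (KFold shown;
+# ShuffleSplit is used the same way): each split() call yields (trainset, testset)
+# pairs. Assumes `data` and `algo` from the earlier sketches.
+from surprise import accuracy
+from surprise.model_selection import KFold
+
+kf = KFold(n_splits=3, random_state=0, shuffle=True)
+for trainset, testset in kf.split(data):
+    algo.fit(trainset)
+    accuracy.rmse(algo.test(testset))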
+ + Yields: + tuple of (trainset, testset) + """""" + + test_size, train_size = self.validate_train_test_sizes( + self.test_size, self.train_size, len(data.raw_ratings) + ) + rng = get_rng(self.random_state) + + for _ in range(self.n_splits): + + if self.shuffle: + permutation = rng.permutation(len(data.raw_ratings)) + else: + permutation = np.arange(len(data.raw_ratings)) + + raw_trainset = [data.raw_ratings[i] for i in permutation[:test_size]] + raw_testset = [ + data.raw_ratings[i] + for i in permutation[test_size : (test_size + train_size)] + ] + + trainset = data.construct_trainset(raw_trainset) + testset = data.construct_testset(raw_testset) + + yield trainset, testset + + def get_n_folds(self): + + return self.n_splits + + +def train_test_split( + data, test_size=0.2, train_size=None, random_state=None, shuffle=True +): + """"""Split a dataset into trainset and testset. + + See an example in the :ref:`User Guide `. + + Note: this function cannot be used as a cross-validation iterator. + + Args: + data(:obj:`Dataset `): The dataset to split + into trainset and testset. + test_size(float or int ``None``): If float, it represents the + proportion of ratings to include in the testset. If int, + represents the absolute number of ratings in the testset. If + ``None``, the value is set to the complement of the trainset size. + Default is ``.2``. + train_size(float or int or ``None``): If float, it represents the + proportion of ratings to include in the trainset. If int, + represents the absolute number of ratings in the trainset. If + ``None``, the value is set to the complement of the testset size. + Default is ``None``. + random_state(int, RandomState instance from numpy, or ``None``): + Determines the RNG that will be used for determining the folds. If + int, ``random_state`` will be used as a seed for a new RNG. This is + useful to get the same splits over multiple calls to ``split()``. + If RandomState instance, this same instance is used as RNG. If + ``None``, the current RNG from numpy is used. ``random_state`` is + only used if ``shuffle`` is ``True``. Default is ``None``. + shuffle(bool): Whether to shuffle the ratings in the ``data`` + parameter. Shuffling is not done in-place. Default is ``True``. + """""" + ss = ShuffleSplit( + n_splits=1, + test_size=test_size, + train_size=train_size, + random_state=random_state, + shuffle=shuffle, + ) + return next(ss.split(data)) + + +class LeaveOneOut: + """"""Cross-validation iterator where each user has exactly one rating in the + testset. + + Contrary to other cross-validation strategies, ``LeaveOneOut`` does not + guarantee that all folds will be different, although this is still very + likely for sizeable datasets. + + See an example in the :ref:`User Guide `. + + Args: + n_splits(int): The number of folds. + random_state(int, RandomState instance from numpy, or ``None``): + Determines the RNG that will be used for determining the folds. If + int, ``random_state`` will be used as a seed for a new RNG. This is + useful to get the same splits over multiple calls to ``split()``. + If RandomState instance, this same instance is used as RNG. If + ``None``, the current RNG from numpy is used. ``random_state`` is + only used if ``shuffle`` is ``True``. Default is ``None``. + min_n_ratings(int): Minimum number of ratings for each user in the + trainset. E.g. if ``min_n_ratings`` is ``2``, we are sure each user + has at least ``2`` ratings in the trainset (and ``1`` in the + testset). Other users are discarded. 
Default is ``0``, so some + users (having only one rating) may be in the testset and not in the + trainset. + """""" + + def __init__(self, n_splits=5, random_state=None, min_n_ratings=0): + + self.n_splits = n_splits + self.random_state = random_state + self.min_n_ratings = min_n_ratings + + def split(self, data): + """"""Generator function to iterate over trainsets and testsets. + + Args: + data(:obj:`Dataset`): The data containing + ratings that will be divided into trainsets and testsets. + + Yields: + tuple of (trainset, testset) + """""" + + # map ratings to the users ids + user_ratings = defaultdict(list) + for uid, iid, r_ui, _ in data.raw_ratings: + user_ratings[uid].append((uid, iid, r_ui, None)) + + rng = get_rng(self.random_state) + + for _ in range(self.n_splits): + # for each user, randomly choose a rating and put it in the + # testset. + raw_trainset, raw_testset = [], [] + for uid, ratings in user_ratings.items(): + if len(ratings) > self.min_n_ratings: + i = rng.randint(0, len(ratings)) + raw_testset.append(ratings[i]) + raw_trainset += [ + rating for (j, rating) in enumerate(ratings) if j != i + ] + + if not raw_trainset: + raise ValueError( + ""Could not build any trainset. Maybe "" ""min_n_ratings is too high?"" + ) + trainset = data.construct_trainset(raw_trainset) + testset = data.construct_testset(raw_testset) + + yield trainset, testset + + def get_n_folds(self): + + return self.n_splits + + +class PredefinedKFold: + """"""A cross-validation iterator to when a dataset has been loaded with the + :meth:`load_from_folds ` + method. + + See an example in the :ref:`User Guide `. + """""" + + def split(self, data): + """"""Generator function to iterate over trainsets and testsets. + + Args: + data(:obj:`Dataset`): The data containing + ratings that will be divided into trainsets and testsets. + + Yields: + tuple of (trainset, testset) + """""" + + self.n_splits = len(data.folds_files) + for train_file, test_file in data.folds_files: + + raw_trainset = data.read_ratings(train_file) + raw_testset = data.read_ratings(test_file) + trainset = data.construct_trainset(raw_trainset) + testset = data.construct_testset(raw_testset) + + yield trainset, testset + + def get_n_folds(self): + + return self.n_splits + """""" +The validation module contains the cross_validate function, inspired from +the mighty scikit learn. +"""""" + +import time + +import numpy as np +from joblib import delayed, Parallel + +from .. import accuracy + +from .split import get_cv + + +def cross_validate( + algo, + data, + measures=[""rmse"", ""mae""], + cv=None, + return_train_measures=False, + n_jobs=1, + pre_dispatch=""2*n_jobs"", + verbose=False, +): + """""" + Run a cross validation procedure for a given algorithm, reporting accuracy + measures and computation times. + + See an example in the :ref:`User Guide `. + + Args: + algo(:obj:`AlgoBase \\ + `): + The algorithm to evaluate. + data(:obj:`Dataset `): The dataset on which + to evaluate the algorithm. + measures(list of string): The performance measures to compute. Allowed + names are function names as defined in the :mod:`accuracy + ` module. Default is ``['rmse', 'mae']``. + cv(cross-validation iterator, int or ``None``): Det" +"ermines how the + ``data`` parameter will be split (i.e. how trainsets and testsets + will be defined). If an int is passed, :class:`KFold + ` is used with the + appropriate ``n_splits`` parameter. If ``None``, :class:`KFold + ` is used with + ``n_splits=5``. 
+ return_train_measures(bool): Whether to compute performance measures on + the trainsets. Default is ``False``. + n_jobs(int): The maximum number of folds evaluated in parallel. + + - If ``-1``, all CPUs are used. + - If ``1`` is given, no parallel computing code is used at all,\\ + which is useful for debugging. + - For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\ + used. For example, with ``n_jobs = -2`` all CPUs but one are\\ + used. + + Default is ``1``. + pre_dispatch(int or string): Controls the number of jobs that get + dispatched during parallel execution. Reducing this number can be + useful to avoid an explosion of memory consumption when more jobs + get dispatched than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately created\\ + and spawned. Use this for lightweight and fast-running\\ + jobs, to avoid delays due to on-demand spawning of the\\ + jobs. + - An int, giving the exact number of total jobs that are\\ + spawned. + - A string, giving an expression as a function of ``n_jobs``,\\ + as in ``'2*n_jobs'``. + + Default is ``'2*n_jobs'``. + verbose(int): If ``True`` accuracy measures for each split are printed, + as well as train and test times. Averages and standard deviations + over all splits are also reported. Default is ``False``: nothing is + printed. + + Returns: + dict: A dict with the following keys: + + - ``'test_*'`` where ``*`` corresponds to a lower-case accuracy + measure, e.g. ``'test_rmse'``: numpy array with accuracy values + for each testset. + + - ``'train_*'`` where ``*`` corresponds to a lower-case accuracy + measure, e.g. ``'train_rmse'``: numpy array with accuracy values + for each trainset. Only available if ``return_train_measures`` is + ``True``. + + - ``'fit_time'``: numpy array with the training time in seconds for + each split. + + - ``'test_time'``: numpy array with the testing time in seconds for + each split. + + """""" + + measures = [m.lower() for m in measures] + + cv = get_cv(cv) + + delayed_list = ( + delayed(fit_and_score)(algo, trainset, testset, measures, return_train_measures) + for (trainset, testset) in cv.split(data) + ) + out = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch)(delayed_list) + + (test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out) + + test_measures = dict() + train_measures = dict() + ret = dict() + for m in measures: + # transform list of dicts into dict of lists + # Same as in GridSearchCV.fit() + test_measures[m] = np.asarray([d[m] for d in test_measures_dicts]) + ret[""test_"" + m] = test_measures[m] + if return_train_measures: + train_measures[m] = np.asarray([d[m] for d in train_measures_dicts]) + ret[""train_"" + m] = train_measures[m] + + ret[""fit_time""] = fit_times + ret[""test_time""] = test_times + + if verbose: + print_summary( + algo, + measures, + test_measures, + train_measures, + fit_times, + test_times, + cv.n_splits, + ) + + return ret + + +def fit_and_score(algo, trainset, testset, measures, return_train_measures=False): + """"""Helper method that trains an algorithm and compute accuracy measures on + a testset. Also report train and test times. + + Args: + algo(:obj:`AlgoBase \\ + `): + The algorithm to use. + trainset(:obj:`Trainset `): The trainset. + testset(:obj:`testset`): The testset. + measures(list of string): The performance measures to compute. Allowed + names are function names as defined in the :mod:`accuracy + ` module. 
+ return_train_measures(bool): Whether to compute performance measures on + the trainset. Default is ``False``. + + Returns: + tuple: A tuple containing: + + - A dictionary mapping each accuracy metric to its value on the + testset (keys are lower case). + + - A dictionary mapping each accuracy metric to its value on the + trainset (keys are lower case). This dict is empty if + return_train_measures is False. + + - The fit time in seconds. + + - The testing time in seconds. + """""" + + start_fit = time.time() + algo.fit(trainset) + fit_time = time.time() - start_fit + start_test = time.time() + predictions = algo.test(testset) + test_time = time.time() - start_test + + if return_train_measures: + train_predictions = algo.test(trainset.build_testset()) + + test_measures = dict() + train_measures = dict() + for m in measures: + f = getattr(accuracy, m.lower()) + test_measures[m] = f(predictions, verbose=0) + if return_train_measures: + train_measures[m] = f(train_predictions, verbose=0) + + return test_measures, train_measures, fit_time, test_time + + +def print_summary( + algo, measures, test_measures, train_measures, fit_times, test_times, n_splits +): + """"""Helper for printing the result of cross_validate."""""" + + print( + ""Evaluating {} of algorithm {} on {} split(s)."".format( + "", "".join(m.upper() for m in measures), algo.__class__.__name__, n_splits + ) + ) + print() + + row_format = ""{:<18}"" + ""{:<8}"" * (n_splits + 2) + s = row_format.format( + """", *[f""Fold {i + 1}"" for i in range(n_splits)] + [""Mean""] + [""Std""] + ) + s += ""\\n"" + s += ""\\n"".join( + row_format.format( + key.upper() + "" (testset)"", + *[f""{v:1.4f}"" for v in vals] + + [f""{np.mean(vals):1.4f}""] + + [f""{np.std(vals):1.4f}""], + ) + for (key, vals) in test_measures.items() + ) + if train_measures: + s += ""\\n"" + s += ""\\n"".join( + row_format.format( + key.upper() + "" (trainset)"", + *[f""{v:1.4f}"" for v in vals] + + [f""{np.mean(vals):1.4f}""] + + [f""{np.std(vals):1.4f}""], + ) + for (key, vals) in train_measures.items() + ) + s += ""\\n"" + s += row_format.format( + ""Fit time"", + *[f""{t:.2f}"" for t in fit_times] + + [f""{np.mean(fit_times):.2f}""] + + [f""{np.std(fit_times):.2f}""], + ) + s += ""\\n"" + s += row_format.format( + ""Test time"", + *[f""{t:.2f}"" for t in test_times] + + [f""{np.mean(test_times):.2f}""] + + [f""{np.std(test_times):.2f}""], + ) + print(s) + from abc import ABC, abstractmethod +from itertools import product + +import numpy as np +from joblib import delayed, Parallel + +from ..dataset import DatasetUserFolds +from ..utils import get_rng + +from .split import get_cv +from .validation import fit_and_score + + +class BaseSearchCV(ABC): + """"""Base class for hyper parameter search with cross-validation."""""" + + @abstractmethod + def __init__( + self, + algo_class, + measures=[""rmse"", ""mae""], + cv=None, + refit=False, + return_train_measures=False, + n_jobs=1, + pre_dispatch=""2*n_jobs"", + joblib_verbose=0, + ): + + self.algo_class = algo_class + self.measures = [measure.lower() for measure in measures] + self.cv = cv + + if isinstance(refit, str): + if refit.lower() not in self.measures: + raise ValueError( + ""It looks like the measure you want to use "" + ""with refit ({}) is not in the measures "" + ""parameter"" + ) + + self.refit = refit.lower() + + elif refit is True: + self.refit = self.measures[0] + + else: + self.refit = False + + self.return_train_measures = return_train_measures + self.n_jobs = n_jobs + self.pre_dispatch = pre_dispatch + 
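+        # n_jobs, pre_dispatch and joblib_verbose are passed straight through
+        # to joblib.Parallel() in fit(); joblib_verbose only controls logging.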
self.joblib_verbose = joblib_verbose + + def _parse_options(self, params): + # As sim_options and bsl_options are dictionaries, they require a + # special treatment. + + if ""sim_options"" in params: + sim_options = params[""sim_options""] + sim_options_list = [ + dict(zip(sim_options, v)) for v in product(*sim_options.values()) + ] + params[""sim_options""] = sim_options_list + + if ""bsl_options"" in params: + bsl_options = params[""bsl_options""] + bsl_options_list = [ + dict(zip(bsl_options, v)) for v in product(*bsl_options.values()) + ] + params[""bsl_options""] = bsl_options_list + + return params + + def fit(self, data): + """"""Runs the ``fit()`` method of the algorithm for all parameter + combinations, over different splits given by the ``cv`` parameter. + + Args: + data (:obj:`Dataset `): The dataset on + which to evaluate the algorithm, in parallel. + """""" + + if self.refit and isinstance(data, DatasetUserFolds): + raise ValueError( + ""refit cannot be used when data has been "" + ""loaded with load_from_folds()."" + ) + + cv = get_cv(self.cv) + + delayed_list = ( + delayed(fit_and_score)( + self.algo_class(**params), + trainset, + testset, + self.measures, + self.return_train_measures, + ) + for params, (trainset, testset) in product( + self.param_combinations, cv.split(data) + ) + ) + out = Parallel( + n_jobs=self.n_jobs, + pre_dispatch=self.pre_dispatch, + verbose=self.joblib_verbose, + )(delayed_list) + + (test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out) + + # test_measures_dicts is a list of dict like this: + # [{'mae': 1, 'rmse': 2}, {'mae': 2, 'rmse': 3} ...] + # E.g. for 5 splits, the first 5 dicts are for the first param + # combination, the next 5 dicts are for the second param combination, + # etc... + # We convert it into a dict of list: + # {'mae': [1, 2, ...], 'rmse': [2, 3, ...]} + # Each list is still of size n_parameters_combinations * n_splits. + # Then, reshape each list to have 2-D arrays of shape + # (n_parameters_combinations, n_splits). This way we can easily compute + # the mean and std dev over all splits or over all param comb. 
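+        # For example, with 2 parameter combinations and 3-fold CV, the raw
+        # list for one measure holds 6 values and is reshaped to shape (2, 3):
+        #   [a1, a2, a3, b1, b2, b3]  ->  [[a1, a2, a3],
+        #                                  [b1, b2, b3]]
+        # so row i holds the per-fold scores of parameter combination i.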
+ test_measures = dict() + train_measures = dict() + new_shape = (len(self.param_combinations), cv.get_n_folds()) + for m in self.measures: + test_measures[m] = np.asarray([d[m] for d in test_measures_dicts]) + test_measures[m] = test_measures[m].reshape(new_shape) + if self.return_train_measures: + train_measures[m] = np.asarray([d[m] for d in train_measures_dicts]) + train_measures[m] = train_measures[m].reshape(new_shape) + + cv_results = dict() + best_index = dict() + best_params = dict() + best_score = dict() + best_estimator = dict() + for m in self.measures: + # cv_results: set measures for each split and each param comb + for split in range(cv.get_n_folds()): + cv_results[f""split{split}_test_{m}""] = test_measures[m][:, split] + if self.return_train_measures: + cv_results[f""split{split}_train_{m}""] = train_measures[m][:, split] + + # cv_results: set mean and std over all splits (testset and + # trainset) for each param comb + mean_test_measures = test_measures[m].mean(axis=1) + cv_results[f""mean_test_{m}""] = mean_test_measures + cv_results[f""std_test_{m}""] = test_measures[m].std(axis=1) + if self.return_train_measures: + mean_train_measures = train_measures[m].mean(axis=1) + cv_results[f""mean_train_{m}""] = mean_train_measures + cv_results[f""std_train_{m}""] = train_measures[m].std(axis=1) + + # cv_results: set rank of each param comb + # also set best_index, and best_xxxx attributes + indices = cv_results[f""mean_test_{m}""].argsort() + cv_results[f""rank_test_{m}""] = np.empty_like(indices) + if m in (""mae"", ""rmse"", ""mse""): + cv_results[f""rank_test_{m}""][indices] = ( + np.arange(len(indices)) + 1 + ) # sklearn starts at 1 as well + best_index[m] = mean_test_measures.argmin() + elif m in (""fcp"",): + cv" +"_results[f""rank_test_{m}""][indices] = np.arange(len(indices), 0, -1) + best_index[m] = mean_test_measures.argmax() + best_params[m] = self.param_combinations[best_index[m]] + best_score[m] = mean_test_measures[best_index[m]] + best_estimator[m] = self.algo_class(**best_params[m]) + + # Cv results: set fit and train times (mean, std) + fit_times = np.array(fit_times).reshape(new_shape) + test_times = np.array(test_times).reshape(new_shape) + for s, times in zip((""fit"", ""test""), (fit_times, test_times)): + cv_results[f""mean_{s}_time""] = times.mean(axis=1) + cv_results[f""std_{s}_time""] = times.std(axis=1) + + # cv_results: set params key and each param_* values + cv_results[""params""] = self.param_combinations + for param in self.param_combinations[0]: + cv_results[""param_"" + param] = [ + comb[param] for comb in self.param_combinations + ] + + if self.refit: + best_estimator[self.refit].fit(data.build_full_trainset()) + + self.best_index = best_index + self.best_params = best_params + self.best_score = best_score + self.best_estimator = best_estimator + self.cv_results = cv_results + + def test(self, testset, verbose=False): + """"""Call ``test()`` on the estimator with the best found parameters + (according the the ``refit`` parameter). See :meth:`AlgoBase.test() + `. + + Only available if ``refit`` is not ``False``. + """""" + + if not self.refit: + raise ValueError(""refit is False, cannot use test()"") + + return self.best_estimator[self.refit].test(testset, verbose) + + def predict(self, *args): + """"""Call ``predict()`` on the estimator with the best found parameters + (according the the ``refit`` parameter). See :meth:`AlgoBase.predict() + `. + + Only available if ``refit`` is not ``False``. 
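+
+        A minimal sketch (illustrative only; it assumes ``SVD`` and a loaded
+        ``data`` Dataset such as the builtin ``ml-100k``, with this module
+        importable as ``surprise.model_selection``)::
+
+            gs = GridSearchCV(SVD, {'n_epochs': [5, 10]}, measures=['rmse'],
+                              refit=True)
+            gs.fit(data)
+            pred = gs.predict('196', '302')  # raw user id, raw item id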
+ """""" + + if not self.refit: + raise ValueError(""refit is False, cannot use predict()"") + + return self.best_estimator[self.refit].predict(*args) + + +class GridSearchCV(BaseSearchCV): + """"""The :class:`GridSearchCV` class computes accuracy metrics for an + algorithm on various combinations of parameters, over a cross-validation + procedure. This is useful for finding the best set of parameters for a + prediction algorithm. It is analogous to `GridSearchCV + `_ from scikit-learn. + + See an example in the :ref:`User Guide `. + + Args: + algo_class(:obj:`AlgoBase \\ + `): The class + of the algorithm to evaluate. + param_grid(dict): Dictionary with algorithm parameters as keys and + list of values as keys. All combinations will be evaluated with + desired algorithm. Dict parameters such as ``sim_options`` require + special treatment, see :ref:`this note`. + measures(list of string): The performance measures to compute. Allowed + names are function names as defined in the :mod:`accuracy + ` module. Default is ``['rmse', 'mae']``. + cv(cross-validation iterator, int or ``None``): Determines how the + ``data`` parameter will be split (i.e. how trainsets and testsets + will be defined). If an int is passed, :class:`KFold + ` is used with the + appropriate ``n_splits`` parameter. If ``None``, :class:`KFold + ` is used with + ``n_splits=5``. + refit(bool or str): If ``True``, refit the algorithm on the whole + dataset using the set of parameters that gave the best average + performance for the first measure of ``measures``. Other measures + can be used by passing a string (corresponding to the measure + name). Then, you can use the ``test()`` and ``predict()`` methods. + ``refit`` can only be used if the ``data`` parameter given to + ``fit()`` hasn't been loaded with :meth:`load_from_folds() + `. Default is ``False``. + return_train_measures(bool): Whether to compute performance measures on + the trainsets. If ``True``, the ``cv_results`` attribute will + also contain measures for trainsets. Default is ``False``. + n_jobs(int): The maximum number of parallel training procedures. + + - If ``-1``, all CPUs are used. + - If ``1`` is given, no parallel computing code is used at all,\\ + which is useful for debugging. + - For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\ + used. For example, with ``n_jobs = -2`` all CPUs but one are\\ + used. + + Default is ``1``. + pre_dispatch(int or string): Controls the number of jobs that get + dispatched during parallel execution. Reducing this number can be + useful to avoid an explosion of memory consumption when more jobs + get dispatched than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately created\\ + and spawned. Use this for lightweight and fast-running\\ + jobs, to avoid delays due to on-demand spawning of the\\ + jobs. + - An int, giving the exact number of total jobs that are\\ + spawned. + - A string, giving an expression as a function of ``n_jobs``,\\ + as in ``'2*n_jobs'``. + + Default is ``'2*n_jobs'``. + joblib_verbose(int): Controls the verbosity of joblib: the higher, the + more messages. + + Attributes: + best_estimator (dict of AlgoBase): + Using an accuracy measure as key, get the algorithm that gave the + best accuracy results for the chosen measure, averaged over all + splits. + best_score (dict of floats): + Using an accuracy measure as key, get the best average score + achieved for that measure. 
+ best_params (dict of dicts): + Using an accuracy measure as key, get the parameters combination + that gave the best accuracy results for the chosen measure (on + average). + best_index (dict of ints): + Using an accuracy measure as key, get the index that can be used + with ``cv_results`` that achieved the highest accuracy for that + measure (on average). + cv_results (dict of arrays): + A dict that contains accuracy measures over all splits, as well as + train and test time for each parameter combination. Can be imported + into a pandas `DataFrame` (see :ref:`example + `). + """""" + + def __init__( + self, + algo_class, + param_grid, + measures=[""rmse"", ""mae""], + cv=None, + refit=False, + return_train_measures=False, + n_jobs=1, + pre_dispatch=""2*n_jobs"", + joblib_verbose=0, + ): + + super().__init__( + algo_class=algo_class, + measures=measures, + cv=cv, + refit=refit, + return_train_measures=return_train_measures, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + joblib_verbose=joblib_verbose, + ) + + self.param_grid = self._parse_options(param_grid.copy()) + self.param_combinations = [ + dict(zip(self.param_grid, v)) for v in product(*self.param_grid.values()) + ] + + +class RandomizedSearchCV(BaseSearchCV): + """"""The :class:`RandomizedSearchCV` class computes accuracy metrics for an + algorithm on various combinations of parameters, over a cross-validation + procedure. As opposed to GridSearchCV, which uses an exhaustive + combinatorial approach, RandomizedSearchCV samples randomly from the + parameter space. This is useful for finding the best set of parameters + for a prediction algorithm, especially using a coarse to fine approach. + It is analogous to `RandomizedSearchCV `_ from + scikit-learn. + + See an example in the :ref:`User Guide `. + + Args: + algo_class(:obj:`AlgoBase \\ + `): The class + of the algorithm to evaluate. + param_distributions(dict): Dictionary with algorithm parameters as + keys and distributions or lists of parameters to try. Distributions + must provide a rvs method for sampling (such as those from + scipy.stats.distributions). If a list is given, it is sampled + uniformly. Parameters will be sampled n_iter times. + n_iter(int): Number of times parameter settings are sampled. Default is + ``10``. + measures(list of string): The performance measures to compute. Allowed + names are function names as defined in the :mod:`accuracy + ` module. Default is ``['rmse', 'mae']``. + cv(cross-validation iterator, int or ``None``): Determines how the + ``data`` parameter will be split (i.e. how trainsets and testsets + will be defined). If an int is passed, :class:`KFold + ` is used with the + appropriate ``n_splits`` parameter. If ``None``, :class:`KFold + ` is used with + ``n_splits=5``. + refit(bool or str): If ``True``, refit the algorithm on the whole + dataset using the set of parameters that gave the best average + performance for the first measure of ``measures``. Other measures + can be used by passing a string (corresponding to the measure + name). Then, you can use the ``test()`` and ``predict()`` methods. + ``refit`` can only be used if the ``data`` parameter given to + ``fit()`` hasn't been loaded with :meth:`load_from_folds() + `. Default is ``False``. + return_train_measures(bool): Whether to compute performance measures on + the trainsets. If ``True``, the ``cv_results`` attribute will + also contain measures for trainsets. Default is ``False``. + n_jobs(int): The maximum number of parallel training procedures. 
+ + - If ``-1``, all CPUs are used. + - If ``1`` is given, no parallel computing code is used at all,\\ + which is useful for debugging. + - For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\ + used. For example, with ``n_jobs = -2`` all CPUs but one are\\ + used. + + Default is ``1``. + pre_dispatch(int or string): Controls the number of jobs that get + dispatched during parallel execution. Reducing this number can be + useful to avoid an explosion of memory consumption when more jobs + get dispatched than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately created\\ + and spawned. Use this for lightweight and fast-running\\ + jobs, to avoid delays due to on-demand spawning of the\\ + jobs. + - An int, giving the exact number of total jobs that are\\ + spawned. + - A string, giving an expression as a function of ``n_jobs``,\\ + as in ``'2*n_jobs'``. + + Default is ``'2*n_jobs'``. + random_state(int, RandomState or None): Pseudo random number + generator seed used for random uniform sampling from lists of + possible values instead of scipy.stats distributions. If int, + ``random_state`` is the seed used by the random number generator. + If ``RandomState`` instance, ``random_state`` is the random number + generator. If ``None``, the random number generator is the + RandomState instance used by ``np.random``. Default is ``None``. + joblib_verbose(int): Controls the verbosity of joblib: the higher, the + more messages. + + Attributes: + best_estimator (dict of AlgoBase): + Using an accuracy measure as key, get the algorithm that gave the + best accuracy results for the chosen measure, averaged over all + splits. + best_score (dict of floats): + Using an accuracy measure as key, get the best average score + achieved for that measure. + best_params (dict of dicts): + Using an accuracy measure as key, get the parameters combination + that gave the best accuracy results for the chosen measure (on + average). + best_index (dict of ints): + Using an accuracy measure as key, get the index that can be used + with ``cv_results`` that achieved the highest accuracy for that + measure (on average). + cv_results (dict of arrays): + A dict that contains accuracy measures over all splits, as well as + train and test time for each parameter combination. Can be imported + into a pandas `DataFrame` (see :ref:`example + `). + """""" + + def __init__( + self, + algo_class, + param_distributions, + n_iter=10, + measures=[""rmse"", ""mae""], + cv=None, + refit=False, + return_train_measures=False, + n_jobs=1, + pre_dispatch=""2*n_jobs"", + random_state=None, + joblib_verbose=0, + ): + + super().__init__( + algo_class=algo_class, + measures=measures, + cv=cv, + refit=refit, + return_train_measures=return_train_measures, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + joblib_verbose=joblib_verbose, + ) + + self.n_iter = n_iter + self.random_state = random_state + self.param_distributions = self._parse_options(param_distributions.copy()) + self.param_combinations = self._sample_parameters( + self.param_distributions, self.n_iter, self.random_state + ) + + @staticmethod + def _sample_parameters(param_distributions, n_iter, random_state=None): + """"""Samples ``n_iter`` parameter combinations from + ``param_distributions`` using ``random_state`` as a seed. + + Non-deterministic iterable over random candidate combinations for + hyper-parameter search. If all parameters are presented as a list, + sampling without replacement is performed. 
If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not" +" + accept a custom RNG instance and always use the singleton RNG from + ``numpy.random``. Hence setting ``random_state`` will not guarantee a + deterministic iteration whenever ``scipy.stats`` distributions are used + to define the parameter search space. Deterministic behavior is however + guaranteed from SciPy 0.16 onwards. + + Args: + param_distributions(dict): Dictionary where the keys are + parameters and values are distributions from which a parameter + is to be sampled. Distributions either have to provide a + ``rvs`` function to sample from them, or can be given as a list + of values, where a uniform distribution is assumed. + n_iter(int): Number of parameter settings produced. + Default is ``10``. + random_state(int, RandomState instance or None): + Pseudo random number generator seed used for random uniform + sampling from lists of possible values instead of scipy.stats + distributions. If ``None``, the random number generator is the + random state instance used by np.random. Default is ``None``. + + Returns: + combos(list): List of parameter dictionaries with sampled values. + """""" + + # check if all distributions are given as lists + # if so, sample without replacement + all_lists = np.all( + [not hasattr(v, ""rvs"") for v in param_distributions.values()] + ) + rnd = get_rng(random_state) + + # sort for reproducibility + items = sorted(param_distributions.items()) + + if all_lists: + # create exhaustive combinations + param_grid = [ + dict(zip(param_distributions, v)) + for v in product(*param_distributions.values()) + ] + combos = np.random.choice(param_grid, n_iter, replace=False) + + else: + combos = [] + for _ in range(n_iter): + params = dict() + for k, v in items: + if hasattr(v, ""rvs""): + params[k] = v.rvs(random_state=rnd) + else: + params[k] = v[rnd.randint(len(v))] + combos.append(params) + + return combos + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import kfp +import kfp.dsl as dsl +import json +from pathlib import Path + + +class aionpipeline(): + + containerRegistry = str() + containerLabel = str() + containerSecret = str() + + pipelineName = 'AION MLOps Pipeline {0}' + exeCmd = 'python' + codeFile = 'aionCode.py' + mntPoint = '/aion' + inputArg = '-i' + msIP = '0.0.0.0' + port = '8094' + cachingStrategy = 'P0D' + + deafultVolume = '1Gi' + volName = 'aion-pvc' + volMode = 'ReadWriteMany' + fileExt = '.tar.gz' + fileName = 'aion_mlops_pipeline_{0}' + + containerMM = 'modelmonitoring' + containerDI = 'dataingestion' + containerDT = 'datatransformation' + containerFE = 'featureengineering' + containerMR = 'modelregistry' + containerMS = 'modelserving' + containerImage = '{0}/{1}:{2}' + + models = {} + nameSeprator = '-' + modelsLiteral = 'models' + modelNameLiteral = 'modelname' + msTemplate = '{""apiVersion"": ""v1"", ""kind"": ""Pod"", ""metadata"": {""name"": ""{{workflow.name}}-{0}""}, ""spec"": {""containers"": [{""name"": ""{0}"", ""image"": ""{1}"", ""command"": [""python""], ""args"": [""aionCode.py"", ""-ip"", ""{2}"", ""-pn"", ""{3}""],""volumeMounts"": [{""name"": ""aion-pvc"", ""mountPath"": ""{4}""}], ""ports"": [{""name"": ""http"", ""containerPort"": {3}, ""protocol"": ""TCP""}]}], ""imagePullSecrets"": [{""name"": ""{5}""}], ""volumes"": [{""name"": ""aion-pvc"", ""persistentVolumeClaim"": {""claimName"": ""{{workflow.name}}-{6}""}}]}}' + + def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()): + self.models = models + self.containerRegistry = containerRegistry + self.containerLabel = containerLabel + self.containerSecret = containerSecret + + + @dsl.pipeline( + name=pipelineName.format(containerLabel), + description=pipelineName.format(containerLabel), + ) + def aion_mlops(self, inputUri=str(), volSize=deafultVolume): + vop = dsl.VolumeOp( + name=self.volName + self.nameSeprator + self.containerLabel, + resource_name=self.volName, + modes=[self.volMode], + size=volSize + ) + + mm = dsl.ContainerOp( + name=self.containerMM, + image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + self.inputArg, + inputUri, + ], + pvolumes={self.mntPoint: vop.volume} + ) + mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + di = dsl.ContainerOp( + name=self.containerDI, + image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: mm.pvolume} + ) + di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + dt = dsl.ContainerOp( + name=self.containerDT, + image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: di.pvolume} + ) + dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + fe = dsl.ContainerOp( + name=self.containerFE, + image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: dt.pvolume} + ) + fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + + dictMT = {} + listMTOps = [] + + for model in self.models[self.modelsLiteral]: + modelName = model[self.modelNameLiteral] + mt=dsl.ContainerOp( + name=modelName, + 
image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: fe.pvolume}) + mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + listMTOps.append(mt) + dictMT[self.mntPoint]=mt.pvolume + + + mr = dsl.ContainerOp( + name=self.containerMR, + image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes=dictMT + ).after(*tuple(listMTOps)) + + mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + msJson = self.msTemplate.replace(str({0}),self.containerMS).replace(str({1}),self.containerImage.format(self.containerRegistry,self.containerMS,self.containerLabel)).replace(str({2}),self.msIP).replace(str({3}),self.port).replace(str({4}),self.mntPoint).replace(str({5}),self.containerSecret).replace(str({6}),self.volName) + ms = dsl.ResourceOp( + name=self.containerMS + self.nameSeprator + self.containerLabel, + k8s_resource=json.loads(msJson), + ) + ms.after(mr) + + + def compilepl(self, targetPath=str()): + filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt + if targetPath != str(): + filePath = Path(targetPath, filePath) + kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath)) + + + def executepl(self, kfhost=str()): + client = kfp.Client(kfhost) + client.create_run_from_pipeline_func(self.aion_mlops,arguments={}) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from pathlib import Path +import sqlite3 +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + db_file = str(location / self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + + def table_exists(self, name): + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + return len(listOfTables) > 0 + + def read_data(self, table_name, condition = None): + if condition: + query = f""SELECT * FROM {table_name} WHERE ""+condition + else: + query = f""SELECT * FROM {table_name}"" + row = self.cursor.execute(query).fetchall() + return list(row) + + def column_names(self, table_name): + query = f""SELECT * FROM {table_name}"" + row = self.cursor.execute(query).fetchall() + column_names = list(map(lambda x:x[0],self.cursor.description)) + return column_names + # return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self, name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + + def delete_record(self, table_name, col_name, col_value): + try: + query = f""DELETE FROM {table_name} WHERE {col_name}='{col_value}'"" + self.conn.execute(query) + self.conn.commit() + return 'success' + except Exception as e: + print(str(e)) + print(""Deletion Failed"") + return 'error' + def drop_table(self,table_name): + query = f""DROP TABLE {table_name}"" + self.cursor.execute(query) + print(""Table dropped... 
"") + + # Commit your changes in the database + self.conn.commit() + def get_data(self, table_name, col_name, col_value): + query = f""SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"" + row = self.cursor.execute(query).fetchone() + if (row == None): + return [] + return list(row) + + def execute_query(self,query): + self.cursor.execute(query) + self.conn.commit() + + def write_data(self, data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def update_dict_data(self,data:dict,condition,table_name): + if not data: + return + if not table_name: + raise ValueError('Database table name is not provided') + updates = '' + #TODO validation of keys + for i,kv in enumerate(data.items()): + if i: + updates += ',' + updates += f'""{kv[0]}""=""{kv[1]}""' + if condition == '': + update_query = f'UPDATE {table_name} SET {updates}' + else: + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + + def update_data(self,updates,condition,table_name): + if condition == '': + update_query = f'UPDATE {table_name} SET {updates}' + else: + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + + def close(self): + self.conn.close() # -*- coding: utf-8 -*- +import os +import glob, os +import pandas as pd +from openai.embeddings_utils import cosine_similarity +import numpy as np +from openai.embeddings_utils import get_embedding +import tiktoken +import openai +import importlib.util +from sklearn.preprocessing import StandardScaler +import numpy as np +from sklearn.cluster import DBSCAN +from sklearn import metrics +import time +from tqdm import tqdm +import concurrent.futures +from openai.error import RateLimitError, Timeout +try: + import chromadb + from chromadb.api.types import Documents, Embeddings +except: + #Looks no chromadb installed,just proceed to use csv embedd + pass + +from openai.embeddings_utils import get_embedding +import json +from openai.embeddings_utils import cosine_similarity +from langchain.schema import Document +from langchain.vectorstores import Chroma +import warnings +import logging +warnings.simplefilter(action='ignore', category=FutureWarning) + +" +"""""""Code clone detection parent class, based on user input data,the class will detect similar code snippets in the python file """""" +class CodeCloneDetection: + #Constructor for base inputs + def __init__(self,rootdir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId): + self.rootdir=rootdir + self.embedd_storage_path=embedd_storage_path + self.openai_baseurl=openai_baseurl + self.openai_key=openai_key + self.openai_api_type=openai_api_type + self.openai_api_version=openai_api_version + self.ccdreportpath = os.path.join(self.embedd_storage_path, ""codeCloneReport"") + self.generativeai_chat_model=generativeai_chat_model + self.generativeai_embedding_engine = generativeai_embedding_engine + self.generativeai_embedding_model = generativeai_embedding_model + 
self.generativeai_deploymentId = generativeai_deploymentId + try: + os.makedirs(self.ccdreportpath, exist_ok = True) + except OSError as error: + print(""Directory 'codeclonedetection' can not be created"",self.ccdreportpath) + + try: + self.logpath = os.path.join(self.ccdreportpath,'codeclonelog.log') + logging.basicConfig(level=logging.INFO,filename=self.logpath,filemode='w',format='%(message)s') + self.log = logging.getLogger() + except Exception as e: + print(""code clone log object creation error."",e) + + def get_function_name(self,code): + """""" + Extract function name from a line beginning with ""def "" + """""" + assert code.startswith(""def "") + return code[len(""def ""): code.index(""("")] + + def get_until_no_space(self,all_lines, i) -> str: + """""" + Get all lines until a line outside the function definition is found. + """""" + ret = [all_lines[i]] + for j in range(i + 1, i + 10000): + if j < len(all_lines): + if len(all_lines[j]) == 0 or all_lines[j][0] in ["" "", ""\\t"", "")""]: + ret.append(all_lines[j]) + else: + break + return ""\\n"".join(ret) + + def chunk_functions(self,function_code, chunk_size): + """""" To chunk input for gpt models because max token per model is 4090 """""" + try: + # chunk_size = 1900 + chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)] + except Exception as e: + self.log.info('Error in chunking input prompt data.') + return chunks + + def get_functions(self,filepath): + """""" + Get all functions in a Python file. + """""" + try: + whole_code = open(filepath).read().replace(""\\r"", ""\\n"") + all_lines = whole_code.split(""\\n"") + for i, l in enumerate(all_lines): + if l.startswith(""def ""): + code = self.get_until_no_space(all_lines, i) + function_name = self.get_function_name(code) + yield {""code"": code, ""function_name"": function_name, ""filepath"": filepath} + except Exception as e: + self.log.info(""Error in getting function from file. Error message: \\n""+str(e)) + + def get_clone_function_details(self): + """""" To get available functions from python files """""" + try: + code_root=self.rootdir + from glob import glob + code_files = [y for x in os.walk(code_root) for y in glob(os.path.join(x[0], '*.py'))] + if code_files: + all_funcs = [] + total_locs = 0 + for code_file in code_files: + with open(code_file) as f: + total_locs += len(f.readlines()) + + funcs = list(self.get_functions(code_file)) + for func in funcs: + all_funcs.append(func) + return all_funcs,code_root,code_files,total_locs + else: + self.log.info(""no python files available in the dir:""+str(code_root)) + return {""pythondiles_error"":""No python files are found.""} + except Exception as e: + print(""Error in reading the functions from the given directory. Error message: \\n"",e) + self.log.info(""Error in reading the functions from the given directory. 
Error message: \\n""+str(e)) + + + + def getOpenAICredentials(self): + """""" To set openai credential using user input """""" + #Currently only support openai + try: + package_name = 'openai' + lib_name = importlib.util.find_spec(package_name) + if lib_name is None: + return ""openai_pkg_check_failed"" + else: + + embedding_model_lib ='openai' + # + if isinstance(self.openai_baseurl,str) and isinstance(self.openai_key,str) and isinstance(self.openai_api_type,str): + os.environ['OPENAI_API_TYPE'] = self.openai_api_type + os.environ['OPENAI_API_BASE'] = self.openai_baseurl + # os.environ['OPENAI_API_VERSION'] = '2023-05-15' + # os.environ['OPENAI_API_VERSION'] = ""2022-12-01"" + os.environ['OPENAI_API_VERSION'] = self.openai_api_version + os.environ['OPENAI_API_KEY'] = self.openai_key + + if (embedding_model_lib.lower()=='openai'): + try: + openai.api_type=os.getenv('OPENAI_API_TYPE') + openai.api_base = os.getenv('OPENAI_API_BASE') + openai.api_key = os.getenv('OPENAI_API_KEY') + openai.api_version = os.getenv('OPENAI_API_VERSION') + + + except Exception as e: + self.log.info(""Unable to get openai credentials,please provide proper credentials.""+str(e)) + return {""error_msg"":""openai_environment_error""} + except Exception as e: + print(""Openai credential set and get function error. Error message: \\n"",e) + + return openai.api_type,openai.api_base,openai.api_key,openai.api_version + + + + def get_embedding_local(self,model: str, text: str) -> list[float]: + """""" To get embedding data for single user given prompt text"""""" + try: + response = openai.Embedding.create( + input=text, + engine=self.generativeai_embedding_engine) + except Exception as e: + self.log.info(""openai embedding creation error.""+str(e)) + + return result['data'][0]['embedding'] + + + + def get_embeddings_pyfiles(self,all_funcs): + """""" To get embedding for python functions """""" + + try: + import tiktoken + openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() + encoding = tiktoken.encoding_for_model(""text-embedding-ada-002"") + df = pd.DataFrame(all_funcs) + df[""tokens""] = df[""code""].apply(lambda c: len(encoding.encode(c))) + embedding_cost = df[""tokens""].sum() * (0.0004/1000) + EMBEDDING_FILEPATH=self.ccdreportpath+'\\code_embeddings.csv' + self.log.info(""embedding storage location: ""+str(EMBEDDING_FILEPATH)) + vdb_status = self.get_vdb_status('chromadb') + ##Currently chromadb not integrated + vdb_status = False + if not vdb_status: + df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine=self.generativeai_embedding_engine)) + df['filepath'] = df['filepath'].apply(lambda x: x.replace(self.rootdir, """")) + df.to_csv(EMBEDDING_FILEPATH, index=False) + + else: + df = self.chromadb_embedding(df) + + """""" Please uncomment below, currently assumption is each run we create embedd based on python files dir """""" + import numpy as np + df = pd.read_csv(EMBEDDING_FILEPATH) + df[""code_embedding""] = df[""code_embedding""].apply(eval).apply(np.array) + except Exception as e: + self.log.info(""Error in get_embeddings_pyfiles for embedding conversion process. Error Message: ""+str(e)) + raise Exception(""Error in get_embeddings_pyfiles for embedding conversion process."") + + return df,embedding_cost + + + def search_functions_vectordb(df,db, code_query, n=3, pprint=True, n_lines=7): + """""" Search function for user query (prompt content), used for vector database embedding query option. 
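+
+        Intended to return a ``pandas.DataFrame`` of the ``n`` most similar
+        functions, carrying their ``filepath``, ``function_name``, ``code``
+        and a ``similarities`` score.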
"""""" + try: + docs = db.similarity_search_with_score(code_query )[:n] + docs = [{""similarities"":score, ""code"": d.page_content, **d.metadata} for d,score in docs] + res = pd.DataFrame(docs).drop(""_additional"", axis=1) + ##Uncomment for debug + # if pprint: + # for r in res.iterrows(): + # print(r[1].filepath+"" : ""+r[1].function_name + "" score="" + str(round(r[1].similarities, 3))) + # print(""\\n"".join(r[1].code.split(""\\n"")[:n_lines])) + # print('-'*70) + except Exception as e: + self.log.info(""Error in search_functions_vectordb to get similarity information based on user query. Error Message: ""+str(e)) + raise Exception(""Error in search_functions_csv to get similarity information based on user query."") + + return res + + def search_functions_csv(self,df, code_query, n=3, pprint=True, n_lines=7): + """""" Search function for user query (prompt content), used for csv embedding query option. """""" + try: + embedding = get_embedding(code_query, engine=self.generativeai_embedding_engine) + df['similarities'] = df.code_embedding.apply(lambda x: cosine_similarity(x, embedding)) + res = df.sort_values('similarities', ascending=False) + ## uncomment for debug purpose + # if pprint: + # for r in res.iterrows(): + # print(r[1].filepath+"" : ""+r[1].function_name + "" score="" + str(round(r[1].similarities, 3))) + # print(""\\n"".join(r[1].code.split(""\\n"")[:n_lines])) + # print('-'*70) + except Exception as e: + self.log.info(""Error in search_functions_functions_csv to get similarity information based on user query. Error Message: ""+str(e)) + raise Exception(""Error in search_functions_csv to get similarity information based on user query."") + return res + + def get_prediction(self,prompt_data): + """""" To get prediction for given user data """""" + try: + all_funcs,code_root,code_files,total_locs=self.get_clone_function_details() + if not isinstance(all_funcs,type(None)): + df,embedding_cost=self.get_embeddings_pyfiles(all_funcs) + res = self.search_functions_csv(df, prompt_data, n=3) + return res + else: + return dict({""error"":""Empty_root_directory""}) + except Exception as e: + self.log.info(""Error in get prediction for user prompt information. Error Message: ""+str(e)) + raise Exception(""Error in get prediction for user prompt information. ."") + + def get_vdb_status(self,vdb_name): + """""" To check chromadb python package installed or not"""""" + try: + vdb_name = 'chromadb' + vdb_status=False + lib_name = importlib.util.find_spec(vdb_name) + if lib_name is None: + vdb_status=False + else: + vdb_status=True + ## Processing the files and create a embedding and save it using csv. + except Exception as e: + self.log.info(""Error in checking chromadb installed or not. Error Message: ""+str(e)) + raise Exception(""Error in checking chromadb installed or not. 
."") + ## Currently vector db (chromadb) not implemented, so vdb_status is set as False + vdb_status = False + return vdb_status + + + def create_chroma_db(self,documents, name): + """""" Craete chromadb instance (persistant) """""" + #get openai status + openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() + # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() + try: + from langchain.embeddings.openai import OpenAIEmbeddings + embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1) + except: + from chromadb.utils import embedding_functions + embed_function = embedding_functions.OpenAIEmbeddingFunction( + api_key=openai.api_key, + api_base=openai.api_base, + api_type = openai.api_type, + model_name=self.generativeai_embedding_model + ) + + try: + # chroma_client = chromadb.Client() + persist_directory = self.embedd_storage_path + + chroma_client = chromadb.Client( + Settings( + persist_directory=persist_directory, + chroma_db_impl=""duckdb+parquet"", + ) + ) + # Start from scratch + chroma_client.reset() + chroma_client.persist() + + try: + embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1) + except: + embed_function = OpenAIEmbeddings() + db = Chroma.from_documents(documents, embed_function, persist_directory=persist_directory) + db.persist() + except Exception as e: + self.log.info(""Error in chromadb based embeding creation. Error Message: ""+str(e)) + raise Exception(""Error in chromadb based embeding creation."") + return db,chroma_client + + def chromadb_embedding(self,df): + """""" Base chromadb embedding creation and storage function, it" +"calls above create_chroma_db() to create db. + """""" + try: + documents = df.apply(lambda x: Document(page_content= x[""code""], metadata= {""function_name"": x[""function_name""], ""filepath"": x[""filepath""]}), axis=1) + #setup the chromadb + db,chroma_client = self.create_chroma_db(documents,collection_name) + try: + chromadb_df=pd.DataFrame(db) + except: + db_json = db.get(include=['embeddings', 'documents', 'metadatas']) + chromadb_df = pd.read_json(db_json) + self.log.info(""chromadb_df records (top ~5 records): ""+str(chromadb_df.head(5))) + except Exception as e: + self.log.info(""chromadb embedding error. Error message: ""+str(e)) + + return chromadb_df + + def num_tokens_from_string(self, string: str) -> int: + """""" Get number of tokens of text using tiktokens lib."""""" + encoding = tiktoken.encoding_for_model(""text-embedding-ada-002"") + num_tokens = len(encoding.encode(string)) + return num_tokens + + def validate_code_clone_with_explanation(self,code1, code2, verbose=False): + """""" Validate clone detection code snippet and get explanation from openai chat model (gpt-3.5-turbo) """""" + ## Openai using 4 chars as 1 token, same method here followed. Here,we dont need to call tiktoken lib to save cost. 
+ if (len(code1)/4 >1900): + chunk = self.chunk_functions(code1, 1900) + code1 = chunk[0] + print(""In side , len of code1\\n"",len(code1)) + + if (len(code2)/4 >1900): + chunk = self.chunk_functions(code2, 1900) + code2 = chunk[0] + print(""In side , len of code2\\n"",len(code2)) + try: + SYS_ROLE = ""You are a Senior Code Reviewer, who helps in Code review and integration using code clone detection approach."" + openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() + # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() + prompt = f""""""Given two code snippets, find if they are clones or not with suitable explaination. + + Four types of clone: + + 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. + 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. + 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. + + Use JSON object format with following keys: + + IsClone: (True, False) wheather two code snippets are clone or not. + CloneType: (Exact clone, Parameterized clone, Never-miss clone, Semantic clone) Choose appropriate clone type or ""None"". + Explanation: A short explanation for the above answer. + + ### Code Snippets: + + ## Code 1: + {code1} + + ## Code 2: + {code2} + + ### Answer(Valid JSON object): + """""" + + response = openai.ChatCompletion.create(deployment_id=self.generativeai_deploymentId, + messages=[{""role"": ""system"", ""content"": SYS_ROLE}, + {""role"": ""user"", ""content"": prompt},], + temperature = 0,max_tokens = 3900,request_timeout=90) + + + + text = response['choices'][0]['message']['content'] + if verbose: + self.log.info(""validate_code_clone_with_explanation, text: ""+str(text)) + except Exception as e: + print("" validate_code_clone_with_explanation: \\n"",e) + response = ""OpenAI Model Connection"" + if e.code == ""invalid_request"" and ""token limit"" in e.message.lower(): + # Implement your logic to reduce the length of messages or split them into smaller parts + # Modify messages or take appropriate action + self.log.info(""Given function is too large and exceeds openai chat model token limit,please review the source file function length. ""+str(e)) + + return response + + def validate_code_clone_with_explanation_davinci(self,code1, code2, verbose=False): + """""" Validate clone detection code snippet and get explanation from openai chat model (davinci) """""" + if (len(code1)/4 >1900): + chunk = self.chunk_functions(code1, 1900) + code1 = chunk[0] + if (len(code2)/4 >1900): + chunk = self.chunk_functions(code2, 1900) + code2 = chunk[0] + + try: + SYS_ROLE = ""You are a Senior Code Reviewer, who helps in Code review and integration. Detecting code clone in the repository."" + openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() + # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() + prompt = f""""""Given two code snippets, find if they are clones or not with suitable explaination. + + Four types of clone: + + 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. 
Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. + 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. + 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. + + Use JSON object format with following keys: + + IsClone: (True, False) wheather two code snippets are clone or not. + CloneType: (Exact clone, Parameterized clone, Never-miss clone, Semantic clone) Choose appropriate clone type or ""None"". + Explanation: A short explanation for the above answer. + + ### Code Snippets: + + ## Code 1: + {code1} + + ## Code 2: + {code2} + + ### Answer(Valid JSON object): + """""" + # response = openai.Completion.create(engine='Text-Datvinci-03', prompt=prompt, temperature=0, max_tokens=1166) + response = openai.Completion.create(engine=self.generativeai_chat_model, prompt=prompt, temperature=0, max_tokens=3900) + text = response.choices[0][""text""] + if verbose: + self.log.info(""validate_code_clone_with_explanation, text (chatmodel response) ""+str(text)) + except Exception as e: + response = ""OpenAI Model Connection Error"" + if e.code == ""invalid_request"" and ""token limit"" in e.message.lower(): + # Implement your logic to reduce the length of messages or split them into smaller parts + # Modify messages or take appropriate action + self.log.info(""Given function is too large and exceeds openai chat model token limit,please review the source file function length. Error msg: ""+str(e)) + + return response + + + + + +## For dbscan based clone detction from python files, we use CodeCloneDetection parent class. (Using inheritance) +class CodeCloneDetectionFiles(CodeCloneDetection): + """"""For dbscan based clone detction from python files, we use CodeCloneDetection + parent class. (Using inheritance) + """""" + def __init__(self,root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId): + super().__init__(root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId) + def get_embedd_fns(self): + """""" To get embedd vector, using parent class methods"""""" + try: + ## Processing the files and create a embedding and save it using csv. + vdb_status = super().get_vdb_status('chromadb') + self.log.info(""<------- AION Code Clone Detection started ... ------>\\n "") + if not vdb_status: + openai_api_type,openai_api_base,openai_api_key,openai_api_version = super().getOpenAICredentials() + # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() + all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() + if (openai.api_key or openai_api_key): + if not isinstance(all_funcs,type(None)): + embedded_df,embedding_cost=super().get_embeddings_pyfiles(all_funcs) + else: + return status + except Exception as e: + # print(""Error in getting embedding vector using openai. Error message: "",e) + self.log.info(""Error in getting embedding vector using openai. 
Error message: ""+str(e)) + raise Exception(""Error in getting embedding vector using openai."") + return embedded_df,embedding_cost + + + + def dbscan_clone_detection(self,df): + """""" DBScan based code clone similarity detection (for functions in given dir """""" + try: + vdb_status = super().get_vdb_status('chromadb') + if not vdb_status: + X = np.array(list(df.code_embedding.values)) + else: + X = np.array(list(df.embeddings.values)) + #X = StandardScaler().fit_transform(X) + db = DBSCAN(eps=0.2, min_samples=2).fit(X) + labels = db.labels_ + # Number of clusters in labels, ignoring noise if present. + n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) + n_noise_ = list(labels).count(-1) + df[""cluster""] = labels + cluster_result = [] + for i in range(n_clusters_): + cluster_df = df.loc[df['cluster'] == i] + # with open(""{}/cluster_{}.txt"".format(self.ccdreportpath,i), ""w"") as f: + for index, row in cluster_df.iterrows(): + cluster_result.append({""cluster_id"": i,""filepath"": row.filepath,""function_name"": row.function_name,""code"": row.code }) + # f.write(f""Source File: {row.filepath}, Function Name: {row.function_name}"") + #f.write(f""\\n{row.code}\\n\\n{'-'*80}\\n\\n"") + + cluster_result_df = pd.DataFrame(cluster_result) + codeclonereport_df = os.path.join(self.ccdreportpath,'cluster_result.csv') + cluster_result_df.to_csv(codeclonereport_df, index=False) + return cluster_result_df + except Exception as e: + self.log.info(""Error in dbscan based similar code clone clustering. Error Message: ""+str(e)) + raise Exception(""Error in dbscan based similar code clone clustering."") + + def make_pairs(self,data_list:list): + try: + if len(data_list) <=1: + return [] + + return [(data_list[0], d) for d in data_list[1:]] + self.make_pairs(data_list[1:]) + except Exception as e: + self.log.info(""Error in make pairs function, error message: ""+str(e)) + raise Exception(""Error in clone code mapping."") + + def code_clone_check_with_retry(self,code1,code2, retry_interval=1): + """""" Call chat models for code clone detection with retry mechanism. """""" + try: + # res = super().validate_code_clone_with_explanation(code1,code2) + ##sj + if (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'text-datvinci-03'): + res = super().validate_code_clone_with_explanation_davinci(code1,code2) + return res + elif (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'): + res = super().validate_code_clone_with_explanation(code1,code2) + return res + + except (RateLimitError, Timeout) as e: + self.log.info(""Calling chat model issue in code clone check function, error message: ""+str(e)) + time.sleep(retry_interval) + return self.code_clone_check_with_retry(code1, code2) + + def res_formater(self,inp): + """""" Function to format gpt-3.5 or text-davinci-003 response body. 
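+            The chat models are asked (via the clone-detection prompt) to reply with a JSON
+            object holding the keys IsClone, CloneType and Explanation; this helper strips
+            braces and quotes, splits on ',' and ':' and returns the three values in that
+            order as (clone_status, clone_type, result).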
"""""" + try: + line = inp.replace('{','') + line = line.replace('}','') + line = line.replace('""','') + end=line.split(',') + d1={} + l2=[] + for l in end: + l=l.split(',') + for i in l: + l1=i.split("":"") + l2.append(l1) + import pandas as pd + df=pd.DataFrame(l2) + df=df.T + df.columns = df.iloc[0] + df = df[1:] + df.columns = df.columns.str.replace('[#,@,&,\\']', '') + # df.to_csv('test1.csv', index=False) + response=df.iloc[0] + fl=response.to_list() + clone_status=fl[0] + clone_type=fl[1] + result=fl[2] + except Exception as e: + self.log.info(""chat model response formatter error. Error message: ""+str(e)) + return clone_status,clone_type,result + + + def getcloneresult_modelspecific(self,code_clone_check_tasks,embedding_cost): + """""" get the clone type and associated information from chat model response data. """""" + try: + max_workers = min(len(code_clone_check_tasks), 100) + all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() + if (self.generativeai_chat_model.lower() == 'text-datvinci-03'): + self.log.info(""<--- Text-Dat" +"vinci-03 chat model based code clone detection. --->"") + code_clone_result = [] + for task in code_clone_check_tasks: + response=self.code_clone_check_with_retry(task[0][""code""], task[1][""code""]) + with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: + llm_requests = { + executor.submit(self.code_clone_check_with_retry, task[0][""code""], task[1][""code""]): task for task in code_clone_check_tasks + } + + with tqdm(total= len(llm_requests)) as progress: + for future in concurrent.futures.as_completed(llm_requests): + task = llm_requests[future] + try: + res = future.result() + try: + my_openai_obj1 = res[""choices""][0][""text""] + clone_status,clone_type,result = self.res_formater(my_openai_obj1) + model_value=res['model'] + total_tokens_value=res['usage']['total_tokens'] + + code_clone_result.append({""task"": task, + ""result"":result, + ""IsClone"": clone_status, + ""CloneType"": clone_type, + ""model"":model_value, + ""total_tokens"":total_tokens_value}) + except Exception as e: + self.log.info(""getCloneReport, code_clone_result.append error: ""+str(e)) + except Exception as exc: + self.log.info(""getCloneReport error (text davinci chat model): ""+str(exc)) + progress.update() + ## Please uncomment below part if you need to check chat model response body. 
+ #codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json') + #with open(codeclonecheckresult_json, ""w+"") as fp: + #json.dump(code_clone_result, fp, indent=2) + + code_clone_result_json=json.dumps(code_clone_result) + clone_report=pd.read_json(code_clone_result_json) + cr_totaltokens = clone_report['total_tokens'] + total_amt = (cr_totaltokens).sum() * (0.002/1000) + clone_report[""function1""] = clone_report[""task""].apply(lambda x: x[0][""filepath""] + "" -> "" + x[0][""function_name""]) + clone_report[""function2""] = clone_report[""task""].apply(lambda x: x[1][""filepath""] + "" -> "" + x[1][""function_name""]) + # clone_report[""clone_type""] = clone_report[""result""].apply(lambda x: x[""CloneType""]) + clone_report[""clone_type""] = clone_report[""CloneType""] + + code_dir = code_root + total_files = len(code_files) + total_locs = total_locs + total_functions = len(all_funcs) + total_tokens = clone_report['total_tokens'].sum() + total_cost= embedding_cost + clone_report['total_tokens'].sum() * (0.002/1000) + total_clones = len(clone_report[clone_report.clone_type != ""None""]) + code_clone_count_by_df = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') + clone_functions = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"").to_markdown(tablefmt='psql', index=False) + code_clone_count_dict = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')) + clone_function_dict = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"") + ##Final report on code clone detection + report_str = f""""""Code_directory: {code_dir} + Files: {total_files} + LOCs: {total_locs} + Functions: {total_functions} + + Total_code_clones_detected: {total_clones} + Tokens used: {total_tokens} + Total cost(embedding + clone check): {total_cost} + + Four_types_of_clone: + 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. + 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. + 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. + + Code_clones_count_by_clone_type: + {code_clone_count_by_df} + + Clone_functions: + {clone_functions} + """""" + codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') + with open(codeclonereport_txt, ""w"") as f: + f.write(report_str) + + report_dict=dict({""Code_directory"":code_dir,""total_files"":total_files, + ""total_locs"":total_locs,""total_functions"":total_functions,""total_clones"":total_clones, + ""total_tokens"":total_tokens,""total_cost"":total_cost, + ""Code_clones_count_by_clone_type"":code_clone_count_dict,""clone_functions"":clone_function_dict}) + ## report for chat model is gpt 3.5 turbo + elif (self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'): + try: + self.log.info(""<--- gpt-3.5-turbo chat model based code clone detection. 
--->"") + code_clone_result = [] + for task in code_clone_check_tasks: + response=self.code_clone_check_with_retry(task[0][""code""], task[1][""code""]) + with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: + llm_requests = { + executor.submit(self.code_clone_check_with_retry, task[0][""code""], task[1][""code""]): task for task in code_clone_check_tasks + } + + with tqdm(total= len(llm_requests)) as progress: + for future in concurrent.futures.as_completed(llm_requests): + task = llm_requests[future] + try: + res = future.result() + my_openai_obj1 = res[""choices""][0][""message""]['content'] + clone_status,clone_type,result = self.res_formater(my_openai_obj1) + # result = json.loads(res['choices'][0]['message']['content']) + total_tokens = res[""usage""][""total_tokens""] + code_clone_result.append({""task"": task, + ""result"":result , + ""CloneType"": clone_type, + ""total_tokens"": total_tokens}) + except Exception as exc: + self.log.info(""gpt 3.5 chat model error: ""+str(exc)) + + progress.update() + except Exception as e: + print(""In gpt3.5,getcloneresult_modelspecific fn exception : \\n"",e) + import traceback + print(""traceback, In gpt3.5,getcloneresult_modelspecific fn exception \\n"",traceback.print_exc()) + ## Please uncomment below part if you need to check chat model response body. + #codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json') + #with open(codeclonecheckresult_json, ""w+"") as fp: + #json.dump(code_clone_result, fp, indent=2) + try: + code_clone_result_json=json.dumps(code_clone_result) + clone_report = pd.read_json(code_clone_result_json) + codeclone_total_amt = clone_report[""total_tokens""].sum() * (0.002/1000) + clone_report[""function1""] = clone_report[""task""].apply(lambda x: x[0][""filepath""] + "" -> "" + x[0][""function_name""]) + clone_report[""function2""] = clone_report[""task""].apply(lambda x: x[1][""filepath""] + "" -> "" + x[1][""function_name""]) + # clone_report[""clone_type""] = clone_report[""result""].apply(lambda x: x[""CloneType""]) + clone_report[""clone_type""] = clone_report[""CloneType""] + code_dir = code_root + total_files = len(code_files) + total_locs = total_locs + total_functions = len(all_funcs) + total_tokens = clone_report[""total_tokens""].sum() + except Exception as e: + self.log.info(""Error in getting clone report: ""+str(e)) + total_cost= embedding_cost + clone_report[""total_tokens""].sum() * (0.002/1000) + total_clones = len(clone_report[clone_report.clone_type != ""None""]) + code_clone_count_by_df = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') + clone_functions = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"").to_markdown(tablefmt='psql', index=False) + code_clone_count_dict = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')) + clone_function_dict = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"") + report_str = f""""""Code_directory: {code_dir} + Files: {total_files} + LOCs: {total_locs} + Functions: {total_functions} + + Total code clones detected: {total_clones} + Tokens used: {total_tokens} + Total cost(embedding + clone check): {total_cost} + + Four types of clone: + 1. 
Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. + 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. + 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. + 5. None: Not a clone, discard this one. + + Code_clones_count_by_clone_type: + {code_clone_count_by_df} + + Clone_functions: + {clone_functions} + """""" + codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') + with open(codeclonereport_txt, ""w"") as f: + f.write(report_str) + + report_dict=dict({""Code_directory"":code_dir,""total_files"":total_files, + ""total_locs"":total_locs,""total_functions"":total_functions,""total_clones"":total_clones, + ""total_tokens"":total_tokens,""total_cost"":total_cost, + ""Code_clones_count_by_clone_type"":code_clone_count_dict,""clone_functions"":clone_function_dict}) + + except Exception as e: + self.log.info(""Error in clone type and information retrival process .Error message: ""+str(e)) + + return code_clone_result,report_str,report_dict + + + def getCloneReport(self): + """""" To get the clone report from the given python directory """""" + try: + self.log.info(""To get clone report, we are calling embedding and chat model."") + import time + vdb_status = super().get_vdb_status('chromadb') + start_time = time.time() + # self.log.info(""code clone detection start time.""+str(start_time)) + if not vdb_status: + embedded_df,embedding_cost = self.get_embedd_fns() + cluster_df = self.dbscan_clone_detection(embedded_df) + cluster_df_group = cluster_df.groupby(""cluster_id"") + len_cluster_df_group = len(cluster_df_group) + code_clone_check_tasks = [] + for name, group in cluster_df_group: + res = self.make_pairs(group.to_dict(orient=""records"")) + code_clone_check_tasks += res + + #For text-embedding-ada-002 and gpt 3.5 chat model + code_clone_result,report_str,report_dict = self.getcloneresult_modelspecific(code_clone_check_tasks,embedding_cost) + end_time = time.time() + total_time_taken = end_time - start_time + self.log.info(""Total time taken for code clone detction: ""+str(total_time_taken)) + self.log.info(""<------------- Final code clone report: -------------------> \\n""+str(report_str)) + report_df = pd.DataFrame.from_dict(report_dict, orient=""index"").reset_index() + report_df.columns = ['ccd_properties', 'Values'] + report_df=report_df.T + codecloneresult_df = os.path.join(self.ccdreportpath,'code_clone_report_df.csv') + report_df.to_csv(codecloneresult_df) + return report_str,report_dict,report_df,json.dumps(report_str) + else: + #Below code indended for vector db. 
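+                ## Vector-db path (only reached when get_vdb_status() reports chromadb as
+                ## available): functions are re-read, embedded through chromadb_embedding(),
+                ## clustered with the same DBSCAN step, and the within-cluster pairs are sent
+                ## to the chat model concurrently before being aggregated into the clone report.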
+ all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() + df = pd.DataFrame(all_funcs) + df['filepath'] = df['filepath'].apply(lambda x: x.replace(code_root, """")) + chromadb_df=super().chromadb_embedding(df) + df = self.dbscan_clone_detection(chromadb_df) + cluster_df_group = cluster_df.groupby(""cluster_id"") + len_cluster_df_group = len(cluster_df_group) + code_clone_check_tasks = [] + for name, group in cluster_df_group: + res = self.make_pairs(group" +".to_dict(orient=""records"")) + code_clone_check_tasks += res + code_clone_result = [] + max_workers = min(len(code_clone_check_tasks), 100) + with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: + llm_requests = { + executor.submit(self.code_clone_check_with_retry, task[0][""code""], task[1][""code""]): task for task in code_clone_check_tasks + } + + with tqdm(total= len(llm_requests)) as progress: + for future in concurrent.futures.as_completed(llm_requests): + task = llm_requests[future] + try: + res = future.result() + code_clone_result.append({""task"": task, + ""result"": json.loads(res['choices'][0]['message']['content']), + ""total_tokens"": res[""usage""][""total_tokens""]}) + + except Exception as exc: + print('%r generated an exception: %s' % (task, exc)) + + progress.update() + with open(""code_clone_check_result.json"", ""w+"") as fp: + json.dump(code_clone_result, fp, indent=2) + + code_clone_result_json=json.dumps(code_clone_result) + clone_report=pd.read_json(code_clone_result_json) + total_amt = clone_report[""total_tokens""].sum() * (0.002/1000) + clone_report[""function1""] = clone_report[""task""].apply(lambda x: x[0][""filepath""] + "" -> "" + x[0][""function_name""]) + clone_report[""function2""] = clone_report[""task""].apply(lambda x: x[1][""filepath""] + "" -> "" + x[1][""function_name""]) + clone_report[""clone_type""] = clone_report[""result""].apply(lambda x: x[""CloneType""]) + all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() + + code_dir = code_root + total_files = len(code_files) + total_locs = total_locs + total_functions = len(all_funcs) + total_tokens = clone_report[""total_tokens""].sum() + # total_cost= embedding_cost + clone_report[""total_tokens""].sum() * (0.002/1000) + total_clones = len(clone_report[clone_report.clone_type != ""None""]) + code_clone_count_by_df = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') + clone_functions = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"").to_markdown(tablefmt='psql', index=False) + code_clone_count_dict = clone_report[clone_report.clone_type != ""None""].groupby(""clone_type"").agg(Count=('clone_type', 'count')) + clone_function_dict = clone_report[[""function1"", ""function2"", ""clone_type""]][clone_report.clone_type != ""None""].sort_values(""function1"") + + ##Final report on code clone detection + report_str = f""""""Code_directory: {code_dir} + Files: {total_files} + LOCs: {total_locs} + Functions: {total_functions} + + Total code clones detected: {total_clones} + Tokens used: {total_tokens} + + Four types of clone: + 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. 
Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. + 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. + 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. + + Code_clones_count_by_clone_type: + {code_clone_count_by_df} + + Clone_functions: + {clone_functions} + """""" + with open(""code_clone_report.txt"", ""w"") as f: + f.write(report_str) + + # print(report_str) + self.log.info(""<------------- Final code clone report: -------------------> \\n""+str(report_str)) + self.log.info(""<------------- clone_functions code clone report: -------------------> \\n""+str(clone_functions)) + report_dict=dict({""Code_directory"":code_dir,""total_files"":total_files, + ""total_locs"":total_locs,""total_functions"":total_functions,""total_clones"":total_clones, + ""total_tokens"":total_tokens, + ""Code_clones_count_by_clone_type"":code_clone_count_dict,""clone_functions"": clone_function_dict}) + + + report_df= pd.DataFrame([report_dict.keys(), report_dict.values()]).T + report_df.columns = [""Code_directory"", ""total_files"",""total_locs"",""total_functions"",""total_clones"",""total_tokens"",""Code_clones_count_by_clone_type"",""clone_functions""] + report_df.to_csv(""code_clone_report_df.csv"") + return report_str,report_dict,report_df,json.dumps(report_str) + except Exception as e: + self.log.info(""Error in clone detection function call. Error Message: \\n""+str(e)) + raise Exception(""Error in clone detection function."") + +#For testing and code instance privacy +if __name__=='__main__': + ## For testing purpose.Uncomment n use. + root_directory = r""C:\\AION_Works\\Anomaly_Detection\\anomalydetectionpackage\\code_clone_testing_pyfiles\\code_clone_testing_pyfiles_large"" + embedd_storage_path = r""C:\\AION_Works\\ccddir"" + generativeai_credentials={'openai_baseurl':"""", + 'openai_key':"""", + 'openai_api_type':"""", + 'openai_api_version':"""", + 'generativeai_embedding_engine':"""", + 'generativeai_embedding_model':"""", + 'generativeai_chat_model':"""", + 'generativeai_deploymentId':""""} + openai_baseurl = generativeai_credentials['openai_baseurl'] + openai_key = generativeai_credentials['openai_key'] + openai_api_type = generativeai_credentials['openai_api_type'] + openai_api_version = generativeai_credentials['openai_api_version'] + generativeai_embedding_engine = generativeai_credentials['generativeai_embedding_engine'] + generativeai_embedding_model = generativeai_credentials['generativeai_embedding_model'] + generativeai_chat_model = generativeai_credentials['generativeai_chat_model'] + generativeai_deploymentId = generativeai_credentials['generativeai_deploymentId'] + codeclonedetection_obj = CodeCloneDetectionFiles(root_directory,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId) + report_str,report_dict,report_json = codeclonedetection_obj.getCloneReport() + print(""End of code clone detection....\\n"") + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import sqlite3 +from pathlib import Path +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time +import sqlite3 + +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + + def table_exists(self, name): + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + return len(listOfTables) > 0 + + def read_data(self, table_name): + query = f""SELECT * FROM {table_name}"" + row = self.cursor.execute(query).fetchall() + return list(row) + #return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def delete_record(self,table_name,col_name, col_value): + try: + query = f""DELETE FROM {table_name} WHERE {col_name}='{col_value}'"" + self.conn.execute(query) + self.conn.commit() + return 'success' + except Exception as e : + print(str(e)) + print(""Deletion Failed"") + return 'error' + + def get_data(self,table_name,col_name,col_value): + query = f""SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"" + row = self.cursor.execute(query).fetchone() + if(row == None): + return [] + return list(row) + + def write_data(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def close(self): + self.conn.close() + +def add_new_GCSBucket(request): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + + print(request.POST[""aionreferencename""]) + print(request.POST[""serviceaccountkey""]) + print(request.POST[""bucketname""]) + if request.POST[""aionreferencename""] =='' or request.POST[""serviceaccountkey""] == '' or request.POST[""bucketname""] == '' : + + return 'error' + newdata = {} + newdata['Name'] = [request.POST[""aionreferencename""]] + newdata['GCSServiceAccountKey'] = [request.POST[""serviceaccountkey""]] + newdata['GCSbucketname'] = [request.POST[""bucketname""]] + name = request.POST[""aionreferencename""] + if sqlite_obj.table_exists(""gcsbucket""): + if(len(sqlite_obj.get_data(""gcsbucket"",'Name',name))>0): + return 'error1' + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcsbucket') + except: + return 'error' + +def get_gcs_bucket(): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + 
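+        # read_data() returns raw rows as tuples in the column order written by
+        # add_new_GCSBucket() above, i.e. (Name, GCSServiceAccountKey, GCSbucketname),
+        # so the loop below rebuilds one dict per configured bucket in that order.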
temp_data = sqlite_obj.read_data('gcsbucket') + data = [] + for x in temp_data: + data_dict = {} + data_dict['Name'] = x[0] + data_dict['GCSServiceAccountKey'] = x[1] + data_dict['GCSbucketname'] = x[2] + data.append(data_dict) + except Exception as e: + print(e) + data = [] + return data + +def read_gcs_bucket(name,filename,DATA_FILE_PATH): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + data = sqlite_obj.get_data(""gcsbucket"",'Name',name) + except: + data = [] + found = False + if len(data)!=0: + GCSServiceAccountKey = data[1] + GCSbucketname = data[2] + found = True + #print(found) + #print(name) + try: + if found: + import io + from google.cloud import storage + + #print(GCSServiceAccountKey) + #print(GCSbucketname) + try: + storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey) + bucket = storage_client.get_bucket(GCSbucketname) + blob = bucket.blob(filename) + data = blob.download_as_string() + df = pd.read_csv(io.BytesIO(data), encoding = 'utf-8', sep = ',',encoding_errors= 'replace') + except Exception as e: + return ""Error"",str(e), pd.DataFrame() + return 'Success',"""",df + except Exception as e: + print(e) + return 'Error',""Please check bucket configuration"",pd.DataFrame() + +def remove_gcs_bucket(name): + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + return sqlite_obj.delete_record('gcsbucket','Name',name) + + ''' +* +*" +"============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 
2021, 2022,2023 +* Proprietary and confide" +"deploy_dir, chunk_size): + self.files_dir = files_dir + self.deploy_dir = deploy_dir + self.chunk_size = chunk_size + try: + self.ccdreportpath = os.path.join(self.deploy_dir, ""codeCloneReport"") + os.makedirs(self.ccdreportpath, exist_ok = True) + except OSError as error: + print(""Directory 'codeCloneReport' cann't be created.Error msg: "",error) + + try: + current_datetime = datetime.now().strftime(""%Y-%m-%d_%H-%M-%S"") + str_current_datetime = str(current_datetime) + log_file_name = 'codeclonelog_sklearn'+f""_{str_current_datetime}""+"".log"" + logpath = os.path.join(self.ccdreportpath,log_file_name) + logging.basicConfig(level=logging.INFO,filename=logpath,filemode='w',format='%(message)s') + self.log = logging.getLogger() + except Exception as e: + print(""code clone log object creation error."",e) + pass + + def get_function_names(self,filename): + """""" Get the function names from python files """""" + try: + with open(filename, 'r') as file: + content = file.read() + tree = ast.parse(content) + function_names = [] + + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef): + function_names.append(node.name) + except Exception as e: + self.log.info(""function name read error: ""+str(e)) + + return function_names + + def get_function_code(self,filename, function_name): + """""" To get the function codes """""" + try: + with open(filename, 'r') as file: + content = file.read() + tree = ast.parse(content) + function_code = """" + + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef) and node.name == function_name: + function_code = ast.unparse(node) + except Exception as e: + self.log.info(""function name read error: ""+str(e)) + + return function_code + + def get_python_files(self,root_dir): + """""" Walk thru the directory user given, get all py files. """""" + try: + + code_files = [y for x in os.walk(root_dir) for y in glob(os.path.join(x[0], '*.py'))] + except Exception as e: + self.log.info(""Python file read error: ""+str(e)) + return code_files + + def chunk_functions(self,function_code, chunk_size): + """""" Check the function size based on chunk size. """""" + try: + if (len(function_code) > 20): + chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)] + else: + chunks = list((function_code,)) + except Exception as e: + self.log.info(""function chunk based on chunk_size error: ""+str(e)) + total_tokens = round(len(function_code)/4) + return chunks,total_tokens + + def get_clone(self): + """""" Main code clone detection function using sklearn tfidf_vectorizer and cosine_similarity. 
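+            Functions are parsed out of the .py files with ast, vectorised with a TF-IDF
+            vectorizer, and pairwise cosine similarity is bucketed into exact (> 0.95),
+            parameterized (0.90-0.95) and near-miss (0.80-0.90) clones.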
+ return values:report_dict which contains total_clones, """""" + try: + + start_time = time.time() + chunk_size = int(self.chunk_size) + ccdreportpath = os.path.join(self.deploy_dir, ""codeCloneReport"") + python_files = self.get_python_files(self.files_dir) + total_files = len(python_files) + # print('python_files: \\n',python_files) + function_codes = [] + function_n = [] + file_name=[] + # files_info=[] + total_tokens_used = [] + for file in python_files: + function_names = self.get_function_names(file) + for i,function_name in enumerate(function_names): + file_name.append(file) + function_n.append(function_name) + function_code = self.get_function_code(file, function_name) + chunks,total_tokens = self.chunk_functions(function_code, chunk_size) + total_tokens_used.append(total_tokens) + function_codes.extend(chunks) + total_functions = len(function_n) + files_info=list(zip(file_name, function_n,function_codes)) + tfidf_vectorizer = TfidfVectorizer() + ## we can use other vectorizer models also. + # tfidf_vectorizer = HashingVectorizer() + tfidf_matrix = tfidf_vectorizer.fit_transform(function_codes) + similarity_matrix = cosine_similarity(tfidf_matrix) + #Uncomment if you want to send two different code clonne blocks at a time for similarity comparison + # similarity_matrix = cosine_similarity(tfidf_matrix, tfidf_matrix) + clone_d = dict() + total_clones = 0 + final_report=list() + #getting funtion and next function for comparison + for i in range(len(similarity_matrix)): + for j in range(i + 1, len(similarity_matrix)): + if(similarity_matrix[i, j] >= 0.90 and similarity_matrix[i, j] <= 0.95): + clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone1_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone1_path':files_info[j][0]},'cloneType':'parametricClone'}}) + report_json = json.dumps(clone_d, indent = 4) + total_clones=total_clones+1 + elif(similarity_matrix[i, j] > 0.95): + clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0] + },'cloneType':'exactClone'}}) + report_json = json.dumps(clone_d, indent = 4) + final_report.append(clone_d) + total_clones=total_clones+1 + elif(similarity_matrix[i, j] > 0.80 and similarity_matrix[i, j] < 0.90): + clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0] + },'cloneType':'NearMissClones'}}) + report_json = json.dumps(clone_d, indent = 4) + final_report.append(clone_d) + total_clones=total_clones+1 + else: + ##add other conditionas in future + pass + ## To get clone type + clone_type = [list(item.values())[2] for item in list(clone_d.values())] + report_str = json.dumps(final_report) + json_l=json.loads(report_str) + json_keys = list(json_l[0].keys()) + json_values = list(json_l[0].values()) + end_time = time.time() + total_time_taken = end_time - start_time + + # self.log.info(""ccd_report: \\n""+str(ccd_report)) + f_df=pd.DataFrame(list(zip(json_keys, json_values,clone_type)), + columns =['Clone', 'CloneDetails','CloneType']) + codeclonereport_file = os.path.join(self.ccdreportpath,'clone_detection_report_sklearn.csv') + f_df.to_csv(codeclonereport_file, index=False) + ccd_report = f_df.to_markdown(tablefmt='psql') + self.log.info(""total_clones: \\n""+str(total_clones)) + 
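+            # The per-type counts below are obtained by string-matching the CloneType column
+            # against the labels assigned above (exactClone, parametricClone, NearMissClones);
+            # total token usage is the 4-chars-per-token estimate accumulated in chunk_functions().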
exact_clone_count = f_df['CloneType'].str.count(""exactClone"").sum() + parametric_clone_count = f_df['CloneType'].str.count(""parametricClone"").sum() + nearmiss_clone_count = f_df['CloneType'].str.count(""NearMissClones"").sum() + total_tokens = sum(total_tokens_used) + # nearmiss_clone_count =0 + self.log.info(""exact_clone_count: \\n""+str(exact_clone_count)) + self.log.info(""parametric_clone_count: \\n""+str(parametric_clone_count)) + self.log.info(""nearmiss_clone_count: \\n""+str(nearmiss_clone_count)) + self.log.info(""Total tokens used: \\n""+str(total_tokens)) + self.log.info(""Total time taken to excute code clone detction: \\t""+str(total_time_taken)) + clone_info=""1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces,\\ + 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone,\\ + 3. Near-miss clone: Near-miss clone are clones detected with less similarity threshold."" + clone_count = {""Exact Clone"":exact_clone_count,""Parametric Clone"":parametric_clone_count,""Nearmiss Clone"":nearmiss_clone_count} + report_str = f""""""Code_directory: {self.files_dir} + Files: {total_files} + Functions: {total_functions} + + Total_code_clones_detected: {total_clones} + Tokens used: {total_tokens} + + Three_types_of_clone: + 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. + 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone. + 3. Near-miss clone: Near-miss clone are clones detected with less similarity threshold. + + Code_clones_count_by_clone_type: + {clone_count} + + Clone_functions: + {ccd_report} + + total_time_taken: {total_time_taken} + """""" + codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') + with open(codeclonereport_txt, ""w"") as f: + f.write(report_str) + report_dict = {""clone_info"":clone_info,""total_clones"":total_clones,'total_files':total_files,""exact_clone_count"":exact_clone_count,'total_functions':total_functions,""total_tokens"":total_tokens, ""parametric_clone_count"":parametric_clone_count,""nearmiss_clone_count"":nearmiss_clone_count,""result_df"":f_df } + self.log.info(""ccd_report: \\n""+str(ccd_report)) + # print(""report_dict:\\n\\n"",report_dict) + # end_time = time.time() + # total_time = (end_time - start_time) + + return report_dict + except Exception as e: + self.log.info(""Clone detection function error. error msg: ""+str(e)) + # import traceback + # print(""traceback error: \\n"",traceback.print_exc()) + +if __name__ == ""__main__"": + print(""code clone detection started...."") + ##Use this for standalone fn debuging. ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json +import time +import os +import subprocess +import base64 +import sys +import re +from appbe.dataIngestion import getcommonfields +from appbe.dataIngestion import getusercasestatus +def startSummarization(request,DEFAULT_FILE_PATH,CONFIG_PATH,DATA_FILE_PATH): + try: + if request.FILES: + Datapath = request.FILES['summarypath'] + ext = str(Datapath).split('.')[-1] + filetimestamp = str(int(time.time())) + if ext.lower() in ['txt','pdf','doc','docs']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + + configFile = os.path.join(DEFAULT_FILE_PATH,'aion_textSummerization.json') + filetimestamp = str(int(time.time())) + config_json_filename = os.path.join(CONFIG_PATH, 'AION_' + filetimestamp + '.json') + f = open(configFile) + data = json.load(f) + f.close() + data['basic']['dataLocation'] = dataFile + type = request.POST.get('type') + model = request.POST.get('model') + slength = request.POST.get('length') + types = data['basic']['analysisAproach']['textSummarization'] + for x in list(types.keys()): + data['basic']['analysisAproach']['textSummarization'][x] = 'False' + data['basic']['analysisAproach']['textSummarization'][type] = 'True' + format = request.POST.get('format') + algorithm = data['basic']['algorithms']['textSummarization'] + for x in list(algorithm.keys()): + data['basic']['algorithms']['textSummarization'][x] = 'False' + data['basic']['algorithms']['textSummarization'][model]='True' + length = data['advance']['textSummarization']['summaryLength'] + for x in list(types.keys()): + data['advance']['textSummarization']['summaryLength'][x] = 'False' + data['advance']['textSummarization']['summaryLength'][slength] = 'True' + with open(config_json_filename, ""w"") as outfile: + json.dump(data, outfile) + outfile.close() + from bin.aion_text_summarizer import" +"aion_textsummary + outputStr = aion_textsummary(config_json_filename) + #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','bin','aion_text_summarizer.py')) + #outputStr = subprocess.check_output([sys.executable, scriptPath, config_json_filename]) + #outputStr = outputStr.decode('utf-8') + #outputStr = re.search(r'Summary:(.*)', str(outputStr), re.IGNORECASE).group(1) + predict_dict = json.loads(str(outputStr)) + summary = predict_dict['summary'] + except Exception as e: + print(e) + summary = str(e) + context = getcommonfields() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}) + context.update({'summary':summary}) + return context ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import json +import os,sys +from appbe import help_Text as ht + +def save(request): + from appbe.dataPath import DEFAULT_FILE_PATH + + if request.method == 'POST': + submittype = request.POST.get('AdvanceSubmit') + if submittype != 'AdvanceDefault': + configFile = request.session['config_json'] + f = open(configFile, ""r+"") + configSettingsData = f.read() + configSettings = json.loads(configSettingsData) + try: + if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false': + numericselectedmethod = request.POST.get('numericfillmethod') + for x in list(configSettings['advance']['profiler']['numericalFillMethod'].keys()): + configSettings['advance']['profiler']['numericalFillMethod'][x] = 'False' + configSettings['advance']['profiler']['numericalFillMethod'][numericselectedmethod] = 'True' + + categoricalselectedmethod = request.POST.get('categorialfillmethod') + for x in list(configSettings['advance']['profiler']['categoricalFillMethod'].keys()): + configSettings['advance']['profiler']['categoricalFillMethod'][x] = 'False' + configSettings['advance']['profiler']['categoricalFillMethod'][categoricalselectedmethod] = 'True' + + categoryEncodingMethod = request.POST.get('categoryencoding') + for x in list(configSettings['advance']['profiler']['categoryEncoding'].keys()): + configSettings['advance']['profiler']['categoryEncoding'][x] = 'False' + configSettings['advance']['profiler']['categoryEncoding'][categoryEncodingMethod] = 'True' + + outlierDetection = request.POST.get('outlierDetection') + + for x in list(configSettings['advance']['profiler']['outlierDetection'].keys()): + configSettings['advance']['profiler']['outlierDetection'][x] = 'False' + if outlierDetection != 'Disable': + configSettings['advance']['profiler']['outlierDetection'][outlierDetection] = 'True' + + #configSettings['advance']['profiler']['outlierDetectionStatus'] = request.POST.get('AnamolyDetectionStatus') + #configSettings['advance']['profiler']['outlierDetectionMethod'] = request.POST.get('AnaTreatmentMethod') + configSettings['advance']['profiler']['misValueRatio'] = request.POST.get('MisValueRatio') + #configSettings['advance']['profiler']['categoricalToNumeric'] = request.POST.get('CategoricalToNumeric') + configSettings['advance']['profiler']['numericFeatureRatio'] = request.POST.get('NumFeatureRatio') + configSettings['advance']['profiler']['categoryMaxLabel'] = request.POST.get('CatMaxLabels') + configSettings['advance']['selector']['categoryMaxLabel'] = request.POST.get('CatMaxLabels') + normalizationtypes = configSettings['advance']['profiler']['normalization'] + for k in normalizationtypes.keys(): + configSettings['advance']['profiler']['normalization'][k] = 'False' + if request.POST.get('NormalizationMethod').lower() != 'none': + configSettings['advance']['profiler']['normalization'][request.POST.get('NormalizationMethod')] = 'True' + #configSettings['advance']['profiler']['normalizationMethod'] = request.POST.get('NormalizationMethod') + configSettings['advance']['profiler']['removeDuplicate'] = request.POST.get('removeDuplicate') + # ---------------------------------------------- Debiasing Changes ---------------------------------------------- + configSettings['advance']['profiler']['deBiasing']['FeatureName'] = request.POST.get('InputFeature') + configSettings['advance']['profiler']['deBiasing']['ClassName'] = request.POST.get('InputClass') + configSettings['advance']['profiler']['deBiasing']['Algorithm'] = request.POST.get('InputAlgorithm') + 
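+                    # Debiasing block: the sensitive feature, class value and mitigation algorithm
+                    # come straight from the form fields; the target feature is mirrored from the
+                    # basic configuration below, presumably so the debiasing step knows which
+                    # label to balance against the selected feature.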
configSettings['advance']['profiler']['deBiasing']['TargetFeature'] = configSettings['basic']['targetFeature'] + # ---------------------------------------------- ---------------------------------------------- + problemtypes = configSettings['basic']['analysisType'] + problem_type = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false' and configSettings['basic']['onlineLearning'].lower() == 'false' and configSettings['basic']['distributedLearning'].lower() == 'false': + configSettings['advance']['profiler']['textCleaning']['removeNoise'] = request.POST.get('noiseStatus') + + # -------------------------------- 12301:Remove Noise Config related Changes S T A R T -------------------------------- + if request.POST.get('noiseStatus') == 'True': + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = request.POST.get('DecodeHTML') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = request.POST.get('removeHyperlinks') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = request.POST.get('RemoveMentions') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = request.POST.get('removeHashtags') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = request.POST.get('removeEmoji') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = request.POST.get('unicodeToAscii') + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = request.POST.get('removeNonAscii') + else: + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = ""False"" + # ---------------------------------------------------------------- E N D ---------------------------------------------------------------- + + configSettings['advance']['profiler']['textCleaning']['expandContractions'] = request.POST.get( + 'expandContractions') + configSettings['advance']['profiler']['textCleaning']['normalize'] = request.POST.get('normalize') + if (request.POST.get('normalizeMethod') == 'Lemmatization'): + configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = ""False"" + else: + configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = ""False"" + + configSettings['advance']['profiler']['textCleaning']['replaceAcronym'] = request.POST.get('replaceAcronym') + if 
request.POST.get('acronymDict') != '' and request.POST.get('acronymDict') != 'None': + configSettings['advance']['profiler']['textCleaning']['acronymConfig']['acronymDict'] = eval(request.POST.get( + 'acronymDict')) + configSettings['advance']['profiler']['textCleaning']['correctSpelling'] = request.POST.get( + 'correctSpelling') + configSettings['advance']['profiler']['textCleaning']['removeStopwords'] = request.POST.get( + 'removeStopwords') + if (request.POST.get('ExtendOrReplace') == 'NA'): + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = ""False"" + elif (request.POST.get('ExtendOrReplace') == 'Extend'): + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = ""False"" + else: + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = ""True"" + + configSettings['advance']['profiler']['textCleaning']['stopWordsConfig'][ + 'stopwordsList'] = request.POST.get('stopwordsList') + configSettings['advance']['profiler']['textCleaning']['removePunctuation'] = request.POST.get( + 'removePunctuation') + configSettings['advance']['profiler']['textCleaning']['removePunctuationConfig'][ + 'removePuncWithinTokens'] = request.POST.get('removePuncWithinTokens') + configSettings['advance']['profiler']['textCleaning']['removeNumericTokens'] = request.POST.get( + 'removeNumericTokens') + configSettings['advance']['profiler']['textCleaning']['removeNumericConfig'][ + 'removeNumeric_IncludeSpecialCharacters'] = request.POST.get('removeNumeric_IncludeSpecialCharacters') + + if (request.POST.get('tokenizationLib') == 'nltk'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'gensim'] = ""False"" + elif (request.POST.get('tokenizationLib') == 'textblob'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'textblob'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'gensim'] = ""False"" + elif (request.POST.get('tokenizationLib') == 'spacy'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = ""True"" + 
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'gensim'] = ""False"" + elif (request.POST.get('tokenizationLib') == 'keras'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'gensim'] = ""False"" + + else: + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['gensim'] = ""True"" + + if (request.POST.get('lemmatizationLib') == 'nltk'): + configSettings['advance" +"']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ + 'spacy'] = ""False"" + + elif (request.POST.get('lemmatizationLib') == 'textblob'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ + 'textblob'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ + 'spacy'] = ""False"" + + else: + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ + 'textblob'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['spacy'] = ""True"" + + if (request.POST.get('stopwordsRemovalLib') == 'nltk'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'nltk'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'gensim'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'spacy'] = ""False"" + + elif (request.POST.get('stopwordsRemovalLib') == 'gensim'): + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'nltk'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'gensim'] = ""True"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'spacy'] = ""False"" + + else: + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'nltk'] = ""False"" + 
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'gensim'] = ""False"" + configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ + 'spacy'] = ""True"" + configSettings['advance']['profiler']['textFeatureExtraction']['n_grams'] = request.POST.get('n_grams') + configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][ + 'min_n'] = int(request.POST.get('range_min_n')) + configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][ + 'max_n'] = int(request.POST.get('range_max_n')) + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags'] = request.POST.get('pos_tags') + + if (request.POST.get('pos_tags_lib') == 'nltk'): + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = ""True"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = ""False"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = ""False"" + + elif (request.POST.get('pos_tags_lib') == 'textblob'): + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = ""True"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = ""False"" + + else: + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = ""False"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = ""False"" + configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = ""True"" + textconvertionmethods = configSettings['advance']['profiler']['textConversionMethod'] + for k in textconvertionmethods.keys(): + configSettings['advance']['profiler']['textConversionMethod'][k] = 'False' + if problem_type.lower() not in ['similarityidentification','contextualsearch']: + configSettings['advance']['profiler']['textConversionMethod'][request.POST.get('textConvertionMethod')] = 'True' + if 'embeddingSize' in configSettings['advance']['profiler']: + glove = configSettings['advance']['profiler']['embeddingSize']['Glove'] + for k in glove.keys(): + configSettings['advance']['profiler']['embeddingSize']['Glove'][k] = 'False' + configSettings['advance']['profiler']['embeddingSize']['Glove'][request.POST.get('txtglovedimensions')] = 'True' + fastText = configSettings['advance']['profiler']['embeddingSize']['FastText'] + for k in fastText.keys(): + configSettings['advance']['profiler']['embeddingSize']['FastText'][k] = 'False' + configSettings['advance']['profiler']['embeddingSize']['FastText'][request.POST.get('txtFastTextdimensions')] = 'True' + if 'LatentSemanticAnalysis' in configSettings['advance']['profiler']['embeddingSize']: + LatentSemanticAnalysis = configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] + for k in LatentSemanticAnalysis.keys(): + configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][k] = 'False' + configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][request.POST.get('txttfidfdimensions')] = 'True' + if 'TF_IDF' in configSettings['advance']['profiler']['embeddingSize']: + configSettings['advance']['profiler']['embeddingSize']['TF_IDF']['maxFeatures'] = request.POST.get('tfidfmaxfeatures') + if 'CountVectors' in 
configSettings['advance']['profiler']['embeddingSize']: + configSettings['advance']['profiler']['embeddingSize']['CountVectors']['maxFeatures'] = request.POST.get('cvmaxfeatures') + if problem_type.lower() == 'imageclassification': + configSettings['advance']['image_config']['img_width'] = int(request.POST.get('img_width')) + configSettings['advance']['image_config']['img_height'] = int(request.POST.get('img_height')) + configSettings['advance']['image_config']['img_channel'] = int(request.POST.get('img_channel')) + configSettings['advance']['image_config']['lr'] = float(request.POST.get('lr')) + configSettings['advance']['image_config']['epochs'] = int(request.POST.get('epochs')) + configSettings['advance']['image_config']['test_split_ratio'] = float(request.POST.get('test_split_ratio')) + if problem_type.lower() == ""llmfinetuning"": + configSettings = llmadvancesettings(configSettings,request) + + if problem_type.lower() == 'objectdetection' or problem_type.lower() == 'imageclassification': + configSettings['advance']['ImageAugmentation']['Enable'] = request.POST.get('advance_ImageAugmentation_Enable') + configSettings['advance']['ImageAugmentation']['KeepAugmentedImages'] = request.POST.get('advance_ImageAugmentation_keepAugmentedImages') + configSettings['advance']['ImageAugmentation']['Noise']['Blur'] = request.POST.get('advance_ImageAugmentation_Noise_Blur') + configSettings['advance']['ImageAugmentation']['Noise']['Brightness'] = request.POST.get('advance_ImageAugmentation_Noise_Brightness') + configSettings['advance']['ImageAugmentation']['Noise']['Contrast'] = request.POST.get('advance_ImageAugmentation_Noise_Contrast') + configSettings['advance']['ImageAugmentation']['Transformation']['Flip'] = request.POST.get('advance_ImageAugmentation_Transformation_Flip') + configSettings['advance']['ImageAugmentation']['Transformation']['Rotate'] = request.POST.get('advance_ImageAugmentation_Transformation_Rotate') + configSettings['advance']['ImageAugmentation']['Transformation']['Shift'] = request.POST.get('advance_ImageAugmentation_Transformation_Shift') + configSettings['advance']['ImageAugmentation']['Transformation']['Crop'] = request.POST.get('advance_ImageAugmentation_Transformation_Crop') + configSettings['advance']['ImageAugmentation']['configuration']['Blur']['noOfImages'] = request.POST.get('noofblurimages') + configSettings['advance']['ImageAugmentation']['configuration']['Blur']['limit'] = request.POST.get('limitblurimage') + configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['noOfImages'] = request.POST.get('noofbrightnessimages') + configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['limit'] = request.POST.get('limitbrightnessimage') + configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['noOfImages'] = request.POST.get('noofcontrastimages') + configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['limit'] = request.POST.get('limitcontrastimage') + configSettings['advance']['ImageAugmentation']['configuration']['Flip']['noOfImages'] = request.POST.get('noofflipimages') + configSettings['advance']['ImageAugmentation']['configuration']['Rotate']['noOfImages'] = request.POST.get('noofrotateimages') + configSettings['advance']['ImageAugmentation']['configuration']['Shift']['noOfImages'] = request.POST.get('noofshiftimages') + configSettings['advance']['ImageAugmentation']['configuration']['Crop']['noOfImages'] = request.POST.get('noofcropimages') + + 
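+ # Feature selection / feature engineering: the block below first clears every
+ # selector flag to 'False', then turns on exactly one selection method and one
+ # dimensionality reduction method from the submitted form values, and finally
+ # copies the statistical thresholds (correlation, p-value, variance) from the form.
+ # Minimal helper sketch (illustrative only, not part of the original file) for the
+ # recurring "set one option 'True', the rest 'False'" pattern used in this view:
+ # def select_one(options, chosen, skip=()):
+ #     for key in options:
+ #         if key not in skip:
+ #             options[key] = 'True' if key == chosen else 'False'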
configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'False' + configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'False' + configSettings['advance']['selector']['featureSelection']['allFeatures'] = 'False' + configSettings['advance']['selector']['featureSelection']['statisticalBased'] = 'False' + configSettings['advance']['selector']['featureSelection']['modelBased'] = 'False' + if(request.POST.get('selectionMethod') == 'FeatureSelection'): + configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'True' + else: + configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'True' + if request.POST.get('allFeatures'): + configSettings['advance']['selector']['featureSelection']['allFeatures'] = request.POST.get('allFeatures') + if request.POST.get('statisticalBased'): + configSettings['advance']['selector']['featureSelection']['statisticalBased'] = request.POST.get('statisticalBased') + if request.POST.get('modelBased'): + configSettings['advance']['selector']['featureSelection']['modelBased'] = request.POST.get('modelBased') + + dimentionalityreductionmethod = request.POST.get('dimentionalityreductionmethod') + for x in list(configSettings['advance']['selector']['featureEngineering'].keys()): + if x != 'numberofComponents': + configSettings['advance']['selector']['featureEngineering'][x] = 'False' + configSettings['advance']['selector']['featureEngineering'][dimentionalityreductionmethod] = 'True' + + + configSettings['advance']['selector']['featureEngineering']['numberofComponents'] = request.POST.get('numberofComponents') + #configSettings['advance']['selector']['categoricalFeatureRatio'] = request.POST.get('CatFeatureRatio') + configSettings['advance']['selector']['statisticalConfig']['correlationThresholdFeatures'] = request.POST.get('correlationThresholdFeatures') + configSettings['advance']['selector']['statisticalConfig']['correlationThresholdTarget'] = request.POST.get('correlationThresholdTarget') + configSettings['advance']['selector']['statisticalConfig']['pValueThresholdFeatures'] = request.POST.get('pValueThresholdFeatures') + configSettings['advance']['selector']['statisticalConfig']['pValueThresholdTarget'] = request.POST.get('pValueThresholdTarget') + configSettings['advance']['selector']['statisticalConfig']['varianceThreshold'] = request.POST.get('VarianceThreshold') + + + + + if problem_type.lower() == 'recommendersystem': + configSettings['advance']['recommenderparam']['svd_params']= eval(request.POST.get('svd_params')) + configSettings['advance']['associationrule']['modelParams']['apriori'] = eval(request.POST.get('apriori')) + configSettings['advance']['textSimilarityConfig'] = eval(request.POST.get('textsimilarity')) + if configSettings['basic']['distributedLearning'].lower() == 'true': + configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classDistributedXGBoost')) + configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classDistributedLightGBM')) + configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('DistributedXGBoostreg')) + 
configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('DistributedLightGBMreg')) + if configSettings['basic']['onlineLearning'].lower() != 'true' and configSettings['basic']['distributedLearning'].lower() != 'true': + if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression') or (problem_type.lower() == 'clustering') or (problem_type.lower() == 'topicmodelling'): + + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Logistic Regression'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] = eval(request.POST.get('classification_LogisticRegression')) + if problem_type.lower() ==" +"'classification' and configSettings['basic']['algorithms']['classification']['Naive Bayes'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] = eval(request.POST.get('classification_GaussianNB')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Support Vector Machine'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine'] = eval(request.POST.get('classification_SVC')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['K Nearest Neighbors'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] = eval(request.POST.get('classification_KNeighborsClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Decision Tree'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] = eval(request.POST.get('classification_DecisionTreeClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Random Forest'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] = eval(request.POST.get('classification_RandomForestClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Gradient Boosting'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] = eval(request.POST.get('classification_GradientBoostingClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Extreme Gradient Boosting (XGBoost)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classification_ExtremeGradientBoostingClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Light Gradient Boosting (LightGBM)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classification_LightGradientBoostingClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Categorical Boosting (CatBoost)'] == 'True': + 
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('classification_CategoricalBoostingClassifier')) + + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Linear Regression'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] = eval(request.POST.get('regression_LinearRegression')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Lasso'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Lasso'] = eval(request.POST.get('regression_Lasso')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Ridge'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Ridge'] = eval(request.POST.get('regression_Ridge')) + + if problem_type.lower() == 'topicmodelling' and configSettings['basic']['algorithms']['topicModelling']['LDA'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['topicModellingParams']['LDA']= eval(request.POST.get('topicmodeling_lda')) + + if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['KMeans'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['KMeans']= eval(request.POST.get('cluster_kmeans')) + if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['DBSCAN'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['DBSCAN']= eval(request.POST.get('cluster_DBSCAN')) + + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Decision Tree'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] = eval(request.POST.get('regression_DecisionTreeRegressor')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Random Forest'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] = eval(request.POST.get('regression_RandomForestRegressor')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Extreme Gradient Boosting (XGBoost)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('regression_XGBoostRegressor')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Light Gradient Boosting (LightGBM)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('regression_LightGBMRegressor')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Categorical Boosting (CatBoost)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('regression_CatBoostRegressor')) + configSettings['advance']['mllearner_config']['modelparamsfile'] = request.POST.get('ModelParamFile') + configSettings['advance']['mllearner_config']['optimizationMethod'] = 
request.POST.get('OptimizationMethod') + configSettings['advance']['mllearner_config']['optimizationHyperParameter'][ + 'iterations'] = request.POST.get('iterations') + configSettings['advance']['mllearner_config']['optimizationHyperParameter'][ + 'trainTestCVSplit'] = request.POST.get('trainTestCVSplit') + configSettings['advance']['mllearner_config']['thresholdTunning'] = request.POST.get('thresholdTunning') + configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] = request.POST.get('EnsembleStacking') + configSettings['advance']['mllearner_config']['Voting (Ensemble)'] = request.POST.get('EnsembleVoting') + + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] = request.POST.get('ensemple_bagging_lr_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['param'] = eval(request.POST.get('classi_ensemple_bagging_lr_param')) + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] = request.POST.get('ensemple_bagging_naivebayes_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['param'] = eval(request.POST.get('classi_ensemple_bagging_naivebayes_param')) + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] = request.POST.get('ensemple_bagging_svm_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['param'] = eval(request.POST.get('classi_ensemple_bagging_svm_param')) + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] = request.POST.get('ensemple_bagging_knn_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['param'] = eval(request.POST.get('classi_ensemple_bagging_knn_param')) + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dt_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('classi_ensemple_bagging_dt_param')) + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] = 
request.POST.get('ensemple_bagging_rf_enable') + if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['param'] = eval(request.POST.get('classi_ensemple_bagging_rf_param')) + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] = request.POST.get('ensemple_bagging_lir_enable') + if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['param'] = eval(request.POST.get('reg_ensemple_bagging_lir_param')) + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dit_enable') + if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('reg_ensemple_bagging_dit_param')) + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] = request.POST.get('ensemple_bagging_ridge_enable') + if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['param'] = eval(request.POST.get('reg_ensemple_bagging_ridge_param')) + if problem_type.lower() == 'classification': + if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingClassifierparams')) + if problem_type.lower() == 'regression': + if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True': + configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingRegressorparams')) + configSettings['basic']['filterExpression'] = request.POST.get('filterExpression') + #configSettings['advance']['mllearner_config']['trainPercentage'] = request.POST.get('trainPercentage') + + if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression'): + configSettings['advance']['modelEvaluation']['smcStrategy'] = request.POST.get('smcStrategy') + configSettings['advance']['modelEvaluation']['smcMaxDepth'] = request.POST.get('smcMaxDepth') + configSettings['advance']['modelEvaluation']['smcCondition'] = request.POST.get('smcCondition') + configSettings['advance']['modelEvaluation']['miCondition'] = request.POST.get('miCondition') + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Neural Network'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] = eval( + request.POST.get('dl_classification_SNN')) + if 
problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network'] = eval( + request.POST.get('dl_classification_RNN')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (GRU)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)'] = eval( + request.POST.get('dl_classification_GRURNN')) + if problem_type.lower() == '" +"classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (LSTM)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] = eval( + request.POST.get('dl_classification_LSTMRNN')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Convolutional Neural Network (1D)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)'] = eval( + request.POST.get('dl_classification_CNN')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification'].get('Neural Architecture Search') == 'True': + configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search'] = eval( + request.POST.get('dl_classification_NAS')) + + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Neural Network'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] = eval( + request.POST.get('dl_regression_SNN')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] = eval( + request.POST.get('dl_regression_RNN')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (GRU)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] = eval( + request.POST.get('dl_regression_GRURNN')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (LSTM)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] = eval( + request.POST.get('dl_regression_LSTMRNN')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Convolutional Neural Network (1D)'] == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)'] = eval( + request.POST.get('dl_regression_CNN')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression'].get('Neural Architecture Search') == 'True': + configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search'] = eval( + request.POST.get('dl_regression_NAS')) + #configSettings['advance']['dllearner_config']['optimizationMethod'] = 
request.POST.get('DLOptimizationMethod') + else: + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Logistic Regression'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression'] = eval(request.POST.get('OnlineLogisticRegression')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Decision Tree Classifier'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier'] = eval(request.POST.get('OnlineDecisionTreeClassifier')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Softmax Regression'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression'] = eval(request.POST.get('OnlineSoftmaxRegression')) + if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online KNN Classifier'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier'] = eval(request.POST.get('OnlineKNNClassifier')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Linear Regression'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression'] = eval(request.POST.get('OnlineLinearRegression')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Decision Tree Regressor'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor'] = eval(request.POST.get('OnlineDecisionTreeRegressor')) + if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online KNN Regressor'] == 'True': + configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor'] = eval(request.POST.get('OnlineKNNRegressor')) + configSettings['advance']['profiler']['targetEncodingParams'] = eval(request.POST.get('targetEncodingParams')) + configSettings['advance']['profiler']['outlierDetectionParams'] = eval(request.POST.get('outlierDetectionParams')) + + if problem_type.lower() == 'objectdetection': + configSettings['advance']['objectDetection']['pretrainedModel']= request.POST.get('objectdetectionpretrainedmodel') + configSettings['advance']['objectDetection']['n_epoch'] = int(request.POST.get('objectDetection_n_epoch')) + configSettings['advance']['objectDetection']['batch_size'] = int(request.POST.get('objectDetection_batch_size')) + + if problem_type.lower() == 'timeseriesforecasting': #task 11997 #task 13052 + + configSettings['advance']['timeSeriesForecasting']['fix_seasonality'] = request.POST.get('seasionality') # task 13052 + configSettings['advance']['timeSeriesForecasting']['fix_stationarity'] =request.POST.get('stationarity') # task 13052 + configSettings['advance']['timeSeriesForecasting']['modelParams']['ARIMA'] = eval(request.POST.get('ARIMA')) #task 11997 + configSettings['advance']['timeSeriesForecasting']['modelParams']['FBPROPHET'] = eval(request.POST.get('FBPROPHET')) #task 11997 + configSettings['advance']['timeSeriesForecasting']['modelParams']['LSTM'] = 
eval(request.POST.get('TSLSTM')) #task 11997 + configSettings['advance']['timeSeriesForecasting']['modelParams']['Encoder_Decoder_LSTM_MVI_UVO'] = eval(request.POST.get('TSLSTMencoderdecoder')) + configSettings['advance']['timeSeriesForecasting']['modelParams']['MLP'] = eval(request.POST.get('TSMLP')) #task 11997 + + if problem_type.lower() == 'timeseriesanomalydetection': + configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['AutoEncoder'] = eval(request.POST.get('autoEncoderAD')) #task 11997 + configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('dbscanAD')) #task 13316 + if problem_type.lower() == 'anomalydetection': + configSettings['advance']['anomalyDetection']['modelParams']['IsolationForest'] = eval(request.POST.get('IsolationForest')) + configSettings['advance']['anomalyDetection']['modelParams']['oneclassSVM'] = eval(request.POST.get('oneclassSVM')) + configSettings['advance']['anomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('DBScanAD')) + + + + updatedConfigSettingsJson = json.dumps(configSettings) + f.seek(0) + f.write(updatedConfigSettingsJson) + f.truncate() + f.close() + errormsg = 'NA' + request.session['ModelStatus'] = 'Not Trained' + except Exception as e: + import sys + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + errormsg = 'Input value error' + print(e) + + if 'NoOfRecords' in request.session: + records = request.session['NoOfRecords'] + else: + records = 'NA' + if request.session['datatype'] in ['Video', 'Image','Document']: + folderLocation = str(request.session['datalocation']) + dataFilePath = os.path.join(folderLocation, request.session['csvfullpath']) + else: + dataFilePath = str(request.session['datalocation']) + # dataFilePath = configSettings['basic']['dataLocation'] + #df = pd.read_csv(dataFilePath, encoding='latin1') + featuresList = configSettings['basic']['featureList'] + + config = {} + config['modelName'] = configSettings['basic']['modelName'] + config['modelVersion'] = configSettings['basic']['modelVersion'] + config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature'] + config['sequenceFeatures'] = configSettings['basic']['indexFeature'] + config['FeaturesList'] = featuresList + config['unimportantFeatures'] = list(set(featuresList) - set(configSettings['basic']['trainingFeatures'])) + config['targetFeature'] = configSettings['basic']['targetFeature'] + scoring = configSettings['basic']['scoringCriteria'] + scoringCriteria = """" + for k in scoring.keys(): + if configSettings['basic']['scoringCriteria'][k] == 'True': + scoringCriteria = k + break + config['scoringCriteria'] = scoringCriteria + + temp = {} + temp['ModelName'] = configSettings['basic']['modelName'] + temp['Version'] = configSettings['basic']['modelVersion'] + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'tab': 'advconfig', 'config': config, 'temp': temp, 'advconfig': configSettings, + 'noOfRecords': records, 'advance_status_msg': 'Configuration Done', + 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'errormsg':errormsg, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'selected': 'modeltraining'} + return context + elif submittype == 'AdvanceDefault': + 
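+ # 'AdvanceDefault': reload the packaged default advanced settings from
+ # DEFAULT_FILE_PATH/aion_config.json, copy their 'advance' section into the
+ # use case's config_json from the session, normalise the field aliases via
+ # advanceConfigfields(), and rebuild the context for the advanced-configuration page.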
try: + MachineLearningModels = [] + configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"") + configSettingsData = f.read() + updateconfigSettingsJson = json.loads(configSettingsData) + configSettingsJson = json.loads(configSettings) + temp = {} + temp['ModelName'] = request.session['UseCaseName'] + temp['Version'] = request.session['ModelVersion'] + config = {} + config['modelName'] = request.session['UseCaseName'] + config['modelVersion'] = request.session['ModelVersion'] + config['datetimeFeatures'] = updateconfigSettingsJson['basic']['dateTimeFeature'] + config['sequenceFeatures'] = updateconfigSettingsJson['basic']['indexFeature'] + config['FeaturesList'] = updateconfigSettingsJson['basic']['trainingFeatures'] + config['unimportantFeatures'] = '' + config['targetFeature'] = updateconfigSettingsJson['basic']['targetFeature'] + problemtypes = updateconfigSettingsJson['basic']['analysisType'] + problem_type = """" + for k in problemtypes.keys(): + if updateconfigSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + selectAlgo = """" + if problem_type in ['classification','regression','timeSeriesForecasting', + 'timeSeriesAnomalyDetection', + 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997 + for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]: + if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + if selectAlgo != """": + selectAlgo += ',' + selectAlgo += key + if problem_type not in ['classification','regression']: + break + for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]: + if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + MachineLearningModels.append(key) + if problem_type == 'objectDetection': + from AION import pretrainedModels + ptmObj = pretrainedModels() + obModels = ptmObj.get_info(selectAlgo) + else: + obModels = {} + + problemType = problem_type + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 2 + if request.session['finalstate'] <= 2: + request.session['finalstate'] = 2 + outlierDetection = 'False' + updateconfigSettingsJson['advance'] = configSettingsJson['advance'] + for x in list(updateconfigSettingsJson['advance']['profiler']['outlierDetection'].keys()): + if updateconfigSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True': + outlierDetection = 'True' + if outlierDetection == 'False': + updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True' + else: + updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'False' + updateconfigSettingsJson = advanceConfigfields(updateconfigSettingsJson) + #print(configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier']) + updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = 'None' + normalizationtypes = updateconfigSettingsJson['advance']['profiler']['normalization'] + for k in normalizationtypes.keys(): + if updateconfigSettings" +"Json['advance']['profiler']['normalization'][k] == 'True': + 
updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = k + break + + + #---------------- default Hypermarameter changes--- ----------Usnish-------------- + hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json')) + with open(hyperparamFile) as json_file: + hyperparamConfig = json.load(json_file) + context = {'tab': 'advconfig','temp': temp,'advconfig': updateconfigSettingsJson, + 'config': config, 'selected_use_case': selected_use_case,'MachineLearningModels':MachineLearningModels, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,""obModels"":obModels,""problemType"":problemType, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'selected': 'modeltraning','advance_help':ht.advance_help,'hyperparamConfig':hyperparamConfig} + return context + except Exception as e: + print(e) +def llmadvancesettings(configSettings,request): + algo = '' + for x in list(configSettings['basic']['algorithms']['llmFineTuning'].keys()): + if configSettings['basic']['algorithms']['llmFineTuning'][x] == 'True': + algo = x + + if algo == 'LLaMA-2': + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['fineTuningMethod'] = request.POST.get('llama2fullfinemethod') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['epochs'] = request.POST.get('llama2epochs') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['learning_rate'] = request.POST.get('llama2learningrate') + if request.POST.get('llama2fullfinemethod') != 'Full Fine-Tuning': + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_rank'] = request.POST.get('llama2lorarank') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_alpha'] = request.POST.get('llama2loraalpha') + if algo == 'LLaMA-2-Chat': + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['fineTuningMethod'] = request.POST.get('llama2chatfullfinemethod') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['epochs'] = request.POST.get('llmllama2chatepochs') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['learning_rate'] = request.POST.get('llama2chatlearningrate') + if request.POST.get('llama2chatfullfinemethod') != 'Full Fine-Tuning': + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_rank'] = request.POST.get('llama2chatlorarank') + configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_alpha'] = request.POST.get('llama2chatloraalpha') + if algo == 'CodeLLaMA-2': + configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['fineTuningMethod'] = request.POST.get('CodeLLaMA2fullfinemethod') + configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['epochs'] = request.POST.get('CodeLLaMA2epochs') + configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['learning_rate'] = request.POST.get('CodeLLaMA2learningrate') + if request.POST.get('CodeLLaMA2fullfinemethod') != 'Full Fine-Tuning': + configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_rank'] = request.POST.get('CodeLLaMA2lorarank') + configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_alpha'] = request.POST.get('CodeLLaMA2loraalpha') + if algo == 'Falcon': + configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['fullFineTuning'] = 
request.POST.get('falconfullfinetuning') + configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['epochs'] = request.POST.get('falconepochs') + configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['learning_rate'] = request.POST.get('falconlearningrate') + configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_rank'] = request.POST.get('falconlorarank') + configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_alpha'] = request.POST.get('falconloraalpha') + + + + + return configSettings + +def advanceConfigfields(configSettingsJson): + try: + configSettingsJson['advance']['mllearner_config']['EnsembleStacking'] = \\ + configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['EnsembleVoting'] = \\ + configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'LogisticRegression'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'Support Vector Machine'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'KNeighborsClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'DecisionTreeClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'RandomForestClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'GradientBoostingClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'ExtremeGradientBoostingClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'LightGradientBoostingClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'Light Gradient Boosting (LightGBM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'CategoricalBoostingClassifier'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ + 'Categorical Boosting (CatBoost)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural 
Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ + 'Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ + 'Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ + 'Recurrent Neural Network (LSTM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'LogisticRegression'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'Logistic Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'NaiveBayes'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'Naive Bayes'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'SVM'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'Support Vector Machine'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'KNN'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'K Nearest Neighbors'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'DecisionTree'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'RandomForest'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ + 'Random Forest'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network (LSTM)'] + + 
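+ # The reinforcement-learning parameter blocks are aliased the same way below:
+ # entries stored under display names ('Deep Q Network', 'Dueling Deep Q Network')
+ # are copied onto short keys ('DQN', 'DDQN') for both classifier and regressor params.
+ # Table-driven sketch (illustrative only, not part of the original file):
+ # RL_ALIASES = {'DQN': 'Deep Q Network', 'DDQN': 'Dueling Deep Q Network'}
+ # for params in ('classifierModelParams', 'regressorModelParams'):
+ #     block = configSettingsJson['advance']['rllearner_config']['modelParams'][params]
+ #     for short, full in RL_ALIASES.items():
+ #         block[short] = block[full]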
configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = \\ + configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = \\ + configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams'][ + 'Dueling Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = \\ + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = \\ + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams'][ + 'Dueling Deep Q Network'] + + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ + 'Convolutional Neural Network (1D)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ + 'DecisionTreeRegressor'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ + 'RandomForestRegressor'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ + 'Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ + 'Light Gradient Boosting (LightGBM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ + 'Categorical Boosting (CatBoost)'] + + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ + " +"'LinearRegression'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ + 'Linear Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ + 'DecisionTree'] = \\ + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ + 'Decision Tree'] + + 
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Neural Architecture Search'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ + 'Neural Architecture Search'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Recurrent Neural Network (LSTM)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = \\ + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ + 'Convolutional Neural Network (1D)'] + + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'OnlineLogisticRegression'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'Online Logistic Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'OnlineDecisionTreeClassifier'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'Online Decision Tree Classifier'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'OnlineSoftmaxRegression'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'Online Softmax Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'OnlineKNNClassifier'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ + 'Online KNN Classifier'] + + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'OnlineLinearRegression'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'Online Linear Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'OnlineDecisionTreeRegressor'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'Online Decision Tree Regressor'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'OnlineKNNRegressor'] = \\ + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ + 'Online KNN Regressor'] + + configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = \\ + 
configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] + configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = \\ + configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] + if 'llmFineTuning' in configSettingsJson['advance']: + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = \\ + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2'] + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = \\ + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat'] + configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = \\ + configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2'] + + configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = \\ + configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2'] + configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = \\ + configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat'] + configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = \\ + configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2'] + + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2'] + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat'] + configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2'] + + if 'distributedlearner_config' in configSettingsJson['advance']: + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ + 'DistributedXGBoost'] = \\ + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ + 'Distributed Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ + 'DistributedLightGBM'] = \\ + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ + 'Distributed Light Gradient Boosting (LightGBM)'] + + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ + 'DistributedXGBoost'] = \\ + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ + 'Distributed Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ + 'DistributedLightGBM'] = \\ + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ + 'Distributed Light Gradient Boosting (LightGBM)'] + + problem_type = """" + problemtypes = configSettingsJson['basic']['analysisType'] + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + deepLearning = 'False' + machineLearning = 'False' + reinforcementLearning = 'False' + selectAlgo = """" + if problem_type.lower() in ['classification','regression']: + for key in configSettingsJson['basic']['algorithms'][problem_type]: + if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural 
Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']: + deepLearning = 'True' + if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']: + machineLearning = 'True' + if key in ['Deep Q Network','Dueling Deep Q Network']: + reinforcementLearning = 'True' + elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611) + machineLearning = 'True' + configSettingsJson['basic']['deepLearning'] = deepLearning + configSettingsJson['basic']['machineLearning'] = machineLearning + configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning + except Exception as e: + print(e) + return (configSettingsJson) + +def basicconfignex(request): + #pemfilename = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','modelTraining','static','key','AION_GPU.pem')) + try: + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + #---------------- default Hypermarameter changes-------------Usnish-------------- + hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json')) + with open(hyperparamFile) as json_file: + hyperparamConfig = json.load(json_file) + #---------------- default Hypermarameter changes end-------------Usnish-------------- + # ------------------ Debiasing Changes ------------------ + categorical_features = [] + class_list = [] + MachineLearningModels = [] + check_traget = configSettingsJson['basic']['targetFeature'] + selectedDebiasingFeature = 'None' + selectedDebiasingClass = 'None' + selectedDebiasingAlgorithm = '' + problemtypes = configSettingsJson['basic']['analysisType'] + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + if request.method == 'GET': + for key in configSettingsJson['basic']['algorithms'][problem_type]: + if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + MachineLearningModels.append(key) + else: + MachineLearningModels = request.POST.getlist('MachineLearningModels') + if problem_type.lower() in ['classification','regression']: + if check_traget != '': + try: + if 'deBiasing' in configSettingsJson['advance']['profiler']: + deBiasing = configSettingsJson['advance']['profiler']['deBiasing'] + selectedDebiasingFeature = deBiasing.get('FeatureName','None') + selectedDebiasingClass = deBiasing.get('ClassName','None') + selectedDebiasingAlgorithm = deBiasing.get('Algorithm','') + + if selectedDebiasingFeature != 'None': + df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8',encoding_errors= 'replace') + classeslist = [] + classeslist = df[selectedDebiasingFeature].unique().tolist() + for item in classeslist: + class_list.append(item) + else: + class_list.append('None') + except: + pass + feature_dict = configSettingsJson['advance']['profiler']['featureDict'] + for feature_config in feature_dict: + if feature_config.get('type', '') == 'categorical' and feature_config['feature'] != check_traget: + categorical_features.append(feature_config['feature']) + # 
------------------ ------------------ + #print(categorical_features) + temp = {} + temp['ModelName'] = request.session['UseCaseName'] + temp['Version'] = request.session['ModelVersion'] + config = {} + config['modelName'] = request.session['UseCaseName'] + config['modelVersion'] = request.session['ModelVersion'] + config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature'] + config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature'] + config['FeaturesList'] = configSettingsJson['basic']['trainingFeatures'] + config['unimportantFeatures'] = '' + config['targetFeature'] = configSettingsJson['basic']['targetFeature'] + + deepLearning = 'False' + machineLearning = 'False' + reinforcementLearning = 'False' + selectAlgo = """" + print(problem_type) + if problem_type.lower() in ['classification','regression']: + for key in configSettingsJson['basic']['algorithms'][problem_type]: + if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']: + deepLearning = 'True' + if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']: + machineLearning = 'True' + if key in ['Deep Q Network','Dueling Deep Q Network']: + reinforcementLearning = 'True' + elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611) + machineLearning = 'True' + configSettingsJson['basic']['deepLearning'] = deepLearning + configSettingsJson['basic']['machineLearning'] = machineLearning + configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning + if problem_type in ['classification','regression','timeSeriesForecasting', + 'timeSeriesAnomalyDetection', + 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997 + for key in configSettingsJson['basic']['algorithms'][problem_type]: + if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': + if selectAlgo != """": + selectAlgo += ',' + selectAlgo += key + " +"if problem_type not in ['classification','regression']: + break + if problem_type == 'objectDetection': + from AION import pretrainedModels + ptmObj = pretrainedModels() + obModels = ptmObj.get_info(selectAlgo) + else: + obModels = {} + + problemType = problem_type + + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 2 + + #configSettingsJson['advance']['remoteTraining']['ssh']['keyFilePath'] = pemfilename + if request.session['finalstate'] <= 2: + request.session['finalstate'] = 2 + outlierDetection = 'False' + for x in list(configSettingsJson['advance']['profiler']['outlierDetection'].keys()): + if configSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True': + outlierDetection = 'True' + if outlierDetection == 'False': + configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True' + else: + configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 
'False' + + if 'distributedLearning' not in configSettingsJson['basic']: + configSettingsJson['basic']['distributedLearning'] = 'False' + configSettingsJson['advance']['mllearner_config']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['EnsembleVoting']=configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['KNeighborsClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['DecisionTreeClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['RandomForestClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LightGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['CategoricalBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = 
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Logistic Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['NaiveBayes'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Naive Bayes'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['SVM'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Support Vector Machine'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['KNN'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['K Nearest Neighbors'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['RandomForest'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Random Forest'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] + + configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network'] + configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = 
configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network'] + + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['DecisionTreeRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['RandomForestRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)'] + + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Linear Regression'] + configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Decision Tree'] + + + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'].get('Neural Architecture Search') + configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'].get('Neural Architecture Search') + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = 
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] + configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)'] + + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineLogisticRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineDecisionTreeClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineSoftmaxRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineKNNClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier'] + + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineLinearRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineDecisionTreeRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor'] + configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineKNNRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor'] + + configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] + configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] + if 'llmFineTuning' in configSettingsJson['advance']: + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2'] + configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat'] + configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2'] + + configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2'] + 
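## Note: the assignments in this block only alias UI display names (e.g. 'LLaMA-2', 'Neural Architecture Search') to code-friendly keys (e.g. 'LLaMA2', 'NAS'); where the value is itself a dict (such as modelParams), the assignment copies a reference, so both keys then point at the same parameter object. + 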
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['" +"LLaMA-2-Chat'] + configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2'] + + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2'] + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat'] + configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \\ + configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2'] + + if 'distributedlearner_config' in configSettingsJson['advance']: + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedLightGBM'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] + + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] + configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedLightGBM'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Light Gradient Boosting (LightGBM)'] + + + configSettingsJson['advance']['profiler']['normalizationMethod'] = 'None' + normalizationtypes = configSettingsJson['advance']['profiler']['normalization'] + for k in normalizationtypes.keys(): + if configSettingsJson['advance']['profiler']['normalization'][k] == 'True': + configSettingsJson['advance']['profiler']['normalizationMethod'] = k + break + context = {'temp': temp, 'advconfig': configSettingsJson, 'MachineLearningModels':MachineLearningModels,'hyperparamConfig':hyperparamConfig,'config': config, 'selected_use_case': selected_use_case, + 'categorical_features': categorical_features, 'selectedDebiasingFeature': selectedDebiasingFeature, 'selectedDebiasingAlgorithm': selectedDebiasingAlgorithm, 'Class_list': class_list, 'selectedDebiasingClass': selectedDebiasingClass, #Debiasing Changes + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,""obModels"":obModels,""problemType"":problemType, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'selected': 'modeltraning','advance_help':ht.advance_help} + return context + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + context={'erroradvance':'Fail to load advance config Json file'} + return context + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies 
Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +from os.path import expanduser +import platform +import json +import subprocess +import re +import sys +import pandas as pd +from django.http import HttpResponse +from appbe.dataPath import DATA_DIR +Usecaselocation = os.path.join(DATA_DIR,'Usecases') +def mlstyles(request): + try: + from appbe.aion_config import settings + usecasetab = settings() + selectid = request.GET['usecaseid'] + configFile = os.path.join(Usecaselocation, 'usecases.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + #usecase = configSettingsJson['usecaselist'] + desciption="""" + usecasename="""" + found = False + for v_id in configSettingsJson['verticallist']: + for p_id in v_id['usecaselist']: + usecaseid = p_id.get('usecaseid') + if str(usecaseid) == str(selectid) : + + usecasename = p_id.get('usecasename') + desciption = p_id.get('desciption') + usecaseid = p_id.get('usecaseid') + iconname = p_id.get('iconname') + prediction_input = p_id.get('prediction_input') + outputtype = p_id.get('outputtype') + smalldescription = p_id.get('smalldescription') + trainingFeatures = p_id.get('trainingFeatures','None') + if trainingFeatures != 'None': + trainingFeatures = trainingFeatures.split(',') + found = True + break + if found == True: + break + #print(usecaseid,selectid) + context ={'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':usecaseid,'trainingFeatures':trainingFeatures,'iconname':iconname,'smalldescription':smalldescription,'outputtype':outputtype,'usecasetab':usecasetab} + return context + except Exception as inst: + print(inst) + context = { 'error3':'error3','error1': ""No UseCases to show""} + return context +def getusecasedetails(selectid): + configFile = os.path.join(Usecaselocation, 'usecases.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + #usecase = configSettingsJson['usecaselist'] + desciption="""" + usecasename="""" + found = False + for v_id in configSettingsJson['verticallist']: + for p_id in v_id['usecaselist']: + usecaseid = p_id.get('usecaseid') + + if str(usecaseid) == str(selectid) : + usecasename = p_id.get('usecasename') + desciption = p_id.get('desciption') + usecaseid = p_id.get('usecaseid') + modelConfig = p_id.get('modelConfig') + folder = p_id.get('folder') + prediction = p_id.get('prediction') + prediction_input = p_id.get('prediction_input') + ai_modeldata = p_id.get('modeldata') + outputtype = p_id.get('outputtype') + smalldescription = p_id.get('smalldescription') + prediction_template = p_id.get('prediction_template') + trainingFeatures = p_id.get('trainingFeatures','None') + if trainingFeatures != 'None': + trainingFeatures = trainingFeatures.split(',') + found = True + break + if found == True: + break + #print(usecasename) + return(usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures) + + +def mlpredict(request): + selectid=request.POST.get('usecaseid') + mlpredict =request.POST.get('mlpredict') + 
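# getusecasedetails() (defined above) resolves the selected use case from usecases.json and returns its metadata (folder, prediction script, input fields, training features, etc.); the prediction below is delegated to that use case's own script via subprocess. + 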
usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid) + from appbe.aion_config import settings + usecasetab = settings() + usecasename = usecasename + desciption = desciption + input='' + for x in prediction_input: + if input != '': + input += ',' + input = request.POST.get(x['name']) + + if mlpredict in ['prediction','predictsingle']: + if mlpredict == 'prediction': + dataFile = request.POST.get('predictfilePath') + + if(os.path.isfile(dataFile) == False) or dataFile=="""": + context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption , 'error1': 'Please enter a valid csv filepath','usecasetab':usecasetab} + return context, mlpredict + else: + inputFieldsDict = {} + for feature in trainingFeatures: + inputFieldsDict[feature] = request.POST.get(feature) + dataFile = json.dumps(inputFieldsDict) + try: + predictionScriptPath= os.path.join(Usecaselocation,folder,'model',prediction) + # predictionScriptPath = os.path.join(predictionscript, 'aion_prediction.py') + + outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile,input]) + + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + #print(predict_dict) + heading ='' + timetaken='' + print(predict_dict) + if (predict_dict['status'] == 'SUCCESS'): + predictionResults = predict_dict['data'] + #print(predictionResults) + if 'heading' in predict_dict: + heading = predict_dict['heading'] + if 'Time' in predict_dict: + timetaken = round(predict_dict['Time'],2) + if outputtype.lower() in ['similarityidentification','contextualsearch']: + data = predictionResults[0] + predictionResults= [] + Results={} + prediction = data['prediction'] + i = 1 + for x in prediction: + te = '' + for y in x: + info = (str(x[y])[:100] + '...') if len(str(x[y])) > 100 else str(x[y]) + te += y+': '+info+'\\n\\n' + Results[i] = te + i = i+1 + + predictionResults.append(Results) + + else: + context = {'usecaseid':selectid ,'dataFile':dataFile,'prediction_input':prediction_input,'usecasename':usecasename,'desciption':desciption , 'error': 'Failed To perform prediction','usecasetab':usecasetab} + return context, mlpredict + print(heading) + context = {'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':selectid ,'dataFile':dataFile,'predictionResults': predictionResults,'outputtype':outputtype,'heading':heading,'timetaken':timetaken,'usecasetab':usecasetab,'trainingFeatures':trainingFeatures} + return context, mlpredict + except Exception as inst: + print(inst) + context = { 'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'errorp': 'Failed To perform prediction','usecasetab':usecasetab} + return context, mlpredict + if mlpredict == 'download_predict': + # predictionResults = 'C:\\\\DataSets\\\\Classification\\\\bug_severity_class.csv' + try: + csvdata= os.path.join(Usecaselocation,folder,'Data',prediction_template) + if os.path.isfile(csvdata) and os.path.exists(csvdata): + df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace') + downloadFileName = usecasename.replace("" "", ""_"") + '_predict.csv' + response = HttpResponse(content_type='text/csv') + response['Content-Disposition'] = 'attachment; filename='+downloadFileName + 
df.to_csv(response, index=False) + return response,mlpredict + else: + context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 'File not found','usecasetab':usecasetab} + return context, mlpredict + + except Exception as inst: + + context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab} + return context, mltrain +def process(data): + cleaned_data = {""verticallist"":[]} + for vertical in data['verticallist']: + updated_list = [] + for usecase in vertical['usecaselist']: + if usecase['prediction'] and usecase['prediction'] != ""Not Implemented"": + updated_list.append(usecase) + if updated_list: + cleaned_data['verticallist'].append({'id':vertical['id'],'name':vertical['name'],'usecaselist':updated_list}) + return cleaned_data + +def Aiusecases(request,selectedoption='Implemented'): + try: + from appbe.aion_config import settings + usecasetab = settings() + configFile = os.path.join(Usecaselocation, 'usecases.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + + + + if selectedoption == 'Implemented': + configSettingsJson = process(configSettingsJson) + usecasedetails = configSettingsJson['verticallist'] + context ={'desciption1':usecasedetails,'selected':'AIusecases','usecasetab':usecasetab} + return context + except Exception as e: + print(e) + context ={'error':""No Usecases to Show"",'selected':'AIusecases','usecasetab':usecasetab} + return context + +def mltrain(request): + from appbe.aion_config import settings + usecasetab = settings() + selectid =request.POST.get('usecaseid1') + mltrain =request.POST.get('mltrain') + + usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid) + usecasename = usecasename + desciption = des" +"ciption + + if mltrain == 'training': + dataFile = request.POST.get('trainfilePath') + if(os.path.isfile(dataFile) == False) or dataFile=="""": + context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'error3':'error3','error1': 'Please enter a valid csv filepath'} + return context, mltrain + try: + scriptPath = os.path.join(Usecaselocation,folder,'config','aion_train.py') + print(scriptPath,dataFile) + outputStr = subprocess.check_output([sys.executable, scriptPath, dataFile]) + + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + + + train = json.loads(outputStr) + status = train['status'] + DeployLocation = train['data']['deployLocation'] + ModelType = train['data']['ModelType'] + BestModel = train['data']['BestModel'] + BestScore = train['data']['BestScore'] + ScoreType = train['data']['ScoreType'] + FeaturesUsed = train['data']['featuresused'] + + context={'result':train,'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption,'status':status,'DeployLocation':DeployLocation,'ModelType':ModelType,'BestModel':BestModel,'BestScore':BestScore,'ScoreType':ScoreType,'FeaturesUsed':FeaturesUsed,'result':'result','usecasetab':usecasetab} + return context,mltrain + except Exception as inst: + + context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 
'errort': 'Failed To perform Training','usecasetab':usecasetab} + return context, mltrain + + if mltrain == 'download_train': + + try: + csvdata= os.path.join(Usecaselocation,folder,'data',ai_modeldata) + #print(csvdata) + if os.path.isfile(csvdata) and os.path.exists(csvdata): + df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace') + downloadFileName = usecasename.replace("" "", ""_"") + '_training.csv' + response = HttpResponse(content_type='text/csv') + response['Content-Disposition'] = 'attachment; filename='+downloadFileName + df.to_csv(response, index=False) + return response,mltrain + else: + context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 'File not found','usecasetab':usecasetab} + return context, mltrain + except Exception as inst: + + context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab} + return context, mltrain + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import pandas as pd +import requests +import re + +import json +import sys +import time + +from appbe.aion_config import get_llm_data +from appbe.dataPath import LOG_LOCATION +from appbe.log_ut import logg +import logging + +import openai +import tiktoken + +openai.api_key = '' +openai.api_base = '' +openai.api_type = '' +openai.api_version = '' +deployment_name=""Text-Datvinci-03"" + + +def generateLabelPerRecord(OrgData): + OrgData['LabelFromGPT'] = OrgData['Head_Description'].apply(lambda x: \\ + generate_gpt3_response\\ + + (""I am giving you the title and short description \\ + in the format [Title:Description], \\ + give me the related low level topics in one word in the \\ + format[Topic: your primary topic] along with top 5 important keywords in the \\ + format[Keywords: keywords]'{}' "".format(x))) + + #Cleaning the output as it is from ChatGPT + OrgData['temp1'] = OrgData['LabelFromGPT'].apply(lambda x: (x.split('Topic:')[1]).replace(']','')) + OrgData['LabelFromGPT'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[0]).replace(']','').rstrip()) + OrgData['Keywords'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[1]).replace(']','')) + OrgData = OrgData.drop(['temp1','Head_Description'], axis=1) + return OrgData + + +def generateLabelForChunkedRecords(OrgData): + import io + # OrgData = OrgData.head(120) + + Head_Description = {""Head_Description"": [] } + Head_Description2 = {""Head_Description"": [] } + Head_Description['Head_Description'] = OrgData['Head_Description'] + + strt_ind = 0 + brk_ind = 0 + # encoding = tiktoken.get_encoding('p50k_base') + encoding = tiktoken.encoding_for_model(""text-davinci-003"") + + chunks = [] + _cur_token_count = 0 + _chunk_token_count = 0 + + for ind in Head_Description['Head_Description'].index: + tokenized_text = encoding.encode(Head_Description['Head_Description'][ind]) + _cur_token_count = len(tokenized_text) + + if _cur_token_count >= 600: + OrgData['Head_Description'][ind] = OrgData['Head_Description'][ind][:1000] + upto_ind = ind + 1 + + + Head_Description2['Head_Description'] = OrgData['Head_Description'][brk_ind:ind] + _chunk_token_count = encoding.encode(Head_Description2['Head_Description'].to_string()) + if len(_chunk_token_count) >= 1200: + brk_ind = ind + # print(brk_ind) + chunks.append(ind-1) + + + _start_count = 0 + if len(chunks) == 0: + output = generate_gpt3_response(""I am giving you datatable of text records \\ + for each record give me the related low level topics in one word as a data column called Topic\\ + and important top five keywords as a data column called Keywords. \\ + Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. 
'{}' "".format(Head_Description['Head_Description'])) + + out = io.StringIO(output[2:]) + df = pd.read_csv(out, sep='\\t') + + else: + chunks.append(len(Head_Description['Head_Description'])) + + for ind_val in chunks: + _cur_ind_val = ind_val + _recordsSent = 0 + + Head_Description = {""Head_Description"": [] } + + if _start_count == 0: + Head_Description['Head_Description'] = OrgData['Head_Description'][strt_ind:_cur_ind_val].to_string() + _recordsSent = len(OrgData['Head_Description'][strt_ind:_cur_ind_val]) + else: + Head_Description['Head_Description'] = OrgData['Head_Description'][_pre_ind_val:_cur_ind_val].to_string() + _recordsSent = len(OrgData['Head_Description'][_pre_ind_val:_cur_ind_val]) + + _pre_ind_val = ind_val + + + # if _start_count <= 5: + output = generate_gpt3_response(""I am giving you datatable of text records \\ + for each record give me the related low level topics in one word as a data column called Topic\\ + and important top five keywords as a data column called Keywords. \\ + Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. '{}' "".format(Head_Description['Head_Description'])) + + + out = io.StringIO(output[2:]) + + if _start_count == 0: + df = pd.read_csv(out, sep='\\t') + else: + df_tmp = pd.read_csv(out, sep='\\t') + + if len(df_tmp) > _recordsSent: + df_tmp = df_tmp.head(_recordsSent) + + # df = df.append(df_tmp, ignore_index=True) + df = pd.concat([df, df_tmp], ignore_index=True) + + _start_count += 1 + + OrgData['LabelFromGPT'] = df['Topic'] + OrgData['Keywords'] = df['Keywords'] + OrgData = OrgData.drop(['Head_Description'], axis=1) + return OrgData + + + +# Text Data Labelling using LLM related changes +# -------------------------------------------------------- +def generateTextLabel(request, DATA_FILE_PATH): + log = logging.getLogger('log_ux') + + key,url,api_type,api_version = get_llm_data() + openai.api_key = key + openai.api_base = url + openai.api_type = api_type + openai.api_version = api_version + + try: + features = request.POST.getlist('InputFeatures') + datapath = request.session['textdatapath'] + OrgData = pd.read_csv(datapath) + + # OrgData = OrgData.head(2000) + OrgData.fillna("""", inplace = True) + + OrgData['Head_Description'] = OrgData[features[0]] + if (len(features) > 1): + for indx in range(len(features)): + if (indx > 0): + OrgData['Head_Description'] = OrgData['Head_Description'] + "" ""+ OrgData[features[indx]] + + + # OrgData = generateLabelPerRecord(OrgData) + OrgData = generateLabelForChunkedRecords(OrgData) + + df = OrgData + + filetimestamp = str(int(time.time())) + datasetName = 'AION_TextLabelled' + filetimestamp+'.csv' + dataFile = os.path.join(DATA_FILE_PATH,datasetName) + df.to_csv(dataFile) + request.session['texttopicdatapath'] = dataFile + + df_json = df.to_json(orient=""records"") + df_json = json.loads(df_json) + + + + from appbe.dataPath import DATA_DIR + from appbe.sqliteUtility import sqlite_db + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + newdata = {} + newdata['datapath'] = [dataFile] + newdata['datasetname'] = [datasetName] + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest') + + + ################################################ + + context = {'data_topic':df_json, 'selected':'DataOperations'} + return context + + except Exception as e: + print(e) + exc_type, exc_obj, 
exc_tb = sys.exc_info() + + errormsg = str(e) + + if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg: + errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.' + + if 'Max retries exceeded with url' in errormsg: + errormsg = 'Please make sure you have good internet connection and access to API endpoint for your resource.' + + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + context = {'error': 'Failed to communicate LLM','LLM' : 'openAI', 'selected':'DataOperations', 'errormessage':errormsg} + log.info('generateTextLabel -- Error : Failed to generate Text-Label.. '+str(e)) + log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return context + + +#function to return the queried response +def generate_gpt3_response(user_text, print_output=False): + """""" + Query OpenAI GPT-3 for the specific key and get back a response + :type user_text: str the user's text to query for + :type print_output: boolean whether or not to print the raw output JSON + """""" + time.sleep(2) + + completions = openai.Completion.create( + # engine='Text-Datvinci-03', # Determines the quality, speed, and cost. engine='text-davinci-003', + engine=deployment_name, # Determines the quality, speed, and cost. engine='text-davinci-003', + + temperature=0, # Level of creativity in the response + prompt=user_text, # What the user typed in + max_tokens=2000, # Maximum tokens in the prompt AND response + n=1, # The number of completions to generate + stop=None, # An optional setting to control response generation + ) + + # Displaying the output can be helpful if things go wrong + if print_output: + print(completions) + + # Return the first choice's text + # print(completions.choices[0].text) + return completions.choices[0].text + +# -------------------------------------------------------- import pandas as pd +import numpy as np +from statsmodels.tsa.stattools import adfuller +from statsmodels.tsa.stattools import kpss +from statsmodels.tsa.seasonal import seasonal_decompose +import logging +import os +import warnings +warn" +"ings.filterwarnings('ignore') +## Main class to find out seassonality and stationary in timeseries data. +class StationarySeasonalityTest: + def __init__(self,df,featurename,datetimefeature): + self.df=df + self.targetFeature=featurename + self.datetimefeature=datetimefeature + + ## to get the timeseries data stationary information + def stationary_model(self,df,target_feature,stationary_check_method): + stationary_status=None + if (stationary_check_method.lower()=='adfuller'): + stats_model=adfuller(df[target_feature]) + statistic, p_value, n_lags, num_bservations,critical_values,info_criterion_best=stats_model[0],stats_model[1],stats_model[2],stats_model[3],stats_model[4],stats_model[5] + if (p_value>0.05): + stationary_status=str(""Non-Stationary"") + elif(p_value<0.05): + stationary_status=str(""Stationary"") + + ##kpss is opposite to ADF in considering null hypothesis. In KPSS, if null hypothesis,then it is stationary as oppose to ADF. 
+ elif (stationary_check_method.lower()=='kpss'): + from statsmodels.tsa.stattools import kpss + stats_model = kpss(df[target_feature]) + statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] + + ##In kpss, the stationary condition is opposite to Adafuller. + if (p_value>0.05): + stationary_status=str(""Stationary"") + + else: + stationary_status=str(""Non-Stationary"") + + return stats_model,n_lags,p_value,stationary_status + + ## Get stationary details + def stationary_check(self,target_feature,time_col,method): + df=self.df + df[time_col]=pd.to_datetime(df[time_col]) + df=df.set_index(time_col) + try: + stationary_check_method=method + except: + stationary_check_method='adfuller' + if (len(target_feature) == 1): + try: + if isinstance(target_feature,list): + target_feature=''.join(target_feature) + elif isinstance(target_feature,int): + target_feature=str(target_feature) + elif isinstance(target_feature,str): + pass + except Exception as e: + pass + stationary_result={} + stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_feature,stationary_check_method) + # stationary_result[target_feature]=stationary_status + stationary_result[target_feature]=stationary_status + elif(len(target_feature) > 1): + stationary_result={} + for col in df.columns: + stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method) + stationary_result[col]=stationary_status + else: + pass + stationary_val=None + for v in stationary_result.values(): + stationary_val=v + stationary_combined_res=dict() + c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v] + if (len(c_dict)>=1): + stationary_combined_res['dataframe_stationarity']='Non-Stationary' + else: + stationary_combined_res['dataframe_stationarity']='Stationary' + + return stats_model,n_lags,p_value,stationary_val,stationary_combined_res + #Get seasonality by using seasonal_decompose lib. 
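+    ## seasonality_model() decomposes the target series with statsmodels seasonal_decompose (additive model first, multiplicative as a fallback), using the KPSS lag count as the period, and reports "Seasonal" whenever the seasonal component does not sum to zero.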
+ def seasonality_model(self,target_feature,df): + seasonality_status=None + try: + try: + stats_model = kpss(df[target_feature]) + statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] + except: + n_lags=1 + pass + try: + df_target=self.df[target_feature] + decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=n_lags) + except Exception as e: + ##If additive model (type of seasonal component) failed, use multiplicative + decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1) + trend = decompose_result_mult.trend + observed=decompose_result_mult.observed + seasonal = decompose_result_mult.seasonal + residual = decompose_result_mult.resid + try: + if isinstance(df_target, pd.Series): + auto_correlation = df_target.autocorr(lag=n_lags) + elif isinstance(df_target, pd.DataFrame): + df_target = df_target.squeeze() + auto_correlation = df_target.autocorr(lag=n_lags) + except: + pass + if (seasonal.sum()==0): + seasonality_status=""Non-Seasonal"" + else: + seasonality_status=""Seasonal"" + + # #Please use the below plot for GUI show (seasonality components) + # decompose_result_mult.plot().savefig('seasonality_plot.png') + df['observed'] = decompose_result_mult.observed + df['residual'] = decompose_result_mult.resid + df['seasonal'] = decompose_result_mult.seasonal + df['trend'] = decompose_result_mult.trend + + except Exception as e: + print(""Seasonality function exception: \\t"",e) + return df,decompose_result_mult,seasonality_status + + + ##Main function to check seasonlity in data + def seasonal_check(self,target_feature,time_col,seasonal_model): + df=self.df + try: + df[time_col]=pd.to_datetime(df[time_col]) + except Exception as e: + pass + df=df.set_index(time_col) + + if (len(target_feature)==1): + try: + if isinstance(target_feature,list): + target_feature=''.join(target_feature) + elif isinstance(target_feature,int): + target_feature=str(target_feature) + elif isinstance(target_feature,str): + pass + except Exception as e: + ## Because of EDA, all log messages removed. (self.log.info ) + pass + ## Seasonal component for individual feature based. + seasonality_result=dict() + df,decompose_result_mult,seasonality_status = self.seasonality_model(target_feature,df) + # seasonality_result[target_feature]=seasonality_status + + seasonality_result['Feature: '+str(target_feature)]=seasonality_status + elif(len(target_feature) > 1): + seasonality_result=dict() + for col in df.columns: + df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df) + seasonality_result[col]=seasonality_status + else: + pass + + + # ## Seasonal component for whole dataset + seasonality_val=None + for v in seasonality_result.values(): + seasonality_val=v + seasonality_combined_res=dict() + c_dict=[k for k,v in seasonality_result.items() if 'non-seasonality' in v] + if (len(c_dict)>=1): + seasonality_combined_res['dataframe_seasonality']='No Seasonal elements' + else: + seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.' 
+ + return df,decompose_result_mult,seasonality_val,seasonality_combined_res + + #Main user defined caller for stationary and seasonality (SS) + def analysis(self,seasonality_status,stationarity_status): + seasonal_model=""additive"" + time_col=self.datetimefeature + stationary_method='adfuller' + if (isinstance(self.targetFeature,list)): + target=self.targetFeature + pass + elif (isinstance(self.targetFeature,str)): + target=list(self.targetFeature.split(',')) + if (stationarity_status.lower()==""true""): + stats_model,n_lags,p_value,stationary_result,stationary_combined_res=self.stationary_check(target,time_col,stationary_method) + return stationary_result + if (seasonality_status.lower()==""true""): + df,decompose_result_mult,seasonality_result,seasonality_combined_res=self.seasonal_check(target,time_col,seasonal_model) + return seasonality_result + + +#Main fn for standalone test purpose +if __name__=='__main__': + print(""Inside seasonality-stationary test main function..."") + print(""Below code used for standalone test purpose."") + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import logging + +from appbe.dataIngestion import getcommonfields +from appbe.dataIngestion import getusercasestatus +from appbe import service_url +import json +from appbe.dataIngestion import delimitedsetting +import os,sys +import pandas as pd +from django.http import HttpResponse +import time + +from appbe.dataPath import LOG_LOCATION +from appbe.log_ut import logg +def get_instance_id(modelID): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) + print(data) + if len(data) > 0: + return (data[3]+' instance '+data[2]) + else: + return 'Instance ID not available' + else: + return 'Instance ID not available' + +def get_instance(modelID): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) + if len(data) > 0: + return (data[3],data[2],data[5],data[6]) + else: + return '','','','' + else: + return '','','','' +def getprompt(promptfeature,contextFeature,responseFeature,promptFriendlyName,responseFriendlyName,data): + if contextFeature != '': + promptData = data[promptfeature].replace('\\n','') + inputData = data[contextFeature].replace('\\n','') + prompt = ( + f""Below is an {promptFriendlyName} that describes a task, paired with an Input that provides further context. 
"" + f""Write a {responseFriendlyName} that appropriately completes the request.\\n\\n"" + f""### {promptFriendlyName}:\\n{promptData}\\n\\n### Input:\\n{inputData}\\n\\n### {responseFriendlyName}:\\n"") + + else: + promptData = data[promptfeature].replace('\\n','') + prompt=( + f""Below is an {promptFriendlyName} that describes a task. "" + f""Write a {responseFriendlyName} that appropriately completes the request.\\n\\n"" + f""### {promptFriendlyName}:\\n{promptData}\\n\\n### {responseFriendlyName}:\\n"") + return prompt + +def getDataInstance(problem_type,mlmodels,configSettingsJson): + log = logging.getLogger('log_ux') + delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier']) + if problem_type == 'timeSeriesForecasting': #task 11997 + inputFieldsDict = {'noofforecasts': 10} + elif problem_type == 'recommenderSystem' and mlmodels =='ItemRating': + inputFieldsDict = {""uid"": 1, ""iid"": 31, ""rating"": 0} + elif problem_type == 'stateTransition': + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} + else: + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + if problem_type == 'survivalAnalysis': + inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + if os.path.isfile(dataFilePath): + df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,encoding_errors= 'replace') + try: + singleInstanceData = df.loc[0, inputFeaturesList] + except: + singleInstanceData = pd.Series(0, index =inputFeaturesList) + inputFieldsDict = singleInstanceData.to_dict() + else: + inputFieldsDict = {""File"":""EnterFileContent""} + inputFields = [] + inputFields.append(inputFieldsDict) + return inputFields +def createInstanceFeatures(configSettingsJson,problem_type,mlmodels,usecaseid,version,ser_url): + delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier']) + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + if inputFeatures != '': + inputFeaturesList = inputFeatures.split(',') + else: + inputFeaturesList = [] + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + if configSettingsJson['basic']['contextFeature'] != '': + inputFeaturesList.append(configSettingsJson['basic']['contextFeature']) + if problem_type == 'llmFineTuning': + inputFeaturesList.append('Temperature') + input" +"FeaturesList.append('Max Tokens') + if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997 + if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na': + inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + if problem_type == 'timeSeriesForecasting': #task 11997 + inputFieldsDict = {'noofforecasts': 10} + 
elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating': + inputFieldsDict = {""uid"": 1, ""numberOfRecommendation"":10} + elif problem_type == 'stateTransition': + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + if inputFeatures != '': + inputFeaturesList = inputFeatures.split(',') + else: + inputFeaturesList = [] + inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} + elif problem_type != 'llmFineTuning': + if os.path.isfile(dataFilePath): + df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') + try: + inputFieldsDict = df.to_dict(orient='index')[0] + except: + inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict() + else: + inputFieldsDict = {""File"":""EnterFileContent""} + else: + inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict() + inputFieldsDict['Temperature'] = '0.1' + hypervisor,instanceid,region,image = get_instance(usecaseid+'_'+str(version)) + if hypervisor.lower() == 'AWS': + inputFieldsDict['Max Tokens'] = '1024' + else: + inputFieldsDict['Max Tokens'] = '4096' + inputFields = [] + inputFields.append(inputFieldsDict) + if problem_type == 'llmFineTuning': + ser_url = get_instance_id(usecaseid+'_'+str(version)) + elif problem_type == 'stateTransition': + + ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecaseid+'&version='+str(version) + else: + + ser_url = ser_url+'predict?usecaseid='+usecaseid+'&version='+str(version) + return inputFields,ser_url +def singleInstancePredict(request, Existusecases, usecasedetails): + log = logging.getLogger('log_ux') + modelType='' + context = getcommonfields() + submittype = request.POST.get('predictsubmit') + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + t1 = time.time() + try: + try: + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + training_output = json.loads(training_output) + featureused = training_output['data']['featuresused'] + except: + featureused = [] + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Prediction','Yes') + + usecasename = request.session['usecaseid'].replace("" "", ""_"") + context.update({'usecasename':usecasename}) + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"", encoding = ""utf-8"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + if inputFeatures != '': + inputFeaturesList = inputFeatures.split(',') + else: + inputFeaturesList = [] + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + if configSettingsJson['basic']['contextFeature'] != '': + inputFeaturesList.append(configSettingsJson['basic']['contextFeature']) + + problemtypes = configSettingsJson['basic']['analysisType'] + problem_type = '' + modelSize = '' + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type == 'llmFineTuning': + inputFeaturesList.append('Temperature') + 
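+ # For LLM fine-tuning use cases the prediction form collects two extra
+ # generation controls alongside the training features: sampling temperature
+ # and the maximum number of tokens to generate.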
inputFeaturesList.append('Max Tokens') + mlmodels ='' + algorihtms = configSettingsJson['basic']['algorithms'][problem_type] + for k in algorihtms.keys(): + if configSettingsJson['basic']['algorithms'][problem_type][k] == 'True': + if mlmodels != '': + mlmodels += ', ' + mlmodels += k + + if problem_type == 'llmFineTuning': + ser_url = get_instance_id(usecasename+'_'+str(request.session['ModelVersion'])) + if 'modelSize' in configSettingsJson['basic']: + selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels] + for k in selectedModelSize.keys(): + if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True': + modelSize = k + break + + elif problem_type == 'stateTransition': + ser_url = service_url.read_service_url_params(request) + ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion']) + else: + ser_url = service_url.read_service_url_params(request) + ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion']) + if submittype.lower() == 'predict': + inputFieldsDict = {} + if problem_type == 'timeSeriesForecasting': #task 11997 + inputFieldsDict['noofforecasts'] = int(request.POST.get('noofforecasts')) + elif problem_type == 'stateTransition': + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + sessionid = request.POST.get('SessionID') + activity = request.POST.get(targetFeature) + inputFieldsDict[inputFeatures] = request.POST.get(inputFeatures) + inputFieldsDict[targetFeature] = request.POST.get(targetFeature) + + elif problem_type == 'recommenderSystem' and mlmodels == 'ItemRating': + inputFieldsDict['uid'] = request.POST.get('uid') + inputFieldsDict['numberOfRecommendation'] = int(request.POST.get('numberOfRecommendation')) #Task 11190 + else: + if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997 + if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na': + inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) + + for feature in inputFeaturesList: + inputFieldsDict[feature] = request.POST.get(feature) + if problem_type.lower() not in ['contextualsearch','similarityidentification']: + for key, value in inputFieldsDict.items(): + if value == 'nan': + inputFieldsDict[key] = '' + + if value == '': + if key in featureused: + context.update({'tab': 'predict','ser_url':ser_url, 'error': ' Error : Mandatory field(s) are empty', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) + return context + inputFieldsJson = json.dumps(inputFieldsDict) + if problem_type == 'llmFineTuning': + modelType = request.POST.get('modelTypeforInferencing') + + x = inputFieldsDict.keys() + from appbe.dataPath import DATA_DIR + prompt = inputFieldsDict[configSettingsJson['basic']['trainingFeatures']] + promptobj = {'prompt':prompt} + if configSettingsJson['basic']['contextFeature'] != '': + inputData = inputFieldsDict[configSettingsJson['basic']['contextFeature']] + promptobj.update({'input':inputData}) + filetimestamp = str(int(time.time())) + file_path = os.path.join(DATA_DIR,'logs',filetimestamp+'.json') + f= open(file_path,""w"",encoding=""utf-8"") + #print(promptobj) + json.dump(promptobj,f) + f.close() + from llm.llm_inference import LLM_predict + cloudconfig = 
os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json')) + hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(request.session['ModelVersion'])) + if hypervisor and instanceid: + if modelSize != '': + mlmodels = mlmodels+'-'+modelSize + cachepath = os.path.join(DATA_DIR,'sqlite','cachePrompt.db') + import sqlite3 + conn = sqlite3.connect(cachepath) + from llm.llm_cache import CachePrompt + cachepromptObj = CachePrompt(conn) + searchFlag,result = cachepromptObj.selectFromCache(prompt,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens']) + + if searchFlag: + buf = LLM_predict(cloudconfig,instanceid,file_path,hypervisor,mlmodels,usecasename+'_'+str(request.session['ModelVersion']),region,image,inputFieldsDict['Temperature'],inputFieldsDict['Max Tokens'],modelType) + import re + outputStr = buf.split('ModelOutput:')[1] + cachepromptObj.insertRecord(prompt,outputStr,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens']) + else: + outputStr = result + if configSettingsJson['basic']['folderSettings']['fileType'].lower() != 'llm_document': + outputStr = outputStr.split('### '+configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']+':')[1] + singlePredictionResults = [] + singlePredictionsummary="""" + Results={} + Results['Response'] = outputStr + singlePredictionResults.append(Results) + else: + context.update( + {'tab': 'tabconfigure', 'error': 'Prediction Error: Instance ID not found ', 'selected': 'prediction', + 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, + 'ModelVersion': ModelVersion,'mlmodels':mlmodels}) + log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Prediction Error, Instance ID not found') + return context + else: + try: + import requests + #response = requests.post(ser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + response = requests.post(ser_url,data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + if response.status_code != 200: + outputStr=response.content + context.update({'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'}) + log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : '+str(outputStr.decode('utf-8'))) + return context + except Exception as inst: + if 'Failed to establish a new connection' in str(inst): + context.update({'tab': 'tabconfigure', 'error': 'AION service need to be started', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) + log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0'+' sec'+' : '+'Error : AION service need to be started, '+str(inst)) + return context + else: + context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst),'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) + log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error, '+str(inst)) + return context + outputStr=response.content + 
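+ # The REST service replies with a JSON document of the form
+ # {'status': 'SUCCESS', 'data': [...]} (or a 'message' field on failure).
+ # The raw bytes are decoded and parsed below, then reshaped per problem type
+ # into singlePredictionResults for the prediction template. An assumed,
+ # illustrative reply for a classification use case:
+ #   {'status': 'SUCCESS', 'data': [{'prediction': 'Yes', 'probability': 0.87}]}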
outputStr = outputStr.decode('utf-8','ignore') + outputStr = outputStr.strip() + predict_dict = json.loads(str(outputStr)) + #print(predict_dict) + singlePredictionsummary="""" + if (predict_dict['status'] == 'SUCCESS'): + data = predict_dict['data'] + singlePredictionResults = [] + Results = {} + if problem_type == 'multiModalLearning': + data = data[0] + Results['prediction'] = data['predict'] + singlePredictionResults.append(Results) + + if problem_type == 'textSummarization': + data = data[0] + Results['msg'] = predict_dict['msg'] + singlePredictionResults.append(Results) + Results['prediction'] = predict_dict['data'] + singlePredictionResults.append(Results) + Results1 = {} + Results1['prediction'] = predict_dict['data'] + print(""prdata------------"",predict_dict['data']) + singlePredictionsummary=predict_dict['data'] + print(""singlePredictionsummary"",singlePredictionsummary) + t2 = time.time() + + elif problem_type == 'multiLabelPrediction': + prediction = '' + for x in data: + for y in x: + if 'predict' in y: + if prediction != '': + prediction += ',' + prediction += str(y)+':'+str(x[y]) + Results['prediction'] = prediction + singlePredictionResults.append(Results) + elif problem_type == 'timeSeriesFore" +"casting': #task 11997 + Results['prediction'] = json.dumps(data) + singlePredictionResults.append(Results) + elif problem_type == 'stateTransition': + if str(data['Anomaly']) == 'False': + Results['prediction'] = 'No Anomaly' + else: + Results['prediction'] = str(data['Remarks']) + singlePredictionResults.append(Results) + elif problem_type.lower() in ['similarityidentification','contextualsearch']: + data = data[0] + prediction = data['prediction'] + i = 1 + for x in prediction: + te = '' + for y in x: + info = (str(x[y])[:50] + '...') if len(str(x[y])) > 50 else str(x[y]) + te += y+': '+info+'\\n\\n' + Results[i] = te + i = i+1 + singlePredictionResults.append(Results) + else: + data = data[0] + if 'prediction' in data: + Results['prediction'] = data['prediction'] + + if 'probability' in data: + Results['probability'] = data['probability'] + if 'remarks' in data: + Results['remarks'] = json.loads(data['remarks']) + singlePredictionResults.append(Results) + t2 = time.time() + log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+str(round(t2-t1))+' sec'+' : '+'Success') + + else: + context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(predict_dict['message']), 'selected': 'prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) + log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error') + return context + inputFields = [] + inputFields.append(inputFieldsDict) + ##Below added by sjayaram for llm langkit evaluation metrics Task:17109 + prompt_response_results = '' + if problem_type == 'llmFineTuning': + try: + response_msg = outputStr + prompt_msg = prompt + except: + response_msg = '' + prompt_msg = '' + + from appbe.evaluate_prompt import evaluate_prompt_response_inputs + final_output_json,prompt_response_results = evaluate_prompt_response_inputs(prompt_msg,response_msg) + #ser_url = service_url.read_service_url_params(request) + #ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(ModelVersion) + context.update({'tab': 'predict','mlmodels':mlmodels,'fineTunedModelType':modelType,'ser_url':ser_url, 'inputFields': inputFields,'singlePredictionResults': 
singlePredictionResults,'singlePredictionsummary':singlePredictionsummary,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction', + 'prompt_response_results':prompt_response_results}) + return context + elif submittype.lower() == 'script': + scriptdata=""'''\\n"" + scriptdata+=""* =============================================================================\\n"" + scriptdata+=""* COPYRIGHT NOTICE\\n"" + scriptdata+=""* =============================================================================\\n"" + scriptdata+=""* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023\\n"" + scriptdata+=""* Proprietary and confidential. All information contained herein is, and\\n"" + scriptdata+=""* remains the property of HCL Technologies Limited. Copying or reproducing the\\n"" + scriptdata+=""* contents of this file, via any medium is strictly prohibited unless prior\\n"" + scriptdata+=""* written permission is obtained from HCL Technologies Limited.\\n"" + scriptdata+=""'''\\n"" + scriptdata+='import sys\\n' + scriptdata+='import json\\n' + scriptdata+='import requests\\n' + scriptdata+='import pandas as pd\\n' + scriptdata+='from pandas import json_normalize\\n' + scriptdata+='ser_url =""'+ser_url+'""\\n\\n' + scriptdata+=""def predict(data):\\n"" + scriptdata+="" if data.endswith('.tsv'):\\n"" + scriptdata+="" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace',sep='\\\\t')\\n"" + scriptdata+="" else:\\n"" + scriptdata+="" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace')\\n"" + scriptdata+=' features = ""'+"","".join([feature for feature in inputFeaturesList])+'""\\n' + scriptdata+="" features = features.split(',')\\n"" + scriptdata+="" df = df[features]\\n"" + scriptdata+="" data = df.to_json(orient='records')\\n"" + scriptdata+="" try:\\n"" + scriptdata+=' response = requests.post(ser_url,data=data,headers={""Content-Type"":""application/json"",})\\n' + scriptdata+="" if response.status_code == 200:\\n"" + scriptdata+="" outputStr=response.content\\n"" + scriptdata+="" outputStr = outputStr.decode('utf-8')\\n"" + scriptdata+="" outputStr = outputStr.strip()\\n"" + scriptdata+="" predict_dict = json.loads(str(outputStr))\\n"" + scriptdata+="" print(predict_dict)\\n"" + scriptdata+="" except Exception as e:\\n"" + scriptdata+=' print(e)\\n' + scriptdata+='\\nif __name__ == ""__main__"":\\n' + scriptdata+=' predict(sys.argv[1])' + response = HttpResponse() + response['content_type'] = 'text/plain' + response['Content-Disposition'] = 'attachment; filename=prediction.py' + response.write(scriptdata) + return response + + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + context.update({'tab': 'tabconfigure', 'error': 'Failed To perform prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'}) + log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + ' 0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction, '+ str(inst)) + return context + + import os +import openai +from langchain.llms import AzureOpenAI +from sentence_transformers.SentenceTransformer import SentenceTransformer +import time +import datetime +import pandas as pd +import sys +import subprocess +import importlib +from appbe.aion_config import get_llm_data +from 
appbe.dataPath import DATA_FILE_PATH + +remote_data_dir = ""/home/aion/data/storage/llm_testing_data"" +remote_data_processeddata_dir = '/home/aion/data/storage/processed_data' +remote_config_dir = '/home/aion/data/config' +sh_file_path = '/home/aion/llm/sbin/llm_testing.sh' + + + + + +prompt_command = '/home/aion/llm/sbin/llm_testing.sh' + +PRE_CONTEXT = ""Answer the following question in a concise manner.\\n"" +DEFAULT_PARAMS = { + 'OPENAI_API_TYPE' : ""azure"", + 'OPENAI_API_BASE' : """", + 'OPENAI_API_KEY' : """", + 'OPENAI_API_VERSION' : ""2023-03-15-preview"" +} +faq="""" + +def getAMIDetails(config,selectedAMI): + y = {} + for x in config: + print(x) + if x['id'] == selectedAMI: + return x + return y +class test_LLM(): + + def __init__(self, + deployment_name='Text-Datvinci-03', params=DEFAULT_PARAMS, transformer=None, + sentence_txfr_model='sentence-transformers/paraphrase-mpnet-base-v2'): + self.deployment_name=deployment_name + self.set_params( params) + self.transformer = transformer + self.sentence_txfr_model = sentence_txfr_model + def fiddlerAuditorCheck(self): + status = importlib.util.find_spec('auditor') + if not status: + subprocess.check_call([sys.executable, ""-m"", ""pip"",""uninstall"", ""-q"",""-y"",""notebook""]) + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""install"",""-q"", ""notebook==6.4.5"" ]) + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""install"",""-q"",""fiddler-auditor==0.0.2""]) + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""install"",""-q"",""notebook==7.0.2""]) + status = importlib.util.find_spec('auditor') + return status + + + + def set_params(self, params={}): + valid_params = ['OPENAI_API_TYPE','OPENAI_API_KEY','OPENAI_API_BASE','OPENAI_API_VERSION'] + for key, value in params.items(): + if 'OPENAI_API_TYPE' == key: + openai.api_type = value + os.environ['OPENAI_API_TYPE'] = openai.api_type + elif 'OPENAI_API_KEY' == key: + openai.api_key = value + os.environ['OPENAI_API_KEY'] = openai.api_key + elif 'OPENAI_API_BASE' == key: + openai.api_base = value + os.environ['OPENAI_API_BASE'] = openai.api_base + elif key in valid_params: + os.environ[key] = value + + def run(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT): + + + if not self.fiddlerAuditorCheck(): + raise ValueError('Fiddler-auditor is not instlled ""python -m pip install fiddler-auditor==0.0.2""') + + openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key) + from auditor.perturbations import Paraphrase + from auditor.evaluation.expected_behavior import SimilarGeneration + from auditor.evaluation.evaluate import LLMEval + + + + # For Azure OpenAI, it might be the case the api_version for chat completion + # is different from the base model so we need to set that parameter as well. 
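+ # The robustness check below works in three steps: paraphrase the prompt
+ # (either with a caller supplied transformer or the default GPT-35-Turbo
+ # based Paraphrase perturber), generate a completion for every perturbation,
+ # and flag generations whose sentence-transformer similarity to the reference
+ # generation falls below similarity_threshold.
+ # Hedged usage sketch; the deployment name, key and prompt are placeholders:
+ #   tester = test_LLM(deployment_name='Text-Datvinci-03')
+ #   tester.set_params({'OPENAI_API_KEY': '<key>', 'OPENAI_API_BASE': '<endpoint>'})
+ #   result = tester.run('gpt-35-turbo', 0.1, 0.75, 2,
+ #                       'What is AION?', 'AION is an AutoML platform.')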
+ if self.transformer: + azure_perturber = self.transformer + else: + azure_perturber = Paraphrase( + model=""GPT-35-Turbo"", + api_version=""2023-03-15-preview"", + num_perturbations=perturbations_per_sample, + ) + sent_xfmer = SentenceTransformer(self.sentence_txfr_model) + similar_generation = SimilarGeneration( + similarity_model=sent_xfmer, + similarity_threshold=similarity_threshold,) + llm_eval = LLMEval( + llm=openai_llm, + expected_behavior=similar_generation, + transformation=azure_perturber,) + test_result = llm_eval.evaluate_prompt_correctness( + prompt=prompts, + pre_context=pre_context, + reference_generation=reference_generation, + perturbations_per_sample=perturbations_per_sample + ) + return test_result + + def runmultiple(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT,faq=faq): + + + if not self.fiddlerAuditorCheck(): + raise ValueError('Fiddler-auditor is not instlled ""python -m pip install fiddler-auditor==0.0.2""') + + from auditor.evaluation.expected_behavior import SimilarGeneration + from auditor.evaluation.evaluate import LLMEval + openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key) + from auditor.perturbations import Paraphrase + + # For Azure OpenAI, it might be the case the api_version for chat completion + # is different from the base model so we need to set that parameter as well. + if self.transformer: + azure_perturber = self.transformer + else: + azure_perturber = Paraphrase( + model=""GPT-35-Turbo"", + api_version=""2023-03-15-preview"", + num_perturbations=perturbations_per_sample, + ) + sent_xfmer = SentenceTransformer(self.sentence_txfr_model) + similar_generation = SimilarGeneration( + similarity_model=sent_xfmer, + similarity_threshold=similarity_threshold,) + llm_eval = LLMEval( + llm=openai_llm, + expected_behavior=similar_generation, + transformation=azure_perturber,) + rows = faq.shape[0] + prompts = list(faq['Question']) + listofDf = [] + + for i in range(rows): + test_result = llm_eval.evaluate_prompt_robustness( + prompt=prompts[i], + pre_context=pre_context, + ) + + try: + now = datetime.datetime.now().strftime(""%H%M%S"") + name = str(i)+str(now)+'.html' + test_result.save(name) + df_iter=pd.read_html(name) + df_actual = df_iter[0] + listofDf.append(df_actual) + except: + pass + + perturbatedDF = pd.concat(listofDf) + + return perturbatedDF + + def run_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,isfinetuned): + from appbe.compute import readComputeConfig + from appbe.prediction import get_instance + cloud_infra = readComputeConfig() + + dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv') + remoteFile = os.path.join(remote_data_dir, 'prompt.csv') +" +" if not reference_generation: + reference_generation = '' + prompt = pd.DataFrame([{'prompts':prompts, 'reference_generation':reference_generation}]) + prompt.to_csv(dataFile, index=False) + + + + hypervisor, instanceid, region, image = get_instance(usecasename) + key, url, api_type, api_version = get_llm_data() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm')) + + if image != '' and image != 'NA': + amiDetails = 
getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import start_instance + # print(aws_access_key_id, aws_secret_key, instanceid, region) + status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) + + + if status.lower() == 'success': + + pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + + # cope file to server for sinfle prompt + from AION.llm.ssh_command import copy_files_to_server + copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir) + if isfinetuned: + command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \\ + + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\ + + str(perturbations_per_sample) + \\ + ' '+ str(key) + \\ + ' '+ str(url) + \\ + ' '+ str(api_type) + \\ + ' '+ str(api_version)+ \\ + ' '+ str(""single"") + else: + command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \\ + + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\ + + str(perturbations_per_sample) + \\ + ' '+ str(key) + \\ + ' '+ str(url) + \\ + ' '+ str(api_type) + \\ + ' '+ str(api_version)+ \\ + ' '+ str(""single"") + + + + from llm.ssh_command import run_ssh_cmd + + buf = run_ssh_cmd(ip, pem_file, username, '', '', command) + print(buf) + return buf + def run_multiple_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, faq,isfinetuned): + dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv') + remoteFile = os.path.join(remote_data_dir, 'prompt.csv') + faq.to_csv(dataFile, index=False) + print(""This is done"") + from appbe.compute import readComputeConfig + from appbe.prediction import get_instance + cloud_infra = readComputeConfig() + hypervisor, instanceid, region, image = get_instance(usecasename) + key, url, api_type, api_version = get_llm_data() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm')) + + if image != '' and image != 'NA': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import start_instance + # print(aws_access_key_id, aws_secret_key, instanceid, region) + status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) + + + if status.lower() == 'success': + + pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + + #print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir) + + from AION.llm.ssh_command import copy_files_to_server + copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir) + + if isfinetuned: + command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \\ + + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\ + + str(perturbations_per_sample) + \\ + ' '+ str(key) + \\ + ' '+ str(url) + \\ + ' '+ str(api_type) + \\ + ' '+ str(api_version)+ \\ + ' '+ 
str(""multiple"") + + else: + command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \\ + + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\ + + str(perturbations_per_sample) + \\ + ' '+ str(key) + \\ + ' '+ str(url) + \\ + ' '+ str(api_type) + \\ + ' '+ str(api_version)+ \\ + ' '+ str(""multiple"") + + + from llm.ssh_command import run_ssh_cmd + + buf = run_ssh_cmd(ip, pem_file, username, '', '', command) + print(buf) + return buf + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import csv +import logging +import pandas as pd + + +class csv_validator: + + def __init__(self): + self.log = logging.getLogger('eion') + + def __text_header(self, filename, threshold=0.75): + df = pd.read_csv(filename, header=None,nrows=1000) + numeric_columns = df.dtypes[df.dtypes != object] + if not len(numeric_columns): + first_row_len = df.iloc[0].str.len() + index = 0 + for c in df: + if (df[c].map(len).mean() * threshold <= first_row_len[index]): + return False + index += 1 + return True + return False + + def validate_header(self, filename,delimiter,textqualifier,threshold=0.75): + with open(filename, 'rt',encoding='utf-8') as csvfile: + has_header = csv.Sniffer().has_header(csvfile.read(8192)) + csvfile.seek(0) + if not has_header: + has_header = self.__text_header(filename, threshold) + reader = csv.reader(csvfile, delimiter=delimiter,quotechar=textqualifier) + good_csv = True + col_len = len(next(reader)) + bad_lines = [] + offset = 2 # +1 for first read and +1 for python index start at 0 + for index, row in enumerate(reader): + if len(row) != col_len: + good_csv = False + if(index == 1 and has_header): + offset += 1 + bad_lines.append(index + offset) + return has_header, good_csv, bad_lines + + +if __name__ == '__main__': + import sys + val = csv_validator() + print(val.validate_header(sys.argv[1])) + import json +import os +import random +import time +from avro.datafile import DataFileReader +from avro.io import DatumReader +from pyarrow.parquet import ParquetFile +from snorkel.labeling.model import LabelModel +from snorkel.labeling import PandasLFApplier, LFAnalysis +import pandas as pd +import pandavro as pdx +import pyarrow as pa +import numpy as np +import platform +from os.path import expanduser + +home = expanduser(""~"") +if platform.system() == 'Windows': + + DATA_FILE_PATH = os.path.join(home,'AppData','Local','Programs','HCLTech','AION','data','storage') +else: + DATA_FILE_PATH = os.path.join(home,'HCLT','AION','data') + + + + +def get_join(condition): + if condition[""join""] == 'and': + return ""&"" + elif condition[""join""] == 'or': + return ""|"" + else: + return """" + + +def create_labelling_function(rule_list, label_list): + lfs_main_func = 'def lfs_list_create():\\n' + lfs_main_func += '\\tfrom snorkel.labeling import labeling_function\\n' + lfs_main_func += '\\timport numpy as np\\n' + lfs_main_func += '\\timport json\\n' + lfs_main_func += '\\tABSTAIN = -1\\n' + lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n' + lfs_list 
= '\\tlfs_list=[' + for rule in rule_list: + lfs_list += 'lf_' + rule[""rule_name""] + ',' + lfs = '\\t@labeling_function()\\n' + lfs += '\\tdef lf_' + rule[""rule_name""] + '(data):\\n' + lfs += '\\t\\treturn np.where(' + for condition in rule[""conditions""]: + if ""string"" in condition[""sel_datatype""]: + if condition[""sel_condition""] in [""=="", ""!=""]: + cond_statement = '(data[""' + condition[""sel_column""] + '""]' + condition[ + ""sel_condition""] + '(""' + str(condition[""input_value""]) + '""))' + get_join(condition) + else: + cond_statement = '(data[""' + condition[""sel_column""] + '""].' + condition[ + ""sel_condition""] + '(""' + str(condition[""input_value""]) + '""))' + get_join(condition) + else: + cond_statement = '(data[""' + condition[""sel_column""] + '""]' + condition[""sel_condition""] + \\ + str(condition[""input_value""]) + ')' + get_join(condition) + lfs += cond_statement + lfs += ', labels.index(""' + rule[""label""] + '""), ABSTAIN)\\n' + lfs_main_func += lfs + if lfs_list.endswith("",""): + lfs_list = lfs_list.rstrip(lfs_list[-1]) + lfs_list += ']\\n' + else: + lfs_list += ']\\n' + lfs_main_func += lfs_list + lfs_main_func += '\\treturn lfs_list\\n' + lfs_main_func += 'lfs_list_create()' + f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w') + f.write(lfs_main_func) + f.close() + return lfs_main_func + + +def label_dataset(rule_list, file_ext, label_list, not_satisfy_label): + file_path = os.path.join(DATA_FILE_PATH, ""uploaded_file."" + file_ext) + if file_ext in [""csv"", ""tsv""]: + df = pd.read_csv(file_path) + elif file_ext == ""json"": + df = pd.json_normalize(pd.read_json(file_path).to_dict(""records"")) + elif file_ext == ""avro"": + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + elif file_ext == ""parquet"": + df = pd.read_parquet(file_path, engine=""pyarrow"") + labelling_functions = create_labelling_function(rule_list, label_list) + exec(labelling_functions) + lfs = eval('lfs_list_create()') + applier = PandasLFApplier(lfs) + l_data = applier.apply(df) + label_model = LabelModel(cardinality=len(label_list) + 1, verbose=True) + label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123) + df[""label""] = label_model.predict(L=l_data, tie_break_policy=""abstain"") + df.loc[df[""label""] == -1, ""label""] = not_satisfy_label + for item in label_list: + df.loc[df[""label""] == label_list.index(item), ""label""] = item + if file_ext in [""csv"", ""tsv""]: + df.to_csv(os.path.join(DATA_FILE_PATH, ""result_file."" + file_ext), index=False) + elif file_ext == ""parquet"": + df.to_parquet(os.path.join(DATA_FILE_PATH, ""result_file."" + file_ext), + engine=""pyarrow"", index=False) + elif file_ext == ""avro"": + pdx.to_avro(os.path.join(DATA_FILE_PATH, ""result_file."" + file_ext), df) + else: + raise ValueError(""Invalid file format"") + num_records = len(df.index) + size_take = 100 + if num_records <= size_take: + size_take = num_records + display_df = df.sample(n=size_take) + return display_df.to_html(classes='table table-striped text-left', justify='left', index=False) + + +def create_sample_function(rule, label_list, not_satisfy_label): + lfs_main_func = 'def lf_rule_apply(data" +"):\\n' + lfs_main_func += '\\timport numpy as np\\n' + lfs_main_func += '\\tABSTAIN = -1\\n' + lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n' + lfs = '\\treturn np.where(' + 
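+ # The loop below translates each UI rule condition into a pandas expression and
+ # chains them with the selected join operator (& or |), producing the source of
+ # a row labelling function that the caller later exec()'s. Illustrative output
+ # for an assumed numeric rule (column, operator and labels are examples only):
+ #   return np.where((data['age']>30), 'adult', 'not_satisfied')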
for condition in rule[""conditions""]: + if ""string"" in condition[""sel_datatype""]: + if condition[""sel_condition""] in [""=="", ""!=""]: + cond_statement = '(data[""' + condition[""sel_column""] + '""]' + condition[""sel_condition""] + '(""' + str( + condition[""input_value""]) + '""))' + get_join(condition) + else: + cond_statement = '(data[""' + condition[""sel_column""] + '""].str.' + condition[ + ""sel_condition""] + '(""' + str(condition[""input_value""]) + '""))' + get_join(condition) + print(cond_statement) + else: + cond_statement = '(data[""' + condition[""sel_column""] + '""]' + condition[""sel_condition""] + \\ + str(condition[""input_value""]) + ')' + get_join(condition) + lfs += cond_statement + lfs += ', ""' + rule[""label""] + '"", ""' + not_satisfy_label + '"")\\n' + lfs_main_func += lfs + return lfs_main_func + + +def get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label): + file_path = os.path.join(DATA_FILE_PATH, ""uploaded_file."" + file_ext) + size_take = 100 + if file_ext in [""csv"", ""tsv""]: + num_records = sum(1 for line in open(file_path)) - 1 + if num_records > size_take: + skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) + else: + skip = 0 + df = pd.read_csv(file_path, skiprows=skip) + elif file_path.endswith("".json""): + df = pd.read_json(file_path) + df = pd.json_normalize(df.to_dict(""records"")) + elif file_path.endswith("".avro""): + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + elif file_path.endswith("".parquet""): + pf = ParquetFile(file_path) + take_rows = next(pf.iter_batches(batch_size=size_take)) + df = pa.Table.from_batches([take_rows]).to_pandas() + # file_content = pd.read_parquet(file_path, engine=""pyarrow"") + else: + raise ValueError(""Invalid file format"") + + rule_applier_func = create_sample_function(rule_json, label_list, not_satisfy_label) + exec(rule_applier_func) + df[rule_json[""rule_name""]] = eval('lf_rule_apply')(df) + return df.to_html(classes='table table-striped text-left', justify='left', index=False) + + +def create_sample_function_ver2(rule_json, label_list, not_satisfy_label): + lfs_main_func = 'def lf_rule_apply(data):\\n' + lfs_main_func += '\\timport numpy as np\\n' + lfs_main_func += '\\tABSTAIN = -1\\n' + lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n' + counter = 0 + for condition in rule_json[""conditions""]: + lfs_return = condition[""sel_label""] + if counter > 0: + lfs_return_condition = '\\telif' + else: + lfs_return_condition = '\\tif' + for label_condition in condition[""label_condition""]: + if label_condition[""sel_datatype""] == ""string"": + if label_condition[""sel_condition""] == ""contains"": + lfs_return_condition += '((' + str(label_condition[""input_value""]) + ') in data[""' + \\ + label_condition[""sel_column""] + '""])' + get_join(label_condition) + elif label_condition[""sel_condition""] in [""=="", ""!=""]: + lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""]' + label_condition[ + ""sel_condition""] + '(""' + str( + label_condition[""input_value""]) + '""))' + get_join(label_condition) + else: + lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""].' 
+ label_condition[ + ""sel_condition""] + '(""' + str(label_condition[""input_value""]) + '""))' + get_join( + label_condition) + else: + lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""]' + label_condition[ + ""sel_condition""] + str(label_condition[""input_value""]) + ')' + get_join(label_condition) + if get_join(label_condition) == """": + lfs_return_condition += "":\\n"" + lfs_return_condition += '\\t\\treturn ""' + lfs_return + '""\\n' + lfs_main_func += lfs_return_condition + counter += 1 + lfs_return_condition = '\\n\\telse:\\n' + lfs_return_condition += '\\t\\treturn ""' + not_satisfy_label + '""' + lfs_main_func += lfs_return_condition + return lfs_main_func + + +def get_sample_result_of_individual_rule_ver2(rule_json, file_ext, label_list, not_satisfy_label): + file_path = os.path.join(DATA_FILE_PATH, ""uploaded_file."" + file_ext) + size_take = 100 + if file_ext in [""csv"", ""tsv""]: + num_records = sum(1 for line in open(file_path)) - 1 + if num_records > size_take: + skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) + else: + skip = 0 + df = pd.read_csv(file_path, skiprows=skip) + elif file_path.endswith("".json""): + df = pd.read_json(file_path) + df = pd.json_normalize(df.to_dict(""records"")) + elif file_path.endswith("".avro""): + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + elif file_path.endswith("".parquet""): + pf = ParquetFile(file_path) + take_rows = next(pf.iter_batches(batch_size=size_take)) + df = pa.Table.from_batches([take_rows]).to_pandas() + # file_content = pd.read_parquet(file_path, engine=""pyarrow"") + else: + raise ValueError(""Invalid file format"") + + rule_applier_func = create_sample_function_ver2(rule_json, label_list, not_satisfy_label) + exec(rule_applier_func) + df[rule_json[""rule_name""]] = df.apply(eval('lf_rule_apply'), axis=1) + return df.to_html(classes='table table-striped text-left', justify='left', index=False) + + +def create_labelling_function_ver2(rule_list, label_list): + lfs_main_func = 'def lfs_list_create():\\n' + lfs_main_func += '\\tfrom snorkel.labeling import labeling_function\\n' + lfs_main_func += '\\timport numpy as np\\n' + lfs_main_func += '\\timport json\\n' + lfs_main_func += '\\tABSTAIN = -1\\n' + lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n' + lfs_list = '\\tlfs_list=[' + for rule in rule_list: + lfs_list += 'lf_' + rule[""rule_name""] + ',' + lfs = '\\t@labeling_function()\\n' + lfs += '\\tdef lf_' + rule[""rule_name""] + '(data):\\n' + counter = 0 + for condition in rule[""conditions""]: + lfs_return = 'labels.index(""' + condition[""sel_label""] + '"")' + if counter > 0: + lfs_return_condition = '\\t\\telif' + else: + lfs_return_condition = '\\t\\tif' + for label_condition in condition[""label_condition""]: + if label_condition[""sel_datatype""] == ""string"": + if label_condition[""sel_condition""] == ""contains"": + lfs_return_condition += '((' + str(label_condition[""input_value""]) + ') in data[""' + \\ + label_condition[""sel_column""] + '""])' + get_join(label_condition) + elif label_condition[""sel_condition""] in [""=="", ""!=""]: + lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""]' + label_condition[ + ""sel_condition""] + '(""' + str( + label_condition[""input_value""]) + '""))' + get_join(label_condition) + else: + 
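+ # Remaining string predicates are emitted as a direct method call on the column
+ # (the condition name chosen in the UI is used verbatim as the method name);
+ # the non-string branch further below writes a plain numeric comparison instead.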
lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""].' + label_condition[ + ""sel_condition""] + '(""' + str(label_condition[""input_value""]) + '""))' + get_join( + label_condition) + else: + lfs_return_condition += '(data[""' + label_condition[""sel_column""] + '""]' + label_condition[ + ""sel_condition""] + str(label_condition[""input_value""]) + ')' + get_join(label_condition) + if get_join(label_condition) == """": + lfs_return_condition += "":\\n"" + lfs_return_condition += '\\t\\t\\treturn ' + lfs_return + '\\n' + lfs += lfs_return_condition + counter += 1 + lfs_return_condition = '\\n\\t\\telse:\\n' + lfs_return_condition += '\\t\\t\\treturn ABSTAIN\\n' + lfs += lfs_return_condition + lfs_main_func += lfs + if lfs_list.endswith("",""): + lfs_list = lfs_list.rstrip(lfs_list[-1]) + lfs_list += ']\\n' + else: + lfs_list += ']\\n' + lfs_main_func += lfs_list + lfs_main_func += '\\treturn lfs_list\\n' + lfs_main_func += 'lfs_list_create()' + # f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w') + # f.write(lfs_main_func) + # f.close() + return lfs_main_func + + +def get_rule_name_list(rule_list): + rule_name_list = [] + for rule in rule_list: + rule_name_list.append(rule[""rule_name""]) + return rule_name_list + + +def label_dataset_ver2(request,rule_list, file_ext, label_list, not_satisfy_label, label_weightage, include_proba): + file_path = os.path.join(DATA_FILE_PATH, ""uploaded_file."" + file_ext) + if file_ext in [""csv"", ""tsv""]: + df = pd.read_csv(file_path) + elif file_ext == ""json"": + df = pd.json_normalize(pd.read_json(file_path).to_dict(""records"")) + elif file_ext == ""avro"": + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + elif file_ext == ""parquet"": + df = pd.read_parquet(file_path, engine=""pyarrow"") + labelling_functions = create_labelling_function_ver2(rule_list, label_list) + exec(labelling_functions) + lfs = eval('lfs_list_create()') + applier = PandasLFApplier(lfs) + l_data = applier.apply(df) + label_model = LabelModel(cardinality=len(label_list), verbose=True) + label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123, class_balance=label_weightage) + df[""label""] = label_model.predict(L=l_data, tie_break_policy=""abstain"") + if include_proba: + prediction_of_prob = label_model.predict_proba(L=l_data) + for label in label_list: + df[label + ""_prob""] = np.around(prediction_of_prob[:, label_list.index(label)], 2) * 100 + df.loc[df[""label""] == -1, ""label""] = not_satisfy_label + + + filetimestamp = str(int(time.time())) + datasetName = ""AION_labelled_""+filetimestamp + '.' 
+ file_ext + request.session['AION_labelled_Dataset'] = datasetName + for item in label_list: + df.loc[df[""label""] == label_list.index(item), ""label""] = item + if file_ext in [""csv"", ""tsv""]: + df.to_csv(os.path.join(DATA_FILE_PATH, datasetName), index=False) + elif file_ext == ""parquet"": + df.to_parquet(os.path.join(DATA_FILE_PATH, datasetName), + engine=""pyarrow"", index=False) + elif file_ext == ""avro"": + pdx.to_avro(os.path.join(DATA_FILE_PATH, datasetName), df) + else: + raise ValueError(""Invalid file format"") + + #### saving file to database + from appbe.dataPath import DATA_DIR + from appbe.sqliteUtility import sqlite_db + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + newdata = {} + newdata['datapath'] = [os.path.join(DATA_FILE_PATH, datasetName)] + newdata['datasetname'] = [datasetName] + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest') + + + num_records = len(df.index) + size_take = 100 + if num_records <= size_take: +" +" size_take = num_records + display_df = df.sample(n=size_take) + weightage = np.around(label_model.get_weights(), 2) + rule_name_list = get_rule_name_list(rule_list) + analysis_df = LFAnalysis(l_data, lfs).lf_summary() + analysis_df[""Rule""] = analysis_df.index + analysis_df[""Rule""] = analysis_df[""Rule""].str.replace(""lf_"", """") + analysis_df = analysis_df[[""Rule"", ""Polarity"", ""Coverage"", ""Overlaps"", ""Conflicts""]] + weightage_dict = dict(zip(rule_name_list, weightage)) + analysis_json = analysis_df.to_dict(orient=""records"") + for item in analysis_json: + item[""Weightage""] = weightage_dict[item[""Rule""]] + analysis_df = pd.json_normalize(analysis_json) + # rules_weightage = [] + # for key in weightage_dict: + # rules_weightage.append({ + # ""label"": key, + # ""y"": weightage_dict[key], + # ""legendText"": key + # }) + response = { + # ""rule_name_list"": rule_name_list, + # ""weightage_list"": list(weightage), + ""analysis_df"": analysis_df.to_html(classes='table table-striped text-left', justify='left', index=False), + ""result_html"": display_df.to_html(classes='table table-striped text-left', justify='left', index=False) + } + return response + + +def get_label_and_weightage(test_file_ext, marked_label_column,file_delim_test, custom_test_delim ): + file_path = os.path.join(DATA_FILE_PATH, ""test_data_file."" + test_file_ext) + if test_file_ext in [""csv"", ""tsv""]: + df = pd.read_csv(file_path) + elif test_file_ext == ""json"": + df = pd.json_normalize(pd.read_json(file_path).to_dict(""records"")) + elif test_file_ext == ""avro"": + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + elif test_file_ext == ""parquet"": + df = pd.read_parquet(file_path, engine=""pyarrow"") + json_df = pd.DataFrame(df[marked_label_column].value_counts(normalize=True) * 100) + json_dict = json.loads(json_df.to_json()) + label_with_weightage = [] + for k in json_dict[marked_label_column]: + label_with_weightage.append( + {""label_name"": k, ""label_weightage"": np.around(json_dict[marked_label_column][k], 2)}) + return label_with_weightage + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import time +from pathlib import Path +import logging +from datetime import datetime as dt + + +class logg(): + from appbe.dataPath import LOG_LOCATION + def __init__(self, LOG_LOCATION): + self.log_location = LOG_LOCATION + + def create_log(self,version): + log_file_path = Path(self.log_location) + log_file_path.mkdir(parents=True, exist_ok=True) + time_stamp = dt.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S') + fileName='log_ux_'+time_stamp+'.log' + filehandler = logging.FileHandler(log_file_path/fileName, 'a','utf-8') + formatter = logging.Formatter('%(asctime)s %(message)s') + filehandler.setFormatter(formatter) + log = logging.getLogger('log_ux') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + log.info('********** AION_'+str(version)+' **********') + return log ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os,sys +def read_service_url_params(request): + hosturl =request.get_host() + url='http://'+hosturl+'/api/' + return url + +def read_monitoring_service_url_params(request): + hosturl =request.get_host() + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) + file = open(file_path, ""r"") + data = file.read() + file.close() + service_url = '127.0.0.1' + service_port='60050' + for line in data.splitlines(): + if 'aion_service_url=' in line: + service_url= line.split('=',1)[1] + if 'aion_service_port=' in line: + service_port= line.split('=',1)[1] + url='http://'+hosturl+'/api/' + return url + +def read_performance_service_url_params(request): + hosturl =request.get_host() + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) + file = open(file_path, ""r"") + data = file.read() + file.close() + service_url = '127.0.0.1' + service_port='60050' + for line in data.splitlines(): + if 'aion_service_url=' in line: + service_url= line.split('=',1)[1] + if 'aion_service_port=' in line: + service_port= line.split('=',1)[1] + url='http://'+hosturl+'/api/' + return url + +def read_pattern_anomaly_url_params(request): + hosturl =request.get_host() + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) + file = open(file_path, ""r"") + data = file.read() + file.close() + service_url = '127.0.0.1' + service_port='60050' + for line in data.splitlines(): + if 'aion_service_url=' in line: + service_url= line.split('=',1)[1] + if 'aion_service_port=' in line: + service_port= line.split('=',1)[1] + url='http://'+hosturl+'/api/pattern_anomaly_predict/' + return url + +def read_pattern_anomaly_setting_url_params(request): + 
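+ '''Build the pattern-anomaly settings endpoint for the current host.
+
+ Like the other read_*_url_params helpers in this module, it parses
+ aion.config for a service host and port (which the returned URL does not
+ currently use) and returns http://<request host>/api/pattern_anomaly_settings/.
+ '''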
hosturl =request.get_host() + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) + file = open(file_path, ""r"") + data = file.read() + file.close() + service_url = '127.0.0.1' + service_port='60050' + for line in data.splitlines(): + if 'aion_service_url=' in line: + service_url= line.split('=',1)[1] + if 'aion_service_port=' in line: + service_port= line.split('=',1)[1] + url='http://'+hosturl+'/api/pattern_anomaly_settings/' + return url ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import shutil +import subprocess +import sys +import glob +from pathlib import Path +import json +from django.http import FileResponse +from django.http import HttpResponse +from importlib.metadata import version +COMMON_PACKAGES = ""'setuptools >=62.3.0','pandas==1.5.3','numpy==1.24.2','joblib==1.2.0','Cython==0.29.33','scipy==1.10.1',' scikit-learn==1.2.1','word2number==1.1','category_encoders==2.6.0'"" +DL_COMMON_PACKAGE = ""'tensorflow==2.11.0'"" +TEXT_PACKAGES = ""'spacy==3.5.0','nltk==3.8.1','textblob==0.15.3','demoji==1.1.0','bs4==0.0.1','text-unidecode==1.3','pyspellchecker==0.6.2','contractions==0.1.73','protobuf==3.19.6','lxml'"" + + +def createPackagePackage(request,id,version,usecasedetails,Existusecases): + from appbe.pages import get_usecase_page + #print('2') + usecasedetail = usecasedetails.objects.get(id=id) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version) + modelid = models[0].id + p = Existusecases.objects.get(id=modelid) + deploymentfolder = str(p.DeployPath) + modelname = p.ModelName.usecaseid + version = p.Version + deployed_code = 'AION' + dockerimage = os.path.join(deploymentfolder,'publish','docker_image') + dockersetup = os.path.join(deploymentfolder,'publish','docker_setup') + tempPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'temp_'+modelname+'_'+str(version)) + try: + shutil.rmtree(tempPath,ignore_errors=True) + except: + pass + + shutil.copytree(deploymentfolder,tempPath) + shutil.rmtree(os.path.join(tempPath,'publish'), ignore_errors=True) + try: + Path(os.path.join(deploymentfolder,'publish')).mkdir(parents=True, exist_ok=True) + os.mkdir(dockersetup) + except: + shutil.rmtree(dockersetup,ignore_errors=True) + os.mkdir(dockersetup) + try: + os.mkdir(dockerimage) + except: + shutil.rmtree(dockerimage,ignore_errors=True) + os.mkdir(dockerimage) + shutil.copytree(tempPath, os.path.join(dockersetup,deployed_code)) + shutil.rmtree(tempPath) + + docker_setup = os.path.join(dockersetup,'AION') + + try: + os.mkdir(dockerimage) + except: + pass + requirementfilename = os.path.join(dockersetup,'requirements.txt') + installfilename = os.path.join(dockersetup,'install.py') + dockerfile = os.path.join(dockersetup,'Dockerfile') + dockerdata='FROM python:3.10-slim-buster' + dockerdata+='\\n' + dockerdata+='WORKDIR /app' + dockerdata+='\\n' + dockerdata+='COPY AION AION' + dockerdata+='\\n' + dockerdata+='''RUN apt-get update \\ + && apt-get install -y 
build-essential manpages-dev \\ + && apt-get install -y libgomp1 \\ + && python -m pip install --no-cache-dir -r AION/requirements.txt +''' + f = open(dockerfile, ""w"") + f.write(str(dockerdata)) + f.close() + try: + try: + import docker + client = docker.from_env() + client.containers.list() + except: + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = 'Error' + context['Msg'] = 'Docker should be installed and running on your machine. To build the docker image manually, the setup script is available at the following location: \\\\n'+dockersetup.replace('\\\\', '/') + return context + command = 'docker pull python:3.10-slim-buster' + os.system(command); + subprocess.check_call([""docker"", ""build"", ""-t"",modelname.lower()+"":""+str(version),"".""], cwd=dockersetup) + subprocess.check_call([""docker"", ""save"", ""-o"",modelname.lower()+""_""+str(version)+"".tar"",modelname.lower()+"":""+str(version)], cwd=dockersetup) + dockerfilepath = os.path.join(dockersetup,modelname.lower()+""_""+str(version)+"".tar"") + shutil.copyfile(dockerfilepath, os.path.join(dockerimage,modelname.lower()+""_""+str(version)+"".tar"")) + shutil.rmtree(dockersetup) + msg = 'Done' + Status = 'SUCCESS' + except Exception as e: + msg = 'Error in docker images creation. To build manually docker image setup available in following location: '+dockersetup.replace('\\\\', '\\\\\\\\') + Status = 'Fail' + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = Status + context['Msg'] = msg + return context + +def downloadPackage(request,id,version,usecasedetails,Existusecases): + try: + if 'downloadstatus' in request.session: + if request.session['downloadstatus'] == 'Downloading': + return HttpResponse(json.dumps(""Error Creating Package""), content_type=""application/error"") + request.session['downloadstatus'] = 'Downloading' + usecasedetail = usecasedetails.objects.get(id=id) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='" +"SUCCESS',Version=version) + modelid = models[0].id + p = Existusecases.objects.get(id=modelid) + deployPath = str(p.DeployPath) + if os.path.isdir(os.path.join(deployPath,'publish','package')): + for f in os.listdir(os.path.join(deployPath,'publish','package')): + if f.endswith('whl'): + os.remove(os.path.join(deployPath,'publish','package',f)) + usecasename = p.ModelName.usecaseid + Version = p.Version + deployed_code = usecasename + targetname = usecasename+'_'+str(Version) + whl_dir_name = 'WHEEL_'+usecasename+'_'+str(Version) + deployLocation = os.path.join (deployPath,'..',whl_dir_name) + try: + os.makedirs(deployLocation) + except OSError as e: + shutil.rmtree(deployLocation) + os.makedirs(deployLocation) + shutil.copytree(deployPath,os.path.join(deployLocation,deployed_code)) + initstring = 'import os' + initstring += '\\n' + initstring += 'import sys' + initstring += '\\n' + initstring += 'sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))' + filename = os.path.join(deployLocation,deployed_code,'__init__.py') + f = open(filename, ""w"") + f.write(str(initstring)) + f.close() + textdata=0 + learner_type = 'ml' + requirementfile = os.path.join(deployPath,'requirements.txt') + install_requires = '' + if os.path.exists(requirementfile): + fileobj = open(requirementfile, 'r') + requirePackages = fileobj.readlines() + fileobj.close() + for package in requirePackages: + if install_requires != '': + install_requires = install_requires+',' + 
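+ # Each line of the deployment's requirements.txt is quoted and appended so the
+ # generated setup.py pins the same dependencies inside install_requires.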
install_requires = install_requires+'\\''+package.strip()+'\\'' + setup_string = 'from setuptools import setup,find_packages' + setup_string += '\\n' + setup_string += 'setup(name=\\''+deployed_code+'\\',' + setup_string += '\\n' + setup_string += 'version=\\'1\\',' + setup_string += '\\n' + setup_string += 'packages = find_packages(),' + setup_string += '\\n' + setup_string += 'install_requires = ['+install_requires+'],' + setup_string += '\\n' + setup_string += 'package_data={""'+deployed_code+'.pytransform"":[""*.*""],""'+deployed_code+'"":[""*.sav"",""*.json""],"""":[""*"",""*/*"",""*/*/*""]}' + setup_string += '\\n' + setup_string += ')' + filename = os.path.join(deployLocation,'setup.py') + f = open(filename, ""w"") + f.write(str(setup_string)) + f.close() + + subprocess.check_call([sys.executable, ""setup.py"", ""bdist_wheel""], cwd=deployLocation) + shutil.copytree(os.path.join(deployLocation,'dist'),os.path.join(deployPath,'publish','package'),dirs_exist_ok=True) + shutil.rmtree(deployLocation) + if os.path.isdir(os.path.join(deployPath,'publish','package')): + for f in os.listdir(os.path.join(deployPath,'publish','package')): + if f.endswith('whl'): + package = f + zip_file = open(os.path.join(deployPath,'publish','package',package), 'rb') + request.session['downloadstatus'] = 'Done' + return FileResponse(zip_file) + except Exception as e: + print(e) + request.session['downloadstatus'] = 'Done' + return HttpResponse(json.dumps(""Error Creating Package""), content_type=""application/error"") +def installPackage(model,version,deployedPath): + deployedPath = os.path.join(deployedPath,'publish','package') + whlfilename='na' + if os.path.isdir(deployedPath): + for file in os.listdir(deployedPath): + if file.endswith("".whl""): + whlfilename = os.path.join(deployedPath,file) + if whlfilename != 'na': + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""uninstall"",""-y"",model]) + subprocess.check_call([sys.executable, ""-m"", ""pip"", ""install"",""--no-dependencies"",whlfilename]) + status,pid,ip,port = checkModelServiceRunning(model) + if status == 'Running': + stopService(pid) + startService(model,ip,port) + return('Success') + else: + return('Installation Package not Found') +def getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases): + usecasedetail = usecasedetails.objects.get(id=id) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version) + return(models[0].id) + +def stopService(pid): + import psutil + p = psutil.Process(int(pid)) + p.terminate() + +def checkModelServiceRunning(package_name): + from os.path import expanduser + home = expanduser(""~"") + if platform.system() == 'Windows': + modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services') + else: + modelServices = os.path.join(home,'HCLT','AION','target','services') + filename = package_name+'_service.py' + modelservicefile = os.path.join(modelServices,filename) + status = 'Not Initialized' + ip = '' + port = '' + pid = '' + if os.path.exists(modelservicefile): + status = 'Not Running' + import psutil + for proc in psutil.process_iter(): + pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections']) + if 'python' in pinfo['name']: + if filename in pinfo['cmdline'][1]: + status = 'Running' + pid = pinfo['pid'] + for x in pinfo['connections']: + ip = x.laddr.ip + port = x.laddr.port + + + return(status,pid,ip,port) +def startService(package_name,ip,portNo): + file_path = 
os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','bin','model_service.py')) + from os.path import expanduser + home = expanduser(""~"") + if platform.system() == 'Windows': + modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services') + else: + modelServices = os.path.join(home,'HCLT','AION','target','services') + if not os.path.isdir(modelServices): + os.makedirs(modelServices) + filename = package_name+'_service.py' + modelservicefile = os.path.join(modelServices,filename) + status = 'File Not Exist' + if os.path.exists(modelservicefile): + status = 'File Exist' + r = ([line.split() for line in subprocess.check_output(""tasklist"").splitlines()]) + for i in range(len(r)): + if filename in r[i]: + status = 'Running' + if status == 'File Not Exist': + shutil.copy(file_path,modelservicefile) + with open(modelservicefile, 'r+') as file: + content = file.read() + file.seek(0, 0) + line = 'from '+package_name+' import aion_performance' + file.write(line+""\\n"") + line = 'from '+package_name+' import aion_drift' + file.write(line+ ""\\n"") + line = 'from '+package_name+' import featureslist' + file.write(line+ ""\\n"") + line = 'from '+package_name+' import aion_prediction' + file.write(line+ ""\\n"") + file.write(content) + file.close() + status = 'File Exist' + if status == 'File Exist': + command = ""python ""+modelservicefile+' '+str(portNo)+' '+str(ip) + os.system('start cmd /c ""'+command+'""') + + +def checkInstalledPackge(package_name): + import importlib.util + spec = importlib.util.find_spec(package_name) + if spec is None: + return('Not Installed','','') + else: + if len(spec.submodule_search_locations) > 0: + displaypath = os.path.join(spec.submodule_search_locations[0],'etc','display.json') + with open(displaypath) as file: + config = json.load(file) + file.close() + if 'usecasename' in config: + modelName = config['usecasename'] + else: + modelName = 'NA' + + if 'version' in config: + version = config['version'] + else: + version = 'NA' + return('Installed',modelName,version) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from appbe.data_io import sqlite_db +from os.path import expanduser +import platform +import pandas as pd +import os +from appbe.dataPath import DATA_DIR +PUBLISH_PATH = os.path.join(DATA_DIR,'publish') +DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite') + +def chech_publish_info(usecasename): + version = 0 + status = 'Not Published' + inputDriftStatus = 'No Drift' + MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database') + sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + if sqlite_dbObj.table_exists('publish'): + data = sqlite_dbObj.read('publish',""usecase = '""+usecasename+""' and status = 'Published'"") + if data.shape[0] > 0: + model_sqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db') + version = data['version'].iloc[0] + status = 'Published' + if model_sqlite_dbObj.table_exists('monitoring'): + data = model_sqlite_dbObj.read('monitoring',""version = '""+str(version)+""'"") + if data.shape[0] > 0: + msg = data['Msg'].iloc[-1] + if 'Affected Columns' in msg: + inputDriftStatus = 'Input Drift Found' + return version,status,inputDriftStatus + +def check_input_data(usecasename): + MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database') + sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + data = pd.DataFrame() + if sqlite_dbObj.table_exists('publish'): + dataa = sqlite_dbObj.read('publish',""usecase = '""+usecasename+""' and status = 'Published'"") + if dataa.shape[0] > 0: + modelsqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db') + if modelsqlite_dbObj.table_exists('prodData'): + data = modelsqlite_dbObj.read('prodData') + return data + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json +import os +import pandas as pd +import numpy as np +import subprocess +import sys +import re +import plotly.graph_objects as go +import plotly.figure_factory as ff +def global_explain(request): + try: + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemType = 'classification' + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + if problemType.lower() != 'classification' and problemType.lower() != 'regression': + return 'Problem Type Error','Explainable AI only available for classification and regression problem','NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA' + + displaypath = os.path.join( request.session['deploypath'],'etc','display.json') + with open(displaypath) as file: + config = json.load(file) + file.close() + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeatures = inputFeatures.split(',') + if targetFeature in inputFeatures: + inputFeatures.remove(targetFeature) + + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + from utils.file_ops import read_df_compressed + status,df = read_df_compressed(config['postprocessedData'],encoding='utf8',nrows=10) + #print(df) + df.rename(columns=lambda x: x.strip(), inplace=True) + df = df[inputFeatures] + #print(df) + singleInstanceData = df.loc[5, inputFeatures] + inputFieldsDict = singleInstanceData.to_dict() + inputFields = [] + inputFields.append(inputFieldsDict) + + if 'nrows' in config: + nrows = config['nrows'] + else: + nrows = 'Not Available' + + if 'ncols' in config: + ncols = config['ncols'] + else: + ncols = 'Not Available' + + if 'targetFeature' in config: + targetFeature = config['targetFeature'] + else: + targetFeature = '' + + labelMaps = config['labelMaps'] + modelfeatures = config['modelFeatures'] + mfcount = len(modelfeatures) + df_pro" +"processed = pd.read_csv(dataFilePath) + if 'targetFeature' != '': + target_classes = df_proprocessed[targetFeature].unique() + numberofclasses = len(target_classes) + else: + target_classes = [] + numberofclasses = 'Not Available' + dataPoints = df_proprocessed.shape[0] + df_proprocessed = df_proprocessed.head(5) + df_proprocessed = df_proprocessed.to_json(orient=""records"") + df_proprocessed = json.loads(df_proprocessed) + expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py') + outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global']) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + ale_json = json.loads(str(outputStr)) + ale_json = ale_json['data'] + ale_view = ale_json['data'] + sentences = ale_json['sentences'] + scoreMessage = '' + feature_importance = ale_json['feature_importance'] + dfimp = pd.DataFrame.from_dict(feature_importance) + dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index() + yaxis_data = dfimp['values'].tolist() + xaxis_data = dfimp['labels'].tolist() + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance')) + 
cfig.update_layout(barmode='stack',xaxis_title='Features') + bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000) + dftoprecords = dfimp.head(2) + topTwoFeatures = dfimp['labels'].tolist() + topFeaturesMsg = [] + for i in range(0,len(dfimp)): + value = round(dfimp.loc[i, ""values""],2)*100 + value = round(value,2) + tvalue = str(dfimp.loc[i, ""labels""])+' contributing to '+ str(value)+'%' + topFeaturesMsg.append(tvalue) + most_influencedfeature = ale_json['most_influencedfeature'] + interceppoint = ale_json['interceptionpoint'] + anchorjson = ale_json['anchorjson'] + return 'Success','Success',ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps + except Exception as Inst: + print(Inst) + return 'Error','Exception: '+str(Inst),'NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json + +import pandas as pd + + +def get_true_option(d, default_value=None): + if isinstance(d, dict): + for k, v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + +def get_true_options(d): + options = [] + if isinstance(d, dict): + for k, v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + options.append(k) + return options + +def check_datetime(config): + dateTime = config['basic']['dateTimeFeature'] + if dateTime == '' or dateTime.lower()=='na': + return False + return True + +def check_dtype(d): + flag= 1 + for item in d: + if item[""type""].lower() != ""text"" and item[""type""].lower() != ""index"": + flag = 0 + break + return flag + +def check_text(d): #task 12627 + flag= 0 + for item in d: + if item[""type""].lower() == ""text"": + flag = 1 + break + return flag + +def check_labelencoding(ftr_dict_list, target_ftr): + for ftr_dict in ftr_dict_list: + if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding'].lower()!='labelencoding': + return False + return True + +class timeseries(): + + def __init__(self,config): + self.config=config + if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true': #task 11997 + self.problemType = 'timeSeriesForecasting' + elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': + self.problemType = 'timeSeriesAnomalyDetection' #task 11997 + + def validate_basic_config(self,status='pass',msg=None): + #task 12627 + date_time_status = check_datetime(self.config) + text_status = check_text(self.config['advance']['profiler']['featureDict']) + if not date_time_status and text_status: + msg = 'For time series problem,\\\\n* One feature should be in datetime format\\\\n* Text feature not supported ' + return 'error', msg + elif not 
date_time_status: + msg = 'For time series problem, one feature should be in datetime format' + return 'error', msg + elif text_status: + msg = 'For time series problem, text feature not supported ' + return 'error', msg + selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997 + if isinstance(self.config['basic']['targetFeature'],str): + targetFeature = list(self.config['basic']['targetFeature'].split(',')) + if self.problemType=='timeSeriesForecasting': #task 11997 + if len(targetFeature) > 1: + if 'ARIMA' in selected_algos: + status = 'error' + msg = ""ARIMA is not supported for multilabel (target) feature"" + return status, msg + if ""FBPROPHET"" in selected_algos: + status = 'error' + msg = ""FBPROPHET is not supported for multiLabel (target) feature"" + return status, msg + if 'MLP' in selected_algos: + status = 'error' + msg = ""MLP is not supported for multiLabel (target) feature"" + return status, msg + if len(targetFeature) == 1 and 'VAR' in selected_algos: + status = 'error' + msg = ""VAR is not supported for singleLabel (target) feature"" + return status, msg + elif self.problemType=='timeSeriesAnomalyDetection': #task 11997 + anomChecker = anomaly(self.config) + status, msg = anomChecker.validate_basic_config() + return status, msg + + +class anomaly(): + + def __init__(self,config): + self.config = config + if self.config['basic']['analysisType']['anomalyDetection'].lower()=='true': #task 11997 + self.problemType = 'anomalyDetection' + elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': #task 11997 + self.problemType = 'timeSeriesAnomalyDetection' + + def validate_basic_config(self,status='pass',msg=None): + #task 12627 + date_time_status = check_datetime(self.config) + targetFeature = self.config['basic']['targetFeature'] + if self.problemType=='anomalyDetection' and date_time_status: + status = 'error' + msg = 'Date feature detected. For anomaly detection on time series change problem type to Time Series Anomaly Detection or drop Date feature' + return status, msg + if targetFeature.lower()!= 'na' and targetFeature!= """" and self.config['basic']['inlierLabels'] == '': + status = 'error' + msg = 'Please provide inlier label in case of supervised anomaly detection' + return status, msg + +class survival(): + + def __init__(self,config): + self.config = config + self.problemType= 'survivalAnalysis' + + def validate_basic_config(self): + dateTimeStatus = check_datetime(self.config) + labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature']) + if not dateTimeStatus and not labelencoding_status: + msg = 'For survival analysis problem,\\\\n* One feature should be in datetime format\\\\n* Encoding of categorical features should be of label encoding ' + return 'error', msg + elif not dateTimeStatus: + msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature' + return 'error', msg + elif not labelencoding_status: + msg = 'Categorical features are expected to be label encoded for survival analysis problem. 
Please select it from feature encoding' + return 'error', msg + else: + return 'pass', "" "" + +class associationrule(): + + def __init__(self,config): + self.config=config + + def validate_basic_config(self,status='pass', msg=None): + if self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == 'na' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == 'na': + return ""error"",""Make sure to configure invoice feature and item feature"" + elif self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] == self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']: + return ""error"",""Make sure to invoice feature and item feature is configure correctly"" + else: + return ""pass"", "" "" + +class itemrating(): #task 6081 + + def __init__(self,config): + self.config = config + + def validate_basic_config(self): + data_loc = self.config['basic']['dataLocation'] + data_length = len(pd.read_csv(data_loc)) + if data_length >= 1000000: + return 'error', ""Recommender System can handle data up to 1 million records. Please try with a smaller dataset."" + else: + return ""pass"","" "" + + +class documentsimilarity(): + + def __init__(self,config): + self.config=config + + def validate_basic_config(self,status='pass', msg=None): + flag = check_dtype(self.config['advance']['profiler']['featureDict']) + if flag == 1: + return ""pass"", "" "" + else: + msg=""Make sure to change the feature type from Categorical to Text and drop Numerical features for document similarity"" + return ""error"", msg + + + + +def validate(config): + try: + problem_type = get_true_option(config['basic']['analysisType']) + status = 'pass' + msg = '' + if 'timeseries' in problem_type.lower(): #task 11997 + obj = timeseries(config) + elif problem_type.lower() == 'survivalanalysis': + obj = survival(config) + elif problem_type.lower() == 'anomalydetection': + obj = anomaly(config) + elif problem_type.lower() in ['similarityidentification','contextualsearch']: + obj = documentsimilarity(config) + elif problem_type.lower() == 'recommendersystem': + if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true': + obj = associationrule(config) + elif config['basic']['algorithms']['recommenderSystem']['ItemRating'].lower() == 'true': #task 6081 + obj = itemrating(config) + else: + return 'pass',"""" + else: + return 'pass',"""" + status,msg= obj.validate_basic_config() + print(status, msg, 'io') + return(status,msg) + except Exception as e: + print(e) + + +def start_check(config): + return validate(config) + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import numpy as np +import os +import sys +import scipy.stats as st + +def DistributionFinder(data): + try: + distributionName = """" + sse = 0.0 + KStestStatic = 0.0 + dataType = """" + if (data.dtype == ""float64""): + dataType = ""Continuous"" + elif (data.dtype == ""int""): + dataType = ""Discrete"" + elif (data.dtype == ""int64""): + dataType = ""Discrete"" + if (dataType == ""Discrete""): + distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] + index, counts = np.unique(data.astype(int), return_counts=True) + + if (len(index) >= " +"2): + best_sse = np.inf + y1 = [] + total = sum(counts) + mean = float(sum(index * counts)) / total + variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) + dispersion = mean / float(variance) + theta = 1 / float(dispersion) + r = mean * (float(theta) / 1 - theta) + + for j in counts: + y1.append(float(j) / total) + + pmf1 = st.bernoulli.pmf(index, mean) + pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) + pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) + pmf4 = st.nbinom.pmf(index, mean, r) + pmf5 = st.poisson.pmf(index, mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1 - pmf5, 2.0)) + + sselist = [sse1, sse2, sse3, sse4, sse5] + best_distribution = 'NA' + for i in range(0, len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName = best_distribution + sse = best_sse + + elif (dataType == ""Continuous""): + + distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, + st.gamma, st.beta] + best_distribution = st.norm.name + best_sse = np.inf + datamin = data.min() + datamax = data.max() + nrange = datamax - datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + params = distribution.fit(data.astype(float)) + arg = params[:-2] + loc = params[-2] + scale = params[-1] + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if (best_sse > sse > 0): + best_distribution = distribution.name + best_sse = sse + distributionName = best_distribution + sse = best_sse + except: + response = str(sys.exc_info()[0]) + message = 'Job has Failed' + response + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + print(message) + return distributionName, sse ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time +def add_new_bucket(request): + + try: + + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + + if request.POST[""aionreferencename""] =='' or request.POST[""s3bucketname""] == '' or request.POST[""awsaccesskey""] == '' : + return 'error' + pkeydata='''-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv +fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e +wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re +n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq +i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/ +lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB +-----END RSA PUBLIC KEY-----''' + + pubkey = rsa.PublicKey.load_pkcs1(pkeydata) + awssecretaccesskey = rsa.encrypt(request.POST[""awssecretaccesskey""].encode(), pubkey) + print(awssecretaccesskey) + newdata = {} + newdata['Name'] = request.POST[""aionreferencename""] + newdata['AWSAccessKeyID'] = request.POST[""awsaccesskey""] + newdata['AWSSecretAccessKey'] = str(awssecretaccesskey) + newdata['S3BucketName'] = request.POST[""s3bucketname""] + data.append(newdata) + with open(file_path, 'w') as f: + json.dump(data, f) + f.close() + +def get_s3_bucket(): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + return data +def read_s3_bucket(name,filename,DATA_FILE_PATH): + privkey = '''-----BEGIN RSA PRIVATE KEY----- +MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af +nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw +0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P +M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy +Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx +Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f +VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd +D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT +KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF +DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF +ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ +H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ +4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ +xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww +AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C +cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13 +r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G +NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q +FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET +aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi +M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb +JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ +y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx +QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj +rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL +-----END RSA PRIVATE KEY-----''' + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) + with 
open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + awssecretaccesskey = '' + found = False + for x in data: + if x['Name'] == name: + awssecretaccesskey = x['AWSSecretAccessKey'] + aws_access_key_id = x['AWSAccessKeyID'] + bucketName = x['S3BucketName'] + found = True + break + if found: + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + awssecretaccesskey = eval(awssecretaccesskey) + awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey) + awssecretaccesskey = awssecretaccesskey.decode('utf-8') + #awssecretaccesskey = 'SGcyJavYEQPwTbOg1ikqThT+Op/ZNsk7UkRCpt9g'#rsa.decrypt(awssecretaccesskey, privkey) + client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(awssecretaccesskey)) + #print(bucketName,filename) + try: + response = client_s3.get_object(Bucket=bucketName, Key=filename) + df = pd.read_csv(response['Body']) + except Exception as e: + print(e)#usnish + return 'Error', pd.DataFrame() + + #return 'Error', pd.DataFrame() + return 'Success',df + return 'Error', pd.DataFrame() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os.path +import time +import subprocess +import sys +from appbe.aion_config import kafka_setting +from appbe.aion_config import running_setting +from appbe import installPackage +from appbe import compute +from appbe.models import getusercasestatus +import json +import pandas as pd +import ntpath +import shutil +import platform +from pathlib import Path +from appbe.dataPath import DATA_DIR +LOG_FILE_PATH = os.path.join(DATA_DIR,'logs') +def encrptpackage_command(request,Existusecases,usecasedetails): + command = request.POST.get('encryptedsubmit') + + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure" +"= compute.readComputeConfig() + modelID = request.POST.get('modelID') + p = Existusecases.objects.get(id=modelID) + usecasename = p.ModelName.UsecaseName + usecaseid = p.ModelName.usecaseid + runningStatus,pid,ip,port = installPackage.checkModel" +" import pandas as pd +import numpy as np +def get_leaderboard(file_content): + matched_lines = [line.replace('Model:-', '') for line in file_content.split('\\n') if ""Model:-"" in line] + df = pd.DataFrame(columns = ['Model', 'Iterations', 'Score (%)', 'Score Type', 'Best Score (%)']) + import re + try: + for line in matched_lines: + if 'Model Name::' in line: + MODEL = line.split('::') + model = MODEL[1] + if 'ScoringType::' in line: + S = line.split('::') + #SC = ScorTyp[1] + if 'make_scorer'in line: + ST = line.split('make_scorer') + ScorTyp = ST[1] + df['Score Type'] = np.where(df['Model'] == model, ScorTyp,df['Score Type']) + if 'Validation Score::' in line: + BS = line.split('::') + BestSc = round(float(BS[1]), 4)*100 + BestSc = abs(BestSc) + df['Best Score (%)'] = np.where(df['Model'] == model, BestSc, df['Best Score (%)']) + if 'Iteration::' in line: + l = line.split('::') + word = re.findall(r'\\[(.*?)\\]', l[1]) + if ';, score=' in line: + sc = line.split('score=') + SCR = sc[1].split(' ') + Score = 
round(float(SCR[0]), 4)*100 + Score = abs(Score) + # df = df.concat({'Model': model, 'Iterations': word,'Score (%)': Scor,'Score Type': '', 'Best Score (%)': 0}, ignore_index=True) + newdf = pd.DataFrame([{'Model': model, 'Iterations': word,'Score (%)': Score,'Score Type': '', 'Best Score (%)': 0}]) + df = pd.concat([df,newdf],axis=0, ignore_index=True) + + LIST = [] + for i in range(int(len(df['Score (%)'])/5)): + l = (sum(df['Score (%)'][5*i:5*(i+1)])/5) + #LIST.concat(l) + LIST.append(l) + for i in range(len(LIST)): + df['Score (%)'][5*i:5*(i+1)]=LIST[i] + + CL = [line.replace('------->Type of Model :classification', 'Model :classification') for line in file_content.split('\\n') if ""------->Type of Model :classification"" in line] + for l in CL: + if 'Model :classification' in l: + df = df.sort_values(by = ['Best Score (%)'], ascending=False) + RE = [line.replace('------->Type of Model :regression', 'Model :regression') for line in file_content.split('\\n') if ""------->Type of Model :regression"" in line] + for l in RE: + if 'Model :regression' in l: + df = df.sort_values(by = ['Best Score (%)']) + except Exception as e: + print(e) + return df + +if __name__ == ""__main__"": + file_path = r""C:\\Users\\richard.mochahari\\AppData\\Local\\Programs\\HCLTech\\AION\\data\\target\\AI0335\\1\\log\\model_training_logs.log"" + my_file = open(file_path, 'r') + file_content = my_file.read() + my_file.close() + print(get_leaderboard(file_content)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os,sys +import json + +def getInstanceonGCP(image,instances): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('LLMTuning'): + data = sqlite_obj.read_data('LLMTuning','image=""'+image['id']+'""') + for values in data: + instance = {} + instance['type'] = 'instance' + instance['id'] = values[2] + instance['workLoad'] = image['workLoad'] + instance['machineImageProjectID'] = image['machineImageProjectID'] + instance['ssh'] = image['ssh'] + instance['machineConfiguration'] = image['machineConfiguration'] + instance['instanceType'] = image['instanceType'] + instances.append(instance) + except Exception as e: + print(e) + return instances + +def getInstanceonAWS(amiid,instances): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('LLMTuning'): + data = sqlite_obj.read_data('LLMTuning','image=""'+amiid['id']+'""') + for values in data: + instance = {} + instance['type'] = 'instance' + instance['id'] = values[2] + instance['workLoad'] = amiid['workLoad'] + instance['regionName'] = amiid['regionName'] + instance['ssh'] = amiid['ssh'] + instance['machineConfiguration'] = amiid['machineConfiguration'] + instance['instanceType'] = amiid['instanceType'] + instances.append(instance) + except Exception as e: + print(e) + return instances +def updatelocalsetings(request): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('computeInfrastructure'): + updated_data = 'selectedInfrastructure=""Local""' + sqlite_obj.update_data(updated_data,'','computeInfrastructure') +def updateToComputeSettings(infratructure): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('computeInfrastructure'): + updated_data = 'selectedInfrastructure=""'+infratructure+'""' + sqlite_obj.update_data(updated_data,'','computeInfrastructure') +def updateGCPConfig(request): + try: + credentialsJson = request.POST.get('credentialsJson') + projectID = request.POST.get('gcpProjectid') + machineType = request.POST.get('gcpmachineType') + selectedID = request.POST.get('gcpInstance') + gcpZone = request.POST.get('gcpZone') + workload = request.POST.get('gcpworkload') + noOfInstance = request.POST.get('GCPnoofinstance') + #print(credentialsJson,projectID,machineType,selectedID,gcpZone,workload,noOfInstance) + if credentialsJson != '' and projectID != '': + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('gcpCredentials'): + updated_data = 'credentialsJson=""'+credentialsJson+'"",projectID=""'+projectID+'"",machineType=""'+machineType+'"",selectedID=""'+selectedID+'"",regionName=""'+gcpZone+'"",noOfInstance=""'+str(noOfInstance)+'"",workload=""'+workload+'""' + sqlite_obj.update_data(updated_data,'','gcpCredentials') + else: + newdata = {} + 
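# First-time configuration: no gcpCredentials row exists yet, so assemble the record below (each column as a single-element list), convert it to a DataFrame and persist it through sqlite_obj.write_data. + 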
newdata.update({'id':['1'],'credentialsJson': [credentialsJson],'projectID': [projectID],'machineType':[machineType],'selectedID':[selectedID],'regionName':[gcpZone],'noOfInstance':[noOfInstance],'workload':[workload]}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcpCredentials') + return('success') + else: + return('error') + except Exception as e: + print(e) + return('error') +def updateComputeConfig(request): + try: + AWSAccessKeyID = request.POST.get('AWSAccessKeyID') + AWSSecretAccessKey = request.POST.get('AWSSecretAccessKey') + workload = request.POST.get('workload') + machineType = request.POST.get('machineType') + selectedID = request.POST.get('amiInstance') + regionName = request.POST.get('regionName') + noOfInstance = request.POST.get('NoOfInstance') + securitygroupid = request.POST.get('AWSSecuritygroupID') + if AWSAccessKeyID != '' and AWSSecretAccessKey != '': + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('awsCredentials'): + column_names = sqlite_obj.column_names('awsCredentials') + if 'securitygroupid' not in column_names: + query = 'Alter Table awsCredentials ADD securitygroupid TEXT' + sqlite_obj.execute_query(query) + updated_data = 'AWSAccessKeyID=""'+AWSAccessKeyID+'"",AWSSecretAccessKey=""'+AWSSecretAccessKey+'"",machineType=""'+machineType+'"",selectedID=""'+selectedID+'"",regionName=""'+regionName+'"",noOfInstance=""'+noOfInstance+'"",workload=""'+workload+'"",securitygroupid=""'+securitygroupid+'""' + sqlite_obj.update_data(updated_data,'','awsCredentials') + else: + newdata = {} + newdata.update({'id':['1'],'AWSAccessKeyID': [AWSAccessKeyID],'AWSSecretAccessKey': [AWSSecretAccessKey],'machineType':[machineType],'selectedID':[selectedID],'regionName':[regionName],'noOfInstance':[noOfInstance],'workload':[workload],'securitygroupid':[securitygroupid]}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'awsCredentials') + return('success') + else: + return('error') + except Exception as e: + print(e) + return('error') + +def selectedInfratructure(): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + selcInfra = 'Local' + if sqlite_obj.table_exists('computeInfrastructure'): + data = sqlite_obj.read_data('computeInfrastructure') + for values in data: + selcInfra = values[1] + return selcInfra + +def readComputeConfig(): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','compute_conf.json')) + f = open(file_path, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + selcInfra = 'Local' + if sqlite_obj.table_exists('computeInfrastructure'): + data = sqlite_obj.read_data('computeInfrastructure') + for values in data: + selcInfra = values[1] + + else: + data = {} + data.update({'id':['1'],'selectedInfrastructure': ['Local']}) + sqlite_obj.write_data(pd.DataFrame.from_dict(data),'computeInfrastructure') + configSettingsJson['computeInfrastructure'] = selcInfra + for ami in configSettingsJson['AWS_EC2']['amis']: + configSettingsJson['AWS_EC2']['instances'] = 
getInstanceonAWS(ami,configSettingsJson['AWS_EC2']['instances']) + for image in configSettingsJson['GCP']['machineImage']: + configSettingsJson['GCP']['instances'] = getInstanceonGCP(image,configSettingsJson['GCP']['instances']) + + AWSAccessKeyID = '' + AWSSecretAccessKey = '' + securitygroupid = '' + machineType = 'AMI' + selectedID = '' + regionName = '' + noofInfra = 1 + workLoad = 'LLM' + if sqlite_obj.table_exists('awsCredentials'): + column_names = sqlite_obj.column_names('awsCredentials') + #print(column_names) + if 'workload' not in column_names: + query = 'Alter Table awsCredentials ADD workload TEXT' + sqlite_obj.execute_query(query) + if 'securitygroupid' not in column_names: + query = 'Alter Table awsCredentials ADD securitygroupid TEXT' + sqlite_obj.execute_query(query) + data = sqlite_obj.read_data('awsCredentials') + for values in data: + AWSAccessKeyID = values[1] + AWSSecretAccessKey = values[2] + machineType = values[3] + selectedID = values[4] + regionName = values[5] + noofInfra = values[6] + workLoad = values[7] + securitygroupid = values[8] + selectedAWS = {} + selectedAWS['accessKey'] = AWSAccessKeyID + selectedAWS['secretAccessKey'] = AWSSecretAccessKey + selectedAWS['machineType']=machineType + selectedAWS['selectedID'] = selectedID + selectedAWS['regionName'] = regionName + selectedAWS['noOfInstance']=noofInfra + selectedAWS['workLoad'] = workLoad + selectedAWS['securitygroupid'] = securitygroupid + configSettingsJson['awsCredentials'] = selectedAWS + + gcpCredentials='' + projectID = '' + selectedID = '' + machineType = '' + regionName = '' + noOfInstance = 1 + workLoad = 'LLM' + if sqlite_obj.table_exists('gcpCredentials'): + column_names = sqlite_obj.column_names('gcpCredentials') + if 'workload' not in column_names: + query = 'Alter Table gcpCredentials ADD workload TEXT' + sqlite_obj.execute_query(query) + data = sqlite_obj.read_data('gcpCredentials') + for values in data: + gcpCredentials" +"= values[1] + projectID = values[2] + machineType = values[3] + selectedID = values[4] + regionName = values[5] + noOfInstance = values[6] + workLoad = values[7] + " +"False' + elif model in [""Neural Architecture Search""]: + model.xplain = 'False' + model.flserversupport = 'False' + model.onlinelerningsupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + supportedmodels = [""Extreme Gradient Boosting (XGBoost)""] + if model.deploymodel in supportedmodels: + model.encryptionsupport = 'True' + else: + model.encryptionsupport = 'False' + supportedmodels = [""Online Decision Tree Classifier"",""Online Logistic Regression"",""Online Linear Regression"",""Online Decision Tree Regressor"",""Online KNN Regressor"",""Online Softmax Regression"",""Online KNN Classifier""] + if model.deploymodel in supportedmodels: + model.onlinelerningsupport = 'True' + onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json') + with open(onlineoutputPath) as file: + onlineoutputPath = json.load(file) + file.close() + details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']} + dfonline = pd.DataFrame(details) + model.oltrainingdetails = dfonline + else: + model.onlinelerningsupport = 'False' + except Exception as e: + print(e) + pass + + return models + + +def landing_page(usecasedetails,Existusecases,hosturl,usecaseId = None,search_text=None): + + sqlite_dbObj = 
sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + if usecaseId: + usecase = usecasedetails.objects.filter(id=usecaseId) + + else: + if search_text: + usecase = usecasedetails.objects.filter(UsecaseName__contains=str(search_text)).order_by('-id') + else: + #usecase = usecasedetails.objects.all().order_by('-id')[:100] #top 100 records + usecase = usecasedetails.objects.all().order_by('-id') #top 100 records + usecaselist=[] + + if not usecaseId: + for x in usecase: + problemType= 'NA' + publish_url = '' + otherModel = {} + models = Existusecases.objects.filter(Status='SUCCESS',publishStatus='Published',ModelName=x.id).order_by('-id') + + if len(models) > 0: + #print(models[0]) + version = models[0].Version + if os.path.isdir(str(models[0].DeployPath)): + modelPath = os.path.join(str(models[0].DeployPath),'etc','output.json') + with open(modelPath) as file: + outputconfig = json.load(file) + problemType = outputconfig['data']['ModelType'] + #print(problemType.lower()) + if problemType.lower() == ""llm fine-tuning"": + cloudconfig = os.path.normpath( + os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) + print(cloudconfig) + from appbe.models import get_instance + hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version)) + from llm.llm_inference import get_ip + instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image) #usnish__ server maynot running + if instanceip != '': + publish_url = 'http://' + instanceip + ':' + '8000' + '/generate' + else: + publish_url = 'service not available' + else: + publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict' + publish_status = 'Published' + #publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict' + parentModel = get_model(Existusecases,x.id,int(version)) + else: + models = Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id') + if len(models) > 0: + publish_status = 'Trained' + version = models[0].Version + parentModel = get_model(Existusecases,x.id,int(version)) + else: + models = Existusecases.objects.filter(ModelName=x.id).order_by('-id') + if len(models)==0: + publish_status= 'Not Trained' + version = -1 + else: + if models[0].Status == 'FAIL': + publish_status= 'Failed' + elif models[0].Status == 'Running': + publish_status = 'Running' + else: + publish_status='Not Trained' + + problemType = models[0].ProblemType + version = models[0].Version + parentModel={} + usecasedetails = {'uuid':x.id,'description':x.Description,'usecaseid':x.usecaseid,'usecase':x.UsecaseName,'status':publish_status,'publish_url':publish_url,'version':version,'parentModel':parentModel,'otherModel':otherModel,'problemType':problemType} + usecaselist.append(usecasedetails) + else: + for x in usecase: + otherModel = get_model(Existusecases,x.id) + problemType = otherModel[0].problemType + usecasedetails = {'uuid':x.id,'description':x.Description,'usecase':x.UsecaseName,'status':'','version':'','parentModel':{},'otherModel':otherModel,'problemType':problemType} + usecaselist.append(usecasedetails) + + return usecaselist +def get_landing_model(Existusecases): + models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id') + for model in models: + model.scoringCreteria = 'NA' + model.score = 'NA' + model.deploymodel = 'NA' + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath),'etc','output.json') + try: + with open(modelPath) as file: + outputconfig = 
json.load(file) + file.close() + if outputconfig['status'] == 'SUCCESS': + model.scoringCreteria = outputconfig['data']['ScoreType'] + model.score = outputconfig['data']['BestScore'] + model.deploymodel = outputconfig['data']['BestModel'] + model.problemType = outputconfig['data']['ModelType'] + model.maacsupport = 'True' + model.flserversupport = 'False' + model.onlinelerningsupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + supportedmodels = [""Extreme Gradient Boosting (XGBoost)""] + if model.deploymodel in supportedmodels: + model.encryptionsupport = 'True' + else: + model.encryptionsupport = 'False' + supportedmodels = [""Online Decision Tree Classifier"",""Online Logistic Regression""] + if model.deploymodel in supportedmodels: + model.onlinelerningsupport = 'True' + onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json') + with open(onlineoutputPath) as file: + onlineoutputPath = json.load(file) + file.close() + details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']} + dfonline = pd.DataFrame(details) + model.oltrainingdetails = dfonline + else: + model.onlinelerningsupport = 'False' + except Exception as e: + pass + return models + +def usecase_page(request,usecasedetails,Existusecases,usecaseid,search_text): + + try: + from appbe import read_service_url_params + tacking_url = read_service_url_params(request) + except: + tacking_url = '127.0.0.1' + hosturl =request.get_host() + hosturl = hosturl.split(':') + hosturl = hosturl[0] + computeinfrastructure = compute.readComputeConfig() + from appbe.aion_config import settings + usecasetab = settings() + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + status,msg = pushRecordForTraining() + if status == False: + context = {'msg':msg} + context['selected'] = 'License' + return status,context,'licenseexpired.html' + ser_url = service_url.read_service_url_params(request) + packagetip=''' +Call From Command Line +1. Click AION Shell +2. python {packageAbsolutePath}/aion_predict.py {json_data} +Call As a Package +1. Go To package_path\\publish\\package +2. python -m pip install {packageName}-py3-none-any.whl +Call the predict function after wheel package installation +1. from {packageName} import aion_predict as p1 +2. 
p1.predict({json_data})''' + if request.method == ""POST"": + usecasename = request.POST.get('UsecaseName') + description = request.POST.get('Description') + usecaseid = request.POST.get('usecaseid') + #print('1',usecasename) + if (usecasename == ''): + + usecase = landing_page(usecasedetails,Existusecases,hosturl) + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + else: + nouc = 1 + nouc = str(nouc).zfill(4) + description_text = 'This is a usecase for AI' + str(nouc) + context = {'description_text':description_text,'usecase':'usecase','Notallowed':'Usecasename is mandatory','ser_url':ser_url,'packagetip':packagetip,'usecasedetail': usecase,'nouc':nouc, 'ser_url':ser_url,'packagetip':packagetip, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'tacking_url':tacking_url,'usecasetab':usecasetab, + 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting} + return status,context,'usecases.html' + else: + usecase_count = usecasedetails.objects.filter(usecaseid=usecaseid).count() + usecasename_count = usecasedetails.objects.filter(UsecaseName=usecasename).count() + usecase = landing_page(usecasedetails,Existusecases,hosturl) + + + + if (usecase_count > 0) or (usecasename_count > 0): + + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + nouc = str(nouc).zfill(4) + Msg = 'Error in usecase creating, try again' + if usecase_count > 0: + Msg = 'Error in usecase creating, try again' + if usecasename_count > 0: + Msg = 'There is already a use case with same name, please provide unique name' + description_text = 'This is a usecase for AI' + str(nouc) + context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc,'Status':'error','Msg': Msg,'tacking_url':tacking_url,'usecasetab':usecasetab,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip, + 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting} + return status,context,'usecases.html' + + else: + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + p = usecasedetails(UsecaseName=usecasename,usecaseid=usecaseid,Description=description) + p.save() + s1 = Existusecases.objects.filter(ModelName=p.id).annotate(maxver=Max('ModelName__existusecases__Version')) + config_list = s1.filter(Version=F('maxver')) + if config_list.count() > 0: + Version = config_list[0].Version + Version = Version + 1 + else: + Version = 1 + ps = Existusecases(DataFilePath='', DeployPath='', Status='Not Trained',ConfigPath='', Version=Version, ModelName=p,TrainOuputLocation='') + ps.save() + request.session['ModelName'] = p.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = usecaseid + request.session['ModelVersion'] = Version + request.session['ModelStatus'] = 'Not Trained' + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + selected_use_case = usecasename + model_status = 'Not Trained' + ModelVersion = Version + from appbe.telemetry import UseCaseCreated + UseCaseCreated(usecaseid+'-'+str(Version)) + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc 
= (nouc.id)+1 + else: + nouc = 1 + nouc = str(nouc).zfill(4) + description_text = 'This is a usecase for AI' + str(nouc) + context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'newusercase': usecasename,'tacking_url':tacking_url,'finalstate':request.session['finalstate'], + 'description': description,'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'clusteringModels':clusteringModels,'s3buckets':get_s3" +"_bucket(),'gcsbuckets':get_gcs_bucket(),'usecasetab':usecasetab,'azurestorage':get_azureStorage(), + 'ModelStatus': model_status, 'ModelVersion': ModelVersion, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure} + return status,context,'upload.html' + else: + + + models = get_landing_model(Existusecases) + + usecase = landing_page(usecasedetails,Existusecases,hosturl,usecaseid,search_text) + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + else: + nouc = 1 + nouc = str(nouc).zfill(4) + description_text = 'This is a usecase for AI' + str(nouc) + context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'tacking_url':tacking_url,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab} + if usecaseid: + context.update({'ucdetails':'True'}) + return status,context,'usecases.html' + +def index_page(request,usecasedetails,Existusecases): + if 'ModelVersion' in request.session: + del request.session['ModelVersion'] + if 'UseCaseName' in request.session: + del request.session['UseCaseName'] + if 'ModelStatus' in request.session: + del request.session['ModelStatus'] + if 'currentstate' in request.session: + del request.session['currentstate'] + if 'finalstate' in request.session: + del request.session['finalstate'] + return usecases_page(request,usecasedetails,Existusecases) + +def usecases_page(request,usecasedetails,Existusecases,usecaseid=None,substring=None): + return usecase_page(request,usecasedetails,Existusecases,usecaseid,substring) + +def mllite_page(request): + from appbe.aion_config import settings + usecasetab = settings() + status,msg = pushRecordForTraining() + if status == False: + context = {'selected':'mllite','lerror':msg} + return context + configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + context = {} + context = {'selected':'mllite','sagemaker':configSettingsJson,'usecasetab':usecasetab} + return context +def mltesting_page(request): + from appbe.aion_config import settings + usecasetab = settings() + status,msg = pushRecordForTraining() + if status == False: + context = {'lerror':msg} + return context + if request.method == ""POST"": + models = request.POST['model'] + datap = request.POST['data'] + if(os.path.isfile(models) and os.path.isfile(datap)): + request.session['datalocation'] = datap + df = pd.read_csv(datap,encoding='utf-8',skipinitialspace = True,encoding_errors= 'replace') + trainfea = df.columns.tolist() + featurs = request.POST.getlist('Training') + feature = "","".join(featurs) + filetimestamp = str(int(time.time())) + settingconfig = os.path.join(CONFIG_FILE_PATH, 'MLTest_' + filetimestamp + '.json') + 
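# Persist the selected model path, data path, training features and target to a timestamped MLTest_<epoch>.json config; the same file is re-read further down to append the test scores and parameters. + 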
request.session['MLTestResult'] = settingconfig + mltestresult={} + mltestresult['models'] = models + mltestresult['datap'] = datap + mltestresult['feature'] = feature + # features = ['PetalLengthCm','PetalWidthCm'] + targ = request.POST['Target'] + tar =[targ] + mltestresult['target'] = targ + mltestresult = json.dumps(mltestresult) + with open(settingconfig, ""w"") as fpWrite: + fpWrite.write(mltestresult) + fpWrite.close() + from pathlib import Path + mltest={} + if Path(models).is_file() and Path(datap).is_file(): + try: + from mltest import baseline + outputStr = baseline.baseline_testing(models,datap, feature, targ) + #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_mltest.py')) + #print(scriptPath, models, datap, feature, targ) + #outputStr = subprocess.check_output([sys.executable, scriptPath, models, datap, feature, targ]) + #print(outputStr) + #outputStr = outputStr.decode('utf-8') + #outputStr= outputStr.replace('\\'','\\""') + #print('ou',outputStr) + #outputStr = outputStr.strip() + mltest = json.loads(outputStr) + Problemtype= mltest['Problemtype'] + with open(request.session['MLTestResult'], 'r+') as f: + mltestresult = json.load(f) + f.close() + mltestresult['Problemtype'] = Problemtype + mltestresult['ProblemName'] = mltest['ProblemName'] + status = mltest['Status'] + if status == 'Fail': + errormsg= mltest['Msg'] + context = {'error':errormsg,'mltest':'mltest'} + else: + if Problemtype == 'Classification': + mltestresult['Score'] = mltest['Accuracy'] + mltestresult['Params'] = mltest['Params'] + Problem= mltest['ProblemName'] + Parameters= mltest['Params'] + round_params = {} + for key, value in Parameters.items(): + if isinstance(value, float): + round_params[key] = round(value,2) + else: + round_params[key] = value + matrixconfusion = mltest['Confusionmatrix'] + classificationreport = mltest['classificationreport'] + classificationreport = json.loads(classificationreport) + matrixconfusion = json.loads(matrixconfusion) + indexName =[] + columnName = [] + for i in matrixconfusion.keys(): + + indexName.append(""act:""+str(i)) + + for j in matrixconfusion[i].keys(): + + columnName.append(""pre:""+str(j)) + df3 = pd.DataFrame.from_dict(classificationreport) + df = df3.transpose() + df2 = pd.DataFrame.from_dict(matrixconfusion) + df1 = pd.DataFrame(df2.values,index=indexName,columns=columnName) + report = df.to_html() + report1 = df1.to_html() + recordone = mltest['onerecord'] + recordsten = mltest['tenrecords'] + recordshund = mltest['hundrecords'] + context = {'modelname': models,'datapath':datap,'features':featurs,'target':tar,'Problemtype':Problem,'modeltype':Problemtype,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'matrixconfusion':report1,'classificationreport':report,'classification':'classification','df':df,'df1':df1,'basemltest':'basemltest','success':'success','trainfea':trainfea,'selected':'mltesting','usecasetab':usecasetab} + + elif Problemtype == 'Regression': + Problem= mltest['ProblemName'] + mltestresult['Params'] = mltest['Params'] + mltestresult['Score'] = mltest['R2'] + Parameters= mltest['Params'] + round_params = {} + for key, value in Parameters.items(): + if isinstance(value, float): + round_params[key] = round(value,2) + else: + round_params[key] = value + Mse = mltest['MSE'] + Mae = mltest['MAE'] + Rmse = mltest['RMSE'] + R2 = mltest['R2'] + recordone = mltest['onerecord'] + recordsten = mltest['tenrecords'] + recordshund = 
mltest['hundrecords'] + context = {'modelname': models,'datapath':datap,'features':featurs,'target':tar, 'Problemtype':Problem,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'Mse':Mse,'Mae':Mae,'Rmse':Rmse,'R2Score':R2,'regression':'regression','success':""success"",'selected': 'mltest','basemltest':'basemltest','usecasetab':usecasetab} + else: + errormsg= mltest['Msg'] + context = {'error':errormsg,'mltest':'mltest'} + + + mltestresult = json.dumps(mltestresult) + with open(settingconfig, ""w"") as fpWrite: + fpWrite.write(mltestresult) + fpWrite.close() + except Exception as e: + print(""-------------""+str(e)+'=================') + e = str(e).replace('\\'','') + errormsg = 'Error: Exception '+str(e) + context = {'error':errormsg,'mltest':'mltest'} + else: + if not (Path(models).is_file() and Path(datap).is_file()): + context = {'error':""Please Check ModelPath & Datapath Format"",""result"":""result"",'selected':'mltesting','usecasetab':usecasetab} + elif not Path(models).is_file(): + context = {'error':""Please Check ModelPath Format"",""result"":""result"",'selected':'mltesting','usecasetab':usecasetab} + elif not Path(datap).is_file(): + context = {'error':""Please Check DataPath Format"",""result"":""result"",'selected':'mltesting','usecasetab':usecasetab} + else: + context = {'error':'Either model path or data path does not exist','mltest':'mltest','usecasetab':usecasetab} + else: + context = {'selected':'mltesting','usecasetab':usecasetab} + return context from langkit import textstat +from whylogs.experimental.core.udf_schema import udf_schema +import pandas as pd +import whylogs as why +from langkit import light_metrics +from whylogs.experimental.core.udf_schema import udf_schema +from whylogs.experimental.core.udf_schema import register_dataset_udf +import whylogs as why +import json +from sentence_transformers import SentenceTransformer, util +from langkit import lang_config, response_column + + +def evaluate_prompt_metrics(prompt_msg: any): + """""" Evaluate prompt only information."""""" + text_schema = udf_schema() + llm_schema = light_metrics.init() + df = pd.DataFrame({ + ""prompt"": [ + prompt_msg + ]}) + results = why.log(df, schema=udf_schema()) # .profile() + view = results.view() + + automated_readability_index_prompt = view.get_column(""prompt.automated_readability_index"").to_summary_dict() + automated_readability_index_prompt_mean = automated_readability_index_prompt['distribution/mean'] + arip_m = lambda x:1 if x < 1 else (14 if x > 14 else x) + automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean) + automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean) + + flesch_reading_ease_prompt = view.get_column(""prompt.flesch_reading_ease"").to_summary_dict() + flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean'] + frep_m = lambda x:1 if x < 1 else (100 if x > 100 else x) + flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean) + flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean) + + prompt_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean), + 'prompt_readability_value': automated_readability_index_prompt_value, + + 'prompt_reading_ease': str(flesch_reading_ease_prompt_mean), + 'prompt_reading_ease_value': flesch_reading_ease_prompt_value} + + prompt_results_json = json.dumps(prompt_results, 
indent=4) + return prompt_results_json,prompt_results + + +model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') +@register_dataset_udf([""prompt"", ""response""], ""response.relevance_to_prompt"") +def similarity_MiniLM_L6_v2(text): + x = text[""prompt""] + y = text[""response""] + embedding_1 = model.encode(x, convert_to_tensor=True) + embedding_2 = model.encode(y, convert_to_tensor=True) + similarity = util.pytorch_cos_sim(embedding_1, embedding_2) + result = similarity.item() + return result + + +def get_readability_index_range_value(readability_value): + if readability_value <= 1: + ## Grade level Kindergarden to fourth grade + return ""Kindergarten"" + elif 1 < readability_value <= 2: + ## Grade level Kindergarden to fourth grade + return ""First Grade"" + elif 2 < readability_value <= 3: + ## Grade level Fifth grade to Ninth grade + return ""Second Grade"" + elif 3 < readability_value <= 4: + ## Grade level Fifth grade to Ninth grade + return ""Third Grade"" + elif 4 < readability_value <= 5: + ## Grade level Fifth grade to Ninth grade + return ""Fourth Grade"" + elif 5 < readability_value <= 6: + ## Grade level Fifth grade to Ninth grade + return ""Fifth Grade"" + elif 6 < readability_value <= 7: + ## Grade level Fifth grade to Ninth grade + return ""Sixth Grade"" + elif 7 < readability_value <= 8: + ## Grade level Fifth grade to Ninth grade + " +"return ""Seventh Grade"" + elif 8 < readability_value <= 9: + ## Grade level Fifth grade to Ninth grade + return ""Eighth Grade"" + elif 9 < readability_value <=10: + ## Grade level Fifth grade to Ninth grade + return ""Ninth Grade"" + elif 10 < readability_value <=11: + ## Grade level Fifth grade to Ninth grade + return ""Tenth Grade"" + elif 11 < readability_value <=12: + ## Grade level Fifth grade to Ninth grade + return ""Eleventh Grade"" + elif 12 < readability_value <= 13: + ## Grade level Fifth grade to Ninth grade + return ""Twelfth Grade"" + elif readability_value > 13: + ## Grade level Fifth grade to Ninth grade + return ""College Grade"" + else: + return ""College Grade"" + +def get_flesch_reading_ease_prompt_value(readability_value): + """""" Get flesch readability score range approximation"""""" + if readability_value <= 29: + return ""Very Confusing"" + elif 29 < readability_value <= 49: + return ""Difficult"" + elif 49 < readability_value <= 59: + return ""Fairly Difficult"" + elif 59 < readability_value <= 69: + return ""Standard"" + elif 69 < readability_value <= 79: + return ""Fairly Easy"" + elif 79 < readability_value <= 89: + return ""Easy"" + elif 89 < readability_value <= 100: + return ""Very Easy"" + else: + return ""Very Easy"" + +def get_relevence_to_response_value(similarity_score): + """""" To findout relevence to response results based on similarity score."""""" + if similarity_score <=0.3: + return ""Low"" + elif 0.3 < similarity_score <= 0.5: + return ""Average"" + elif 0.5 < similarity_score <= 0.8: + return ""Good"" + elif similarity_score > 0.8: + return ""High"" + + + +def evaluate_prompt_response_inputs (prompt_msg:any, response_msg:any)->str: + """""" Predict the text quality, text relevence for both prompt and response messages."""""" + df = pd.DataFrame({ + ""prompt"": [prompt_msg], + ""response"": [response_msg]}) + results = why.log(df, schema=udf_schema()) + view = results.view() + + automated_readability_index_prompt = view.get_column(""prompt.automated_readability_index"").to_summary_dict() + automated_readability_index_prompt_mean = 
automated_readability_index_prompt['distribution/mean'] + arip_m = lambda x:1 if x < 1 else (14 if x > 14 else x) + automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean) + automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean) + + flesch_reading_ease_prompt = view.get_column(""prompt.flesch_reading_ease"").to_summary_dict() + flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean'] + frep_m = lambda x:1 if x < 1 else (100 if x > 100 else x) + flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean) + flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean) + + automated_readability_index_response = view.get_column(""response.automated_readability_index"").to_summary_dict() + automated_readability_index_response_mean = automated_readability_index_response['distribution/mean'] + arir_m = lambda x:1 if x < 1 else (14 if x > 14 else x) + automated_readability_index_response_mean = arir_m(automated_readability_index_response_mean) + automated_readability_index_response_value = get_readability_index_range_value(automated_readability_index_response_mean) + + flesch_reading_ease_response = view.get_column(""response.flesch_reading_ease"").to_summary_dict() + flesch_reading_ease_response_mean = flesch_reading_ease_response['distribution/mean'] + frer_m = lambda x:1 if x < 1 else (100 if x > 100 else x) + flesch_reading_ease_response_mean = frer_m(flesch_reading_ease_response_mean) + flesch_reading_ease_response_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_response_mean) + + relevance_to_response = view.get_column(""response.relevance_to_prompt"").to_summary_dict() + relevance_to_response_mean = relevance_to_response['distribution/mean'] + r2r_m = lambda x:0 if x < 0 else (1 if x > 1 else x) + relevance_to_response_mean = r2r_m(relevance_to_response_mean) + relevance_to_response_value = get_relevence_to_response_value(relevance_to_response_mean) + + sentence_count_response = view.get_column(""response.sentence_count"").to_summary_dict() + sentence_count_response_mean = sentence_count_response['distribution/mean'] + word_count_response = view.get_column(""response.lexicon_count"").to_summary_dict() + word_count_response_mean = word_count_response['distribution/mean'] + prompt_response_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean), + 'prompt_readability_value': automated_readability_index_prompt_value, + + 'prompt_reading_ease': str(flesch_reading_ease_prompt_mean), + 'prompt_reading_ease_value': flesch_reading_ease_prompt_value, + + 'response_readability': str(automated_readability_index_response_mean), + 'response_readability_value': str(automated_readability_index_response_value), + + 'response_reading_ease': str(flesch_reading_ease_response_mean), + 'response_reading_ease_value': str(flesch_reading_ease_response_value), + + 'response_sentence_count': str(sentence_count_response_mean), + 'response_word_count_response': str(word_count_response_mean), + + 'relevance_to_response': str(relevance_to_response_mean), + 'relevance_to_response_value': relevance_to_response_value + } + final_output_json = json.dumps(prompt_response_results, indent=4) + return final_output_json,prompt_response_results + + + +if __name__ == ""__main__"": + ##Test only prompt message information + option = 'predict' + if option == 'evaluate': + prompt_only_response_msg = ""A large 
language model is an advanced artificial intelligence (AI) system designed to process, understand, and generate human-like text based on massive amounts of data. These models are typically built using deep learning techniques, such as neural networks, and are trained on extensive datasets that include text from a broad range, such as books and websites, for natural language processing.Fine-tuning a large language model involves adjusting and adapting a pre-trained model to perform specific tasks or to cater to a particular domain more effectively. The process usually entails training the model further on a smaller, targeted dataset that is relevant to the desired task or subject matter.Few-shot learning (FSL) can be considered as a meta-learning problem where the model learns how to learn to solve the given problem. In this approach, the model is provided with a very limited number of examples (i.e., “few shots”) from the new task, and it uses this information to adapt and perform well on that task. Adapter Training: Adapter training is a method that involves training lightweight modules that are plugged into the pre-trained model, allowing for fine-tuning on a specific task without affecting the original model’s performance on other tasks.Multi-task Learning: Multi-task learning is a method where the pre-trained model is fine-tuned on multiple tasks simultaneously. This approach enables the model to learn and leverage the shared representations across different tasks, leading to better generalization and performance. Task-specific Fine-tuning: Task-specific fine-tuning is a method where the pre-trained model is fine-tuned on a specific task or domain using a task-specific dataset. This method requires more data and time than transfer learning but can result in higher performance on the specific task. Sequential Fine-tuning: Sequential fine-tuning is a method where a pre-trained model is fine-tuned on multiple related tasks or domains sequentially. This allows the model to learn more nuanced and complex language patterns across different tasks, leading to better generalization and performance.A noteworthy avenue of research within LLM fine-tuning explores strategies to reduce the expenses associated with updating model parameters. This endeavor is the essence of parameter-efficient fine-tuning (PEFT), a collection of techniques aiming to curtail the number of parameters requiring adjustments.Various PEFT techniques exist, and one prominent example is a low-rank adaptation (LoRA), a technique gaining popularity among open-source language models."" + prompt_res = evaluate_prompt_metrics(prompt_only_response_msg) + elif option == 'predict': + prompt_msg = ""What is AION?"" + response_msg = ""AION (Artificial Intelligence ONline) is an open -source software platform for building, deploying and operating the entire lifecycle of AI applications. It supports various use cases such as predictive analytics , machine learning and deep learning . Key features: 1. Data Ingestion : Supports multiple data sources like text files, excel sheet, database etc."" + evaluation_metrics_json = evaluate_prompt_response_inputs(prompt_msg,response_msg) + print(""evaluation_metrics_json: \\n"",evaluation_metrics_json) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time +def add_new_GCSBucket(request): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + f.close() + if data == '': + data = [] + except: + data = [] + print(request.POST[""aionreferencename""]) + print(request.POST[""serviceaccountkey""]) + print(request.POST[""bucketname""]) + if request.POST[""aionreferencename""] =='' or request.POST[""serviceaccountkey""] == '' or request.POST[""bucketname""] == '' : + + return 'error' + newdata = {} + + + newdata['Name'] = request.POST[""aionreferencename""] + newdata['GCSServiceAccountKey'] = request.POST[""serviceaccountkey""] + newdata['GCSbucketname'] = request.POST[""bucketname""] + data.append(newdata) + with open(file_path, 'w') as f: + json.dump(data, f) + f.close() + return 'success' + +def get_gcs_bucket(): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + return data +def read_gcs_bucket(name,filename,DATA_FILE_PATH): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + found = False + print(data) + for x in data: + if x['Name'] == name: + GCSServiceAccountKey = x['GCSServiceAccountKey'] + GCSbucketname = x['GCSbucketname'] + found = True + break + print(found) + print(name) + try: + if found: + import io + from google.cloud import storage + storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey) + print(GCSServiceAccountKey) + print(GCSbucketname) + bucket = storage_client.get_bucket(GCSbucketname) + blob = bucket.blob(filename) + data = blob.download_as_string() + df = pd.read_csv(io.BytesIO(data), encoding = 'utf-8', sep = ',',encoding_errors= 'replace') + return 'Success',df + except Exception as e: + print(e) + return 'Error', pd.DataFrame() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import pandas as pd +import requests +from io import StringIO + +import json +import time +import shutil +import sys +from appbe import compute +from appbe.aion_config import kafka_setting +from appbe.aion_config import running_setting +from appbe.s3bucketsDB import get_s3_bucket +from appbe.gcsbucketsDB import get_gcs_bucket +from appbe.azureStorageDB import get_azureStorage +from appbe.aion_config import eda_setting +from appbe.s3bucketsDB import read_s3_bucket +from appbe.gcsbucketsDB import read_gcs_bucket +from appbe.azureStorageDB import read_azureStorage +from appbe.validatecsv import csv_validator + +import time + +from appbe.dataPath import LOG_LOCATION +from appbe.dataPath import DATA_FILE_PATH +from appbe.log_ut import logg +import logging + +def langchain_splittext(filename): + try: + from langchain.document_loaders import PyPDFLoader" +" + from langchain.text_splitter import RecursiveCharacterTextSplitter + loader = PyPDFLoader(filename) + pages = loader.load() + text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50) + texts = text_splitter.split_documents(pages) + return(texts) + except Exception as e: + print(e) + +def pd_lanfchain_textsplitter(datalocation,data): + try: + document=[] + for i in range(len(data)): + filename = os.path.join(datalocation,data.loc[i,""File""]) + out = langchain_splittext(filename) + for doc in out: + print(doc.page_content) + document.append(doc.page_content) + my_data = pd.DataFrame({'instruction': document}) + n = 1 + my_data[""response""] = my_data[""instruction""].tolist()[n:] + my_data[""instruction""].tolist()[:n] + filetimestamp = str(int(time.time())) + filename = os.path.join(DATA_FILE_PATH, 'LLMTuning_' + filetimestamp+'.csv') + my_data.to_csv(filename,index=False) + return(filename) + except Exception as e: + print(e) +def getimpfeatures(dataFile, numberoffeatures,delimiter,textqualifier): + imp_features = [] + if numberoffeatures > 20: + try: + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile,delimiter,textqualifier,optimize=1) + if eda_obj.getNumericFeatureCount() >= 2: + pca_map = eda_obj.getPCATop10Features() + imp_features = pca_map.index.values.tolist() + except Exception as e: + print(e) + pass + return imp_features + +def pdf2text(inpFileName): + try: + from pypdf import PdfReader + reader = PdfReader(inpFileName) + number_of_pages = len(reader.pages) + text="""" + OrgTextOutputForFile="""" + for i in range(number_of_pages) : + page = reader.pages[i] + text1 = page.extract_text() + text=text+text1 + import nltk + tokens = nltk.sent_tokenize(text) + for sentence in tokens: + sentence=sentence.replace(""\\n"", "" "") + if len(sentence.split()) < 4 : + continue + if len(str(sentence.split(',')).split()) < 8 : + continue + if any(chr.isdigit() for chr in sentence) : + continue + OrgTextOutputForFile= OrgTextOutputForFile+str(sentence.strip()) + #print(""\\n\\n\\n\\nOrgTextOutputForFile------------->\\n\\n\\n"",OrgTextOutputForFile) + return (OrgTextOutputForFile) + except Exception as e: + print(""Encountered exception. 
{0}"".format(e)) + +def getcommonfields(): + computeinfrastructure = compute.readComputeConfig() + from appbe.aion_config import settings + usecasetab = settings() + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + context = {'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab,'azurestorage':get_azureStorage()} + return context + +def getusercasestatus(request): + if 'UseCaseName' in request.session: + selected_use_case = request.session['UseCaseName'] + else: + selected_use_case = 'Not Defined' + + if 'ModelVersion' in request.session: + ModelVersion = request.session['ModelVersion'] + else: + ModelVersion = 0 + + if 'ModelStatus' in request.session: + ModelStatus = request.session['ModelStatus'] + else: + ModelStatus = 'Not Trained' + return selected_use_case,ModelVersion,ModelStatus + +def delimitedsetting(delimiter='',textqualifier='',other=''): + if delimiter != '': + if delimiter.lower() == 'tab' or delimiter.lower() == '\\t': + delimiter = '\\t' + elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';': + delimiter = ';' + elif delimiter.lower() == 'comma' or delimiter.lower() == ',': + delimiter = ',' + elif delimiter.lower() == 'space' or delimiter.lower() == ' ': + delimiter = ' ' + elif delimiter.lower() == 'other' or other.lower() != '': + if other != '': + delimiter = other + else: + delimiter = ',' + elif delimiter != '': + delimiter = delimiter + else: + delimiter = ',' + else: + delimiter = ',' + if textqualifier == '': + textqualifier = '""' + return delimiter,textqualifier + +def multipleZipExtraction(data,DATA_FILE_PATH): + from zipfile import ZipFile + try: + import glob + filetimestamp = str(int(time.time())) + extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp) + os.mkdir(extracted_data) + with ZipFile(data, 'r') as zObject: + zObject.extractall(extracted_data) + csv_files = glob.glob(r'{}\\*.{}'.format(extracted_data,'csv')) + df_csv_append = pd.DataFrame() + for file in csv_files: + df = pd.read_csv(file) + df_csv_append = df_csv_append.append(df, ignore_index=True) + for f in os.listdir(extracted_data): + os.remove(os.path.join(extracted_data, f)) + #os.mkdir(extracted_data) + combined_data = os.path.join(extracted_data,filetimestamp+'.csv') + df_csv_append.to_csv(combined_data) + return combined_data + except Exception as e: + if os.path.exists(extracted_data): + shutil.rmtree(extracted_data) + #print (e) + return '' + + + +def tarFileExtraction(data,DATA_FILE_PATH): + try: + import tarfile + filetimestamp = str(int(time.time())) + extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp) + os.mkdir(extracted_data) + if data.endswith('tar'): + file = tarfile.open(data) + file.extractall(extracted_data) + file.close() + + for f in os.listdir(extracted_data): + if f.endswith('csv') or f.endswith('tsv'): + dataFile = os.path.join(extracted_data,f) + return dataFile + except Exception as e: + if os.path.exists(extracted_data): + shutil.rmtree(extracted_data) + print (e) + return '' +# ------ changes for the bug 10379 starts---------------- By Usnish ------ +def checkRamAfterLoading(dataPath): + import psutil + availableRam = psutil.virtual_memory()[1]/1e9 + filesize = os.path.getsize(dataPath)/1e9 + return availableRam < 2*filesize +def checkRamBeforeLoading(dataPath): + import psutil + filesize = os.path.getsize(dataPath)/1e9 + totalRam = 
psutil.virtual_memory()[0] / 1e9 + if( filesize > 0.8 * totalRam): + return ""File size is larger than the 80% of Total RAM."" + return """" +# ------ changes for the bug 10379 ends---------------- By Usnish ------ + + +# ---------- 10012:Decision Threshold related Changes S T A R T ---------- +# This method is used to check If -> +# 80% of available RAM size is greater than ingested data (or not). +def checkRAMThreshold(dataPath): + import psutil + availableRam = psutil.virtual_memory()[1]/1e9 + filesize = os.path.getsize(dataPath)/1e9 + return (0.8 * availableRam) > filesize +# ---------------------- E N D ---------------------- + + +# Text Data Labelling using LLM related changes +# -------------------------------------------------------- +def ingestTextData(request, DATA_FILE_PATH): + log = logging.getLogger('log_ux') + try: + Datapath = request.FILES['DataFilePath'] + from appbe.eda import ux_eda + + ext = str(Datapath).split('.')[-1] + request.session['uploadfiletype'] = 'Local' + request.session['datatype'] = 'Normal' + filetimestamp = str(int(time.time())) + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + dataPath = dataFile + + request.session['textdatapath'] = dataPath + + # import pdb + # pdb.set_trace() + + # check_df = pd.read_csv(dataPath) + eda_obj = ux_eda(dataPath) + check_df = eda_obj.getdata() + + df_top = check_df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + + # featuresList = check_df.columns.tolist() + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + + noTextFeature = False + if len(textFeature) == 0: + noTextFeature = True + + context = {'raw_data':df_json, 'featuresList':textFeature, 'selected':'DataOperations', 'noTextFeature':noTextFeature} + + return context + + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + context = {'error': 'Failed to read data','emptycsv' : 'emptycsv'} + log.info('Text Data Ingestion -- Error : Failed to read data, '+str(e)) + log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return context +# ---------------------- E N D --------------------------- + + + +def ingestDataFromFile(request,DATA_FILE_PATH): + log = logging.getLogger('log_ux') + delimiter,textqualifier = delimitedsetting(request.POST.get('delimiters'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value')) + request.session['delimiter'] = delimiter + request.session['textqualifier'] = textqualifier + context = getcommonfields() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,}) + try: + t1 = time.time() + request.session['uploadfiletype'] = '' + request.session['uploadLocation'] = '' + data_is_large = False + check_df = pd.DataFrame() + if request.method == 'POST': + if 'ModelVersion' in request.session: + ModelVersion = request.session['ModelVersion'] + else: + ModelVersion = 0 + if 'ModelName' not in 
request.session: + movenext = False + request.session['currentstate'] = 0 + context.update({'tab': 'tabconfigure', 'error': 'Please Create/Select the Use Case First', 'movenext': movenext,'currentstate': request.session['currentstate']}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please Create/Select the Use Case First') + return context + else: + type = request.POST.get(""optradio"") + if type == ""s3Bucket"": + try: + request.session['uploadfiletype'] = 'S3Bucket' + bucketname = request.POST.get('s3bucketname') + fileName = request.POST.get('s3file') + if fileName != '': + status,msg,check_df = read_s3_bucket(bucketname,fileName,DATA_FILE_PATH) + if status == 'Success': + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile, index=False) + request.session['datalocation'] = dataFile + + else : + request.session['currentstate'] = 0 #usnish + context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : ' + str(msg)) + return context + + else: #usnish + request.session['currentstate'] = 0 + context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') + return context + except Exception as e: + request.session['currentstate'] = 0 + context.update({'error': str(e),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+ str(e)) + return context + '''request.session['datalocation'] = ""S3""''' + + + # -------------------------------- Graviton-Integration Changes S T A R T -------------------------------- + elif type == ""graviton"": + try: + dataServiceId = request.POST.get('dataservice') + metadataId = request.POST.get('metadata') + data = [] + from appbe.aion_config import get_graviton_data + graviton_url,graviton_" +"userid = get_graviton_data() + gravitonURL = graviton_url + gravitonUserId = graviton_userid + + # url = 'https://xenius.azurewebsites.net/api/getdata?userid=1&dataserviceid='+str(dataserviceId) +'&metadataid=' +str(metadataId) + url = gravitonURL + 'getdata?userid=' + gravitonUserId +'&dataserviceid='+str(dataServiceId) +'&metadataid=' +str(metadataId) + print(url) + response = requests.get(url) + statuscode = response.status_code + if statuscode == 200: + json_dictionary = json.loads(response.content) + data = json_dictionary['result'] + + firstElement = next(iter(data[0].keys())) + check_df = pd.DataFrame.from_dict(data[0][firstElement]) + + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile, index=False) + + request.session['uploadfiletype'] = 'Graviton' + request.session['datalocation'] = str(dataFile) + except Exception as e: + print(e) + request.session['currentstate'] = 0 + context.update({'error':'Check log file for more details','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error :'+str(e)) + return context 
+ # ------------------------------------------------ E N D ------------------------------------------------- + + elif type == ""azurestorage"": + try: + request.session['uploadfiletype'] = 'AzureStorage' + azurename = request.POST.get('azurename') + directoryname = request.POST.get('azuredirectory') + if directoryname != '': + + status,msg,check_df = read_azureStorage(azurename,directoryname,DATA_FILE_PATH) + if status == 'Success': + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile, index=False) + '''request.session['datalocation'] = ""S3""''' + request.session['datalocation'] = dataFile + else : + request.session['currentstate'] = 0 #usnish + context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(msg)) + + return context + + else: #usnish + request.session['currentstate'] = 0 + context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') + return context + except Exception as e: + print(e) + request.session['currentstate'] = 0 + context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, '+str(e)) + return context + elif type == ""googleBucket"": + try: + request.session['uploadfiletype'] = 'GCPBucket' + bucketname = request.POST.get('gcpbucketname') + fileName = request.POST.get('file1') + if fileName != '': + + status,msg,check_df = read_gcs_bucket(bucketname,fileName,DATA_FILE_PATH) + if status == 'Success': + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile, index=False) + '''request.session['datalocation'] = ""S3""''' + request.session['datalocation'] = dataFile + else : + request.session['currentstate'] = 0 #usnish + context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(msg)) + return context + else: #usnish + request.session['currentstate'] = 0 + context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') + return context + except Exception as e: + request.session['currentstate'] = 0 + context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e)) + return context + elif type == ""url"": + try: + request.session['uploadfiletype'] = 'URL' + url_text = request.POST.get('urlpathinput') + log.info('Data ingesttion from URL..') + request.session['uploadLocation'] = url_text + url = url_text + check_df = pd.read_csv(url) + filetimestamp = str(int(time.time())) + dataFile = 
os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile,index=False) + request.session['datalocation'] = dataFile + except Exception as e: + request.session['currentstate'] = 0 + e = str(e) + print(e) + if e.find(""tokenizing"")!=-1: + error = ""This is not an open source URL to access data"" + context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'}) + elif e.find(""connection"")!=-1: + error = ""Can not access the URL through HCL network, please try with other network"" + context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'}) + else: + error = 'Please provide a correct URL' + context.update({'error': error,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+error + ', '+str(e)) + return context + elif type == ""nifi"": + try: + request.session['uploadfiletype'] = 'Nifi' + log.info('Data ingesttion from Nifi..') + url_text = request.POST.get('nifiurlpathinput') + request.session['uploadLocation'] = url_text + response = requests.get(url_text) + csv_str = response.content.decode('utf-8') + check_df = pd.read_csv(StringIO(csv_str)) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile,index=False) + request.session['datalocation'] = dataFile + except requests.exceptions.ConnectionError: + request.session['currentstate'] = 0 + context.update({'error': 'Connection Error','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:Connection Error') + return context + except Exception as e: + print(e) + request.session['currentstate'] = 0 + e = str(e) + context.update({'error': e,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+str(e)) + return context + + elif type == ""tblaiondata"": + try: + dataset = request.POST.get('datasetname') + print('dataset',dataset) + from appbe.dataPath import DATA_DIR + from appbe.sqliteUtility import sqlite_db + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + temp_data = sqlite_obj.read_data('dataingest') + + dataFile = '' + for x in temp_data: + if x[1] == dataset: + dataFile = x[0] + check_df = pd.read_csv(dataFile) + request.session['datalocation'] = dataFile + except Exception as e: + request.session['currentstate'] = 0 + context.update({'error': 'Failed to read data','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e)) + return context + + else: + if request.FILES: + Datapath = request.FILES['DataFilePath'] + if Datapath.size > 31457280: + context.update({'tab': 'tabconfigure','error': 'Upload limit is 30 MB only, use local file option for larger file','currentstate': request.session['currentstate'], 'ModelVersion': ModelVersion}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Upload limit is 30 MB only, use local file option for larger file') + return context + ext = str(Datapath).split('.')[-1] + 
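+ # Local upload path: the file is staged under DATA_FILE_PATH as AION_<timestamp>,
+ # keeping the original extension only for recognised types (csv/tsv/tar/zip/avro/parquet)
+ # so the archive-extraction and format-specific parsing branches below can key off it.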
request.session['uploadfiletype'] = 'Local' + request.session['datatype'] = 'Normal' + filetimestamp = str(int(time.time())) + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + dataPath = dataFile + else: + dataPath = request.POST.get('localfilePath') + #print(os.path.getsize(dataPath)) + + # 10012:Decision Threshold related Changes - S T A R T + #removed few lines related to the check to not allow data to be ingested + # E N D + + + if request.POST.get('optfiletype') == 'avro': + try: + import pandavro as pdx + if os.path.isdir(dataPath): + for f in os.listdir(dataPath): + if f.endswith('avro'): + processed_df = pdx.read_avro(f) + if not df.empty: + df = df.append(processed_df, ignore_index=True) + else: + df = pd.DataFrame(processed_df) + elif os.path.isfile(dataPath): + import pandavro as pdx + df = pdx.read_avro(dataPath) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + df.to_csv(dataFile, index=False) + request.session['datalocation'] = str(dataFile) + except Exception as e: + print(e) + + elif request.POST.get('optfiletype') == 'parquet': + if os.path.isdir(dataPath): + for f in os.listdir(dataPath): + if f.endswith('parquet'): + processed_df = pd.read_parquet(f, engine='pyarrow') + if not df.empty: + df = df.append(processed_df, ignore_index=True) + else: + df = pd.DataFrame(processed_df) + elif os.path.isfile(dataPath): + df = pd.read_parquet(dataPath, engine='pyarrow') + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + df.to_csv(dataFile, index=False) + request.session['datalocation'] = str(dataFile) + elif request.POST.get('optfiletype') == 'dilimeted': + + if os.path.isdir(dataPath): + for f in os.listdir(dataPath): + if f.endswith('csv') or f.endswith('tsv'): + processed_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') + if not df.empty: + df = df.append(processed_df, ignore_index=True) + else: + df = pd.DataFrame(processed_df) + filetimestamp = str(int(time." 
+"time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + df.to_csv(dataFile, index=False,sep=delimiter,quotechar=textqualifier) + request.session['datalocation'] = str(dataFile) + else: + dataFile = dataPath + request.session['uploadfiletype'] = 'Local' + request.session['datatype'] = 'Normal' + + FileReadingstatus = True + request.session['currentstate'] = 0 + + if dataPath.endswith('tar'): + dataFile = tarFileExtraction(dataPath,DATA_FILE_PATH) + + if dataPath.endswith('zip'): + dataFile = multipleZipExtraction(dataPath,DATA_FILE_PATH) + + if dataFile == '': + FileReadingstatus = False + msg = 'Please provide a file name' + elif dataFile.endswith("".xls"") or dataFile.endswith("".xlsx""): + FileReadingstatus = False + msg = 'Please provide a dilimited file' + elif not os.path.isfile(dataFile): + FileReadingstatus = False + msg = 'File does not exist' + else: + check_df = pd.DataFrame(); + try: + try: + + cvobj = csv_validator() + valid_header, validrows, rownumbers = cvobj.validate_header(dataFile,delimiter,textqualifier) + request.session['datalocation'] = str(dataFile) + if not validrows: + FileReadingstatus = False + msg = 'Data Format issue' + else: + if valid_header: + check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100,encoding_errors= 'replace') + request.session['datalocation'] = str(dataFile) + else: + check_df = pd.read_csv(dataFile, header=None, encoding='utf8', prefix='X',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + check_df.to_csv(dataFile, index=False) + request.session['datalocation'] = str(dataFile) + except Exception as e: + print(e) + check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100) + request.session['datalocation'] = str(dataFile) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(e)) + except UnicodeDecodeError: + FileReadingstatus = False + msg = 'Only utf8 file encoding supported' + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:'+msg) + except pd.errors.EmptyDataError: + FileReadingstatus = False + msg = 'File is empty' + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:'+msg) + except pd.errors.ParserError: + FileReadingstatus = False + msg = 'File Parsng Error' + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg) + except FileNotFoundError: + FileReadingstatus = False + msg = 'File does not exist' + request.session['currentstate'] = 0 + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg) + except Exception as e: + msg = 'File Read Error' + FileReadingstatus = False + print(e) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + msg+', '+str(e)) + + if check_df.empty and FileReadingstatus: + FileReadingstatus = False + msg = 'Date file is empty' + + if not FileReadingstatus: + context.update({'tab': 'tabconfigure','error': msg,'currentstate': request.session['currentstate'], 
'ModelVersion': ModelVersion}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+msg) + return context + + + # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- + data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) + msg = """" + if data_is_under_RAM_threshold == False: + msg = ""AION will not be able to train on data set provided as it is bigger than available RAM, Please choose distributed learning for further processing."" + # ------------------------------------------------------ E N D ------------------------------------------------------ + + + check_df.rename(columns=lambda x: x.strip(), inplace=True) + featuresList = check_df.columns.tolist() + numberoffeatures = len(featuresList) + imp_features = getimpfeatures(dataFile,numberoffeatures,delimiter,textqualifier) + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + sample_size = int(eda_setting()) + + dflength = len(check_df) + if dflength > sample_size: + samplePercentage = round(float((sample_size/dflength) * 100),2) + samplePercentval = samplePercentage / 100 + showRecommended = True + df_top = check_df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + + records = check_df.shape[0] + request.session['NoOfRecords'] = records + statusmsg = 'Data File Uploaded Successfully' + t2 = time.time() + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str( + round(t2 - t1)) + ' sec' + ' : ' + 'Success') + # EDA Subsampling changes + context.update({'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList,'tab': 'tabconfigure', 'data': df_json, 'status_msg': statusmsg, + 'selected': 'modeltraning','imp_features':imp_features,'numberoffeatures':numberoffeatures, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'exploratory': False}) + if msg!="""": + context.update({'data_size_alert': msg}) + return context + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + request.session['currentstate'] = 0 + context.update({'error': 'Failed to read data','emptycsv' : 'emptycsv'}) + log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Failed to read data, '+str(e)) + log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return context + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os,sys +import json +import platform +import subprocess +def kafka_setting(): + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')) + f = open(file_path, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + from os.path import expanduser + home = expanduser(""~"") + if platform.system() == 'Windows': + DEPLOY_LOCATION = os.path.join(home,'AppData','Local','HCLT','AION','target','kafka') + else: + DEPLOY_LOCATION = os.path.join(home,'HCLT','AION','target','kafka') + configSettingsJson['kafkalocation'] = DEPLOY_LOCATION + return(configSettingsJson) + +def start_tracking(): + from appbe.dataPath import DEPLOY_LOCATION + import platform + mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts','mlflow.exe')) + script_path = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts')) + #Updating path for system environment; Bug-13835 + os.environ['PATH']= os.environ['PATH']+ ';'+ str(script_path) + DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns') + if platform.system() == 'Windows': + subprocess.Popen([sys.executable, mlflowpath,""ui"", ""--backend-store-uri"",""file:///""+DEPLOY_LOCATION]) + else: + subprocess.Popen(['mlflow',""ui"",""-h"",""0.0.0.0"",""--backend-store-uri"",""file:///""+DEPLOY_LOCATION]) + +def aion_tracking(): + status = 'Success' + import requests + try: + response = requests.get('http://localhost:5000') + if response.status_code != 200: + status = 'Error' + except Exception as inst: + print(inst) + status = 'Error' + return status + +def aion_service(): + try: + if platform.system() == 'Windows': + nooftasks = getrunningstatus('AION_Service') + else: + nooftasks = getrunningstatus('run_service') + if len(nooftasks): + status = 'Running' + else: + if platform.system() == 'Windows': + servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','sbin','AION_Service.bat')) + os.system('start cmd /c ""'+servicepath+'""') + else: + servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','bin','run_service.py')) + subprocess.Popen([sys.executable,servicepath]) + status = 'Started' + except Exception as inst: + print(inst) + status = 'Error' + return status + + +def getrunningstatus(name): + try: + taskdetails = [] + if platform.system() == 'Windows': + r = ([line.split() for line in subprocess.check_output('tasklist /v /FI ""IMAGENAME eq conhost.exe""').decode('UTF-8').splitlines()]) + r.append([line.split() for line in subprocess.check_output('tasklist /v /FI ""IMAGENAME eq cmd.exe""').decode('UTF-8').splitlines()]) + else: + r = ([line.split() for line in subprocess.check_output(""ps -ef | grep .py"",shell=True).decode('UTF-8').splitlines()]) + for i in range(len(r)): + s = r[i] + if any(name in j for j in s): + taskdetails.append('Yes') + break + return (taskdetails) + except Exception as inst: + print(inst) + status = 'Error' + return status +def getTasks(mlflow,consumer,service): + mlflowlist = [] + consumerlist=[] + servicelist = [] + #r = os.popen('tasklist /v').read().strip().split('\\n') + try: + if platform.system() == 'Windows': + r = ([line.split() for line in subprocess.check_output('tasklist /v /FI ""IMAGENAME eq conhost.exe""').decode('UTF-8').splitlines()]) + r.append([line.split() for line in subprocess.check_output('tasklist /v /FI ""IMAGENAME eq cmd.exe""').decode('UTF-8').splitlines()]) + else: + r = 
([line.split() for line in subprocess.check_output(""ps -ef | grep .py"",shell=True).decode('UTF-8').splitlines()]) + except Exception as e: + print(e) + r = [] + + #print(r) + #print ('# of tasks is %s' % (len(r))) + for i in range(len(r)): + s = r[i] + if any(mlflow in j for j in s): + mlflowlist.append('Yes') + if any(consumer in j for j in s): + consumerlist.append('Yes') + if any(service in j for j in s): + servicelist.append('Yes') + return (mlflowlist,consumerlist,servicelist) + +def running_setting(): + otherApps = {} + if platform.system() == 'Windows': + mlflowlist,consumerlist,servicelist = getTasks('AION_MLFlow','AION_Consumer','AION_Service') + else: + mlflowlist,consumerlist,servicelist = getTasks('run_mlflow','AION_Consumer','run_service') + if len(mlflowlist): + otherApps['modeltracking'] = 'Running' + else: + " +"otherApps['modeltracking'] = 'Not Running' + + #nooftasks = getTasks('AION_Consumer') + if len(consumerlist): + otherApps['consumer'] = 'Running' + else: + otherApps['consumer'] = 'Not Running' + + #nooftasks = getTasks('AION_Service') + if len(servicelist): + otherApps['service'] = 'Running' + else: + otherApps['service'] = 'Not Running' + return(otherApps) + + +#EDA Performance change +# ---------------------------- +def eda_setting(): + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','eda.config') + sample_size='' + try: + if(os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'sample_size=' in line: + sample_size = line.split('=',1)[1] + except Exception as inst: + pass + return(sample_size) +def get_telemetryoptout(): + telemetryoptuout = ""No"" + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + try: + if sqlite_obj.table_exists('settings'): + data = sqlite_obj.read_data('settings') + for values in data: + telemetryoptuout = values[7] + + else: + telemetryoptuout = 'No' + except Exception as e: + print(e) + telemetryoptuout ='No' + return telemetryoptuout +def get_edafeatures(): + No_of_Permissible_Features_EDA = """" + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + try: + if sqlite_obj.table_exists('settings'): + data = sqlite_obj.read_data('settings') + for values in data: + No_of_Permissible_Features_EDA = values[3] + + else: + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') + if (os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'No_of_Permissible_Features_EDA=' in line: + No_of_Permissible_Features_EDA = line.split('=', 1)[1] + except Exception as e: + print(e) + No_of_Permissible_Features_EDA =20 + return No_of_Permissible_Features_EDA + +def get_graviton_data(): + graviton_url = """" + graviton_userid = """" + + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + try: + if sqlite_obj.table_exists('settings'): + data = sqlite_obj.read_data('settings') + for values in data: + graviton_url = values[0] + graviton_userid = values[1] + + else: + configfilepath = 
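+ # No mlflow UI task was found in the process list, so model tracking is reported as not running.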
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') + if (os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'graviton_url=' in line: + graviton_url = line.split('=', 1)[1] + if 'graviton_userid=' in line: + graviton_userid = line.split('=', 1)[1] + + except Exception as e: + print(e) + graviton_url = """" + graviton_userid = """" + return graviton_url,graviton_userid + + +def get_llm_data(): + apiKeyIdLLM = """" + apiUrlLLM = """" + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + try: + if sqlite_obj.table_exists('openai'): + data = sqlite_obj.read_data('openai')[0] + param_keys = ['api_type','api_key','api_base','api_version'] + openai_data = dict((x,y) for x,y in zip(param_keys,data)) + return openai_data['api_key'],openai_data['api_base'],openai_data['api_type'],openai_data['api_version'] + except Exception as e: + print(e) + apiKeyIdLLM = """" + apiUrlLLM = """" + return apiKeyIdLLM,apiUrlLLM,"""","""" + + + +def settings(): + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config') + usecase='disable' + graviton_url = '' + graviton_userid = '' + apiKeyIdLLM = '' + apiUrlLLM = '' + No_of_Permissible_Features_EDA = '' + try: + from appbe.sqliteUtility import sqlite_db + import pandas as pd + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('settings'): + column_names = sqlite_obj.column_names('settings') + data = sqlite_obj.read_data('settings') + if 'telemetryOptOut' not in column_names: + query = 'Alter Table settings ADD telemetryOptOut TEXT' + sqlite_obj.execute_query(query) + + if 'No_of_Permissible_Features_EDA' not in column_names or 'apiKeyIdLLM' not in column_names: + sqlite_obj.drop_table('settings') + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') + file = open(configfilepath, ""r"") + dataread = file.read() + for line in dataread.splitlines(): + if 'usecase=' in line: + cusecase = line.split('=', 1)[1] + if 'graviton_url=' in line: + cgraviton_url = line.split('=', 1)[1] + if 'graviton_userid=' in line: + cgraviton_userid = line.split('=', 1)[1] + if 'No_of_Permissible_Features_EDA=' in line: + cNo_of_Permissible_Features_EDA = line.split('=', 1)[1] + if 'apiKeyIdLLM=' in line: + capiKeyIdLLM = '' + if 'apiUrlLLM=' in line: + capiUrlLLM = '' + file.close() + if 'apiKeyIdLLM' not in column_names: + apiKeyIdLLM = capiKeyIdLLM + if 'apiUrlLLM' not in column_names: + apiUrlLLM = capiUrlLLM + if 'No_of_Permissible_Features_EDA' not in column_names: + No_of_Permissible_Features_EDA = cNo_of_Permissible_Features_EDA + newdata = {} + newdata.update({'graviton_url':[data[0][0]],'graviton_userid': [data[0][1]],'usecase': [data[0][2]],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :apiKeyIdLLM,'apiUrlLLM':apiUrlLLM,'telemetryOptOut':telemetryOptOut}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings') + data = sqlite_obj.read_data('settings') + for values in data: + graviton_url = values[0] + graviton_userid = values[1] + usecase = values[2] + No_of_Permissible_Features_EDA = values[3] + telemetryOptOut = values[7] + else: + configfilepath = 
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') + if (os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + apiKeyIdLLM = '' + apiUrlLLM = '' + for line in read.splitlines(): + if 'usecase=' in line: + usecase = line.split('=', 1)[1] + if 'graviton_url=' in line: + graviton_url = line.split('=', 1)[1] + if 'graviton_userid=' in line: + graviton_userid = line.split('=', 1)[1] + if 'No_of_Permissible_Features_EDA=' in line: + No_of_Permissible_Features_EDA = line.split('=', 1)[1] + newdata = {} + newdata.update({'graviton_url':[graviton_url],'graviton_userid': [graviton_userid],'usecase': [usecase],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :'','apiUrlLLM':'','telemetryOptOut':['No']}) + # --------else create table and update the data, write data will create a table if it does nt exists----- + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings') + return(usecase) + except Exception as e: + print(e) + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config') + try: + if(os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'usecase=' in line: + usecase = line.split('=',1)[1] + if 'graviton_url=' in line: + graviton_url = line.split('=',1)[1] + if 'graviton_userid=' in line: + graviton_userid = line.split('=',1)[1] + if 'No_of_Permissible_Features_EDA=' in line: + No_of_Permissible_Features_EDA = line.split('=', 1)[1] + if 'apiKeyIdLLM=' in line: + apiKeyIdLLM = line.split('=', 1)[1] + if 'apiUrlLLM=' in line: + apiUrlLLM = line.split('=', 1)[1] + except Exception as inst: + pass + external_system = 'enable' + semantico = 'enable' + return(usecase) + + +def addKafkaModel(request,datalocation): + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')) + f = open(file_path, ""r+"") + configSettings = f.read() + configSettingsJson = json.loads(configSettings) + modelSignature = request.POST.get('modelsignature') + timeframe = request.POST.get('timeframe') + command = request.POST.get('kafkasubmit') + if command.lower() == 'configure': + configSettingsJson['timeFrame'][modelSignature] = str(timeframe) + configSettingsJson['trainingDataLocation'][modelSignature] = datalocation + elif command.lower() == 'unconfigure': + del configSettingsJson['timeFrame'][modelSignature] + updatedConfigSettingsJson = json.dumps(configSettingsJson) + f.seek(0) + f.write(updatedConfigSettingsJson) + f.truncate() + f.close() + +def saveopenaisettings(request): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + if sqlite_obj.table_exists('openai'): + updated_data = 'api_type=""'+request.POST.get('api_type')+'"",api_key=""'+request.POST.get('apiKeyIdLLM')+'"",api_base=""'+request.POST.get('apiUrlLLM')+'"",api_version=""'+request.POST.get('api_version')+'""' + sqlite_obj.update_data(updated_data,'','openai') + else: + newdata = {} + newdata.update({'api_type':['azure'],'api_key': [request.POST.get('apiKeyIdLLM')],'api_base': [request.POST.get('apiUrlLLM')],'api_version':[request.POST.get('api_version')]}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'openai') + except Exception as e: + print(e) + +def 
savegravitonconfig(request): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + updated_data = 'graviton_url=""'+request.POST.get('graviton_url')+'"",graviton_userid=""'+request.POST.get('graviton_userid')+'""' + sqlite_obj.update_data(updated_data,'settingsid=1','settings') + except Exception as e: + print(e) +def saveconfigfile(request): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + import pandas as pd + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'config.db') + updated_data = 'usecase=""'+request.POST" +".get('usecasetab')+'"",No_of_Permissible_Features_EDA=""'+request.POST.get('edefeatures')+'"",telemetryOptOut=""'+request.POST.get('telemetryOptOut')+'""' + print(updated_data) + sqlite_obj.update_data(updated_data,'settingsid=1','settings') + return request.POST.get('usecasetab') + except Exception as e: + print(e) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import plotly.figure_factory as ff +from plotly.subplots import make_subplots +import plotly.graph_objects as go +from wordcloud import WordCloud, STOPWORDS +import pandas as pd +import numpy as np +from appbe import distribution +import io +import urllib +import os +import sys +import base64 +from appbe import help_Text as ht +import math +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from natsort import natsorted +from sklearn.cluster import KMeans +import json +from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator + +from appbe.aion_config import eda_setting +from dython.nominal import associations +def calculateNumberofCluster(featureData): + Sum_of_squared_distances = [] + K = range(1, 15) + for k in K: + km = KMeans(n_clusters=k) + km = km.fit(featureData) + Sum_of_squared_distances.append(km.inertia_) + x1, y1 = 1, Sum_of_squared_distances[0] + x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1] + distances = [] + for inertia in range(len(Sum_of_squared_distances)): + x0 = inertia + 2 + y0 = Sum_of_squared_distances[inertia] + numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) + denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2) + distances.append(numerator / denominator) + n_clusters = distances.index(max(distances)) + 2 + #print(n_clusters) + return (n_clusters) + +def get_eda(request): + hopkins_val = '' + hopkins_tip = '' + if request.session['datatype'] == 'Normal': + from appbe.eda import ux_eda + + # EDA Subsampling changes + # ---------------------------- + edasampleSize = request.POST.get('SubsampleSize') + edasampleSize = str(int(edasampleSize)/100) + + sampleFile = str(request.session['datalocation']) + repText = sampleFile[sampleFile.find('sub_'):sampleFile.find('_sampled_') + 9] + if len(repText) == 30: + dataLocation = sampleFile.replace(repText,"""") + else: + 
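+ # No sub-sample marker was found in the file name, so keep the original data file location as-is.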
dataLocation = sampleFile + + eda_obj = ux_eda(dataLocation,request.session['delimiter'],request.session['textqualifier']) + + df0 = eda_obj.getdata() + + if os.path.isfile(dataLocation): + if(len(edasampleSize) > 0): + df0 = df0.sample(frac = float(edasampleSize)) + + #EDA Performance change + # ---------------------------- + dflength = len(df0) + # sample_size = int(eda_setting()) + # if dflength >= sample_size: + # eda_obj.subsampleData(sample_size) + # else: + eda_obj.subsampleData(dflength) + # ---------------------------- + + TrainSampleSelected = request.POST.get('TrainSampleSize') + if(TrainSampleSelected == 'EDASize'): + from pathlib import Path + filePath = Path(dataLocation) + + import datetime + timestamp = datetime.datetime.now().replace(microsecond=0).isoformat() + timestamp = str(timestamp.replace("":"","""")) + + sub_sampledFile = filePath.parent/(""sub_"" + timestamp + ""_sampled_""+filePath.name) + # sub_sampledFile = filePath.parent/(usename + ""_sub_sampled_""+filePath.name) + + df0.to_csv(sub_sampledFile,index=False,) + request.session['datalocation'] = str(sub_sampledFile) + records = df0.shape[0] + request.session['NoOfRecords'] = records + + edaFeatures = request.POST.getlist('InputFeatures') + request.session['edaFeatures'] = edaFeatures + + if(len(edaFeatures) > 0): + eda_obj.subsetFeatures(edaFeatures) + # ---------------------------- + + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + + request.session['edanumericCatFeatures'] = numericCatFeatures + request.session['edatextFeature'] = textFeature + + categoricalfeatures = catfeatures + numericfeaturecount = eda_obj.getNumericFeatureCount() + cluster_details = [] + dataCharts = [] + # correlated_features=[] + pca_details = [] + + if numericfeaturecount > 1: + try: + + cluster_details,hopkins_val = eda_obj.getClusterDetails() + if hopkins_val!='': + if float(hopkins_val) <0.3: + hopkins_tip = ht.hopkins_tip[0] + elif float(hopkins_val)>0.7: + hopkins_tip = ht.hopkins_tip[2] + else: + hopkins_tip = ht.hopkins_tip[1] + else: + hopkins_tip = '' + except Exception as e: + print(""========================""+str(e)) + pass + + try: + pca_map = eda_obj.getPCATop10Features() + pca_details = pca_map + yaxis_data = pca_map.tolist() + xaxis_data = pca_map.index.values.tolist() + import plotly.graph_objects as go + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) + cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Explained Variance Ratio') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) + dataCharts.append(bargraph) + except: + pass + df = eda_obj.getdata() + + # try: + # top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5) + # correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr) + # except: + # pass + + else: + df = eda_obj.getdata() + + # # EDA Subsampling changes + # # ---------------------------- + # if os.path.isfile(dataLocation): + # if dflength < 10000: + # if(len(edasampleSize) > 0): + # df = df.sample(frac = float(edasampleSize)) + # ---------------------------- + + if len(textFeature) > 0: + commonfeatures = eda_obj.getTopTextFeatures(10) + # comment_words = eda_obj.word_token() + del eda_obj + + wordcloudpic = '' + showtextFeature = False + if len(textFeature) > 0: + showtextFeature = True + # try: + # stopwords = set(STOPWORDS) + # wordcloud = WordCloud(width=800, height=800, 
background_color='white', stopwords=stopwords, + # min_font_size=10).generate(comment_words) + # try: + # plt.clf() + # except: + # pass + # plt.imshow(wordcloud, interpolation='bilinear') + # plt.axis(""off"") + # plt.tight_layout(pad=0) + # image = io.BytesIO() + # plt.savefig(image, format='png') + # image.seek(0) + # string = base64.b64encode(image.read()) + # wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string) + # except: + # pass + + xaxis_data = commonfeatures['most_common_words'].tolist() + yaxis_data = commonfeatures['freq'].tolist() + import plotly.graph_objects as go + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) + cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Count') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) + dataCharts.append(bargraph) + + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + + df_json = json.loads(df_json) + # if len(df) > 10000: + # df1 = df.sample(n=10000, random_state=1) + # else: + # df1 = df + df1 = df + + data_deep_json = df_top.to_json(orient='records') #df1.to_json(orient='records') + + try: + gfsg = GenericFeatureStatisticsGenerator() + proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': df1}]) + protostr = base64.b64encode(proto.SerializeToString()).decode(""utf-8"") + except Exception as e: + protostr='' + print('protostr '+str(e)) + try: + correlationgraph = getCorrelationMatrix(df) + except Exception as e: + print(e) + try: + dataDrift = 'onRequest' #getDriftDistribution(numericCatFeatures, df[numericCatFeatures]) + + except Exception as e: + dataDrift = '' + print(e) + + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + statusmsg = 'Successfully Done' + + DF_list = list() + + des1 = df.describe(include='all').T + des1['missing count %'] = df.isnull().mean() * 100 + des1['zero count %'] = df.isin([0]).mean() * 100 + data = list(df.columns.values) + des1.insert(0, 'Features', data) + + des1 = des1.to_json(orient=""records"") + pca_df=pd.DataFrame() + #print(pca_details) + + # if pca_details.empty: + if len(pca_details) > 0: + pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(4) + + pca_df = pca_df.to_json(orient=""records"") + if len(df.columns) > 25: + df3 = df[df.columns[0:24]] + else: + df3 = df.copy() + #cor_mat = abs(df3.corr()) + #cor_mat = cor_mat.round(2) + try: + if len(df3.columns) > 25: + df3 = df3[df3.columns[0:24]] + cor_mat= associations(df3,compute_only=True) + cor_mat=cor_mat['corr'] + #cor_mat = df3.corr() + cor_mat = cor_mat.astype(float).round(2) + except Exception as e: + print(""creating correlation mat issue: \\n"",e) + pass + + data = list(cor_mat.index) + cor_mat.insert(0, 'Features', data) + cor_mat = cor_mat.to_json(orient=""records"") + cluster_df = pd.DataFrame.from_dict(cluster_details) + cluster_df = cluster_df.to_json(orient=""records"") + #textFeature = json.dumps(textFeature) + # 2.2 patch changes + #------------------------------------------------- + request.session['edaRecords'] = df.shape[0] + print(textFeature) + context = {'data_deep_json': data_deep_json, 'sampleFile':sampleFile,'protostr': protostr, 'data': df_json, 'oneda': True, + 'dataCharts': dataCharts,'dataDrift': dataDrift, 'drift_tip': ht.drift_tip,'des1':des1,'cluster_df':cluster_df,'hopkins_val':hopkins_val, + 
'pca_df':pca_df,'cor_mat':cor_mat,'correlationgraph': correlationgraph, 'centroids':cluster_details, 'wordcloudpic': wordcloudpic, 'showtextFeature': showtextFeature, 'textFeature': textFeature, + # 'featurepairgraph': correlated_features, + 'data_overview_tip': ht.data_overview_tip,'timeseries_analysis_tip':ht.timeseries_analysis_tip, 'feature_importance_tip': ht.feature_importance_tip,'hopkins_tip':hopkins_tip, + 'correlation_analysis_tip': ht.correlation_analysis_tip, + 'exploratory_analysis_tip': ht.exploratory_analysis_tip, 'data_deep_drive_tip': ht.data_deep_drive_tip,'status_msg': statusmsg,'selected_use_case': selected_use_case, + 'pair_graph_tip':ht.pair_graph_tip, 'fair_metrics_tip':ht.fair_metrics_tip, 'categoricalfeatures':categoricalfeatures, 'numericCatFeatures':numericCatFeatures, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':True,'NumericFeatureList':numericFeature,'dateFeature':dateFeature,'targetFeature':targetFeature} + return(context) + + + + + +# EDA Visualization changes +# ----------------------------" +" +def get_edaGraph(request): + if request.session['datatype'] == 'Normal': + from appbe.eda import ux_eda + + df_temp = dict(request.GET).get('features[]') + graphType = request.GET.get('graphType') + d3_url = request.GET.get('d3_url') + mpld3_url = request.GET.get('mpld3_url') + + dataLocation = request.session['datalocation'] + eda_obj = ux_eda(dataLocation) + + + # 2.2 patch changes + #------------------------------------------------- + edaRecords = request.session['edaRecords'] + #df = df.sample(n=int(edaRecords), random_state=1) + eda_obj.subsampleData(edaRecords) + + + eda_obj.subsetFeatures(df_temp) + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature, catfeatures = eda_obj.getFeatures() + numericfeaturecount = eda_obj.getNumericFeatureCount() + correlated_features=[] + + df = eda_obj.getdata() + if numericfeaturecount > 1: + try: + if graphType == 'Default': + top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5) + correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr) + + else: + correlated_features = getFeatureCharts(df,graphType,d3_url,mpld3_url) + except: + pass + + return correlated_features +# ---------------------------- + + +# ---------------------- 12686:Data Distribution related Changes S T A R T ---------------------- +def get_DataDistribution(request): + selectedFeature = request.GET.get('selected_feature') + + _featureItem = [] + _featureItem.append(selectedFeature) + + from appbe.eda import ux_eda + dataLocation = request.session['datalocation'] + eda_obj = ux_eda(dataLocation) + df = eda_obj.getdata() + + numericCatFeatures = request.session['edanumericCatFeatures'] + textFeature = request.session['edatextFeature'] + # features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + dataDrift = '' + + if selectedFeature in numericCatFeatures: + dataDrift = getDriftDistribution(_featureItem, df[numericCatFeatures]) + + elif selectedFeature in textFeature: + try: + comment_words = eda_obj.word_token_for_feature(selectedFeature, df[_featureItem]) + stopwords = set(STOPWORDS) + wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, + min_font_size=10).generate(comment_words) + try: + plt.clf() + except: + pass + 
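+ # Render the word cloud with matplotlib, write it to an in-memory PNG buffer, and base64-encode it so the view can return an embeddable image string.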
plt.imshow(wordcloud, interpolation='bilinear') + plt.axis(""off"") + plt.tight_layout(pad=0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + # wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string) + dataDrift = urllib.parse.quote(string) + + except: + dataDrift = '' + + del eda_obj + return dataDrift +# -------------------------------------------- E N D -------------------------------------------- + + +def get_DeepDiveData(request): + if request.session['datatype'] == 'Normal': + from appbe.eda import ux_eda + dataLocation = request.session['datalocation'] + eda_obj = ux_eda(dataLocation) + + edaRecords = request.session['edaRecords'] + edaFeatures = request.session['edaFeatures'] + + eda_obj.subsampleData(edaRecords) + eda_obj.subsetFeatures(edaFeatures) + + df = eda_obj.getdata() + data_deep_json = df.to_json(orient='records') + + return (data_deep_json) + + + +# Fairness Metrics changes +# ---------------------------- +def get_fairmetrics(request): + import mpld3 + if request.session['datatype'] == 'Normal': + from appbe.eda import ux_eda + + df_temp = dict(request.GET).get('features[]') + + d3_url = request.GET.get('d3_url') + mpld3_url = request.GET.get('mpld3_url') + global metricvalue + metricvalue = request.GET.get('metricvalue') + + dataLocation = request.session['datalocation'] + # dataLocation = 'C:\\\\MyFolder\\\\AION\\\\AION Datasets\\\\AIF360\\\\database.csv' + + eda_obj = ux_eda(dataLocation, optimize=1) + features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() + + # data = eda_obj.getdata() + data = pd.read_csv(dataLocation, na_values=['Unknown', ' ']) + features_toEncode = features + + from sklearn.preprocessing import MinMaxScaler, LabelEncoder + data_encoded = data.copy() + categorical_names = {} + encoders = {} + + # Use Label Encoder for categorical columns (including target column) + for feature in features_toEncode: + le = LabelEncoder() + le.fit(data_encoded[feature]) + data_encoded[feature] = le.transform(data_encoded[feature]) + categorical_names[feature] = le.classes_ + encoders[feature] = le + + data_perp = data_encoded + + protected_feature = df_temp[0] #'Victim Race' + target_feature = df_temp[1] #'Perpetrator Sex' + + # ------Theil index----- Task->13843 + from aif360.sklearn.metrics import generalized_entropy_index + Ti_List = [] + for items in categorical_names[protected_feature]: + df = data[data[protected_feature]==items] + le = LabelEncoder() + le.fit(df[target_feature]) + df[target_feature] = le.transform(df[target_feature]) + tf = generalized_entropy_index(df[target_feature], alpha = 1) + tf = round(tf, 4) + Ti_List.append(tf) + global Thi_idx + Thi_idx = Ti_List + + #claas_size = categorical_names[protected_feature].size + new_list = [item for item in categorical_names[protected_feature] if not(pd.isnull(item)) == True] + claas_size = len(new_list) + + if claas_size > 10: + return 'HeavyFeature' + + metrics = fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size) + figure = plot_fair_metrics(metrics) + html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url) + + return html_graph + + +def fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size): + import aif360 + from aif360.datasets import StandardDataset + from aif360.metrics import BinaryLabelDatasetMetric + cols = [metricvalue] + obj_fairness = 
[[0]] + fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols) + + for indx in range(claas_size): + priv_group = categorical_names[protected_feature][indx] + privileged_class = np.where(categorical_names[protected_feature] == priv_group)[0] + + data_orig = StandardDataset(data_perp, + label_name=target_feature, + favorable_classes=[1], + protected_attribute_names=[protected_feature], + privileged_classes=[privileged_class]) + + dataset_pred = data_orig + attr = dataset_pred.protected_attribute_names[0] + idx = dataset_pred.protected_attribute_names.index(attr) + privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] + + unprivileged_size = dataset_pred.unprivileged_protected_attributes[0].size + unprivileged_groups = [] + for idx2 in range(unprivileged_size): + unprivileged_groups.extend([{attr:dataset_pred.unprivileged_protected_attributes[idx][idx2]}]) + + metric_pred = BinaryLabelDatasetMetric(dataset_pred, + unprivileged_groups=unprivileged_groups, + privileged_groups=privileged_groups) + if metricvalue == ""Theil Index"": + row = pd.DataFrame([Thi_idx[indx]], + columns = cols , + index = [priv_group]) + elif metricvalue == ""Disparate Impact"": + row = pd.DataFrame([[metric_pred.disparate_impact()]], + columns = cols , + index = [priv_group]) + elif metricvalue == ""Statistical Parity Difference"": + row = pd.DataFrame([[metric_pred.mean_difference()]], + columns = cols , + index = [priv_group]) + #fair_metrics = fair_metrics.append(row) + fair_metrics = pd.concat([fair_metrics,row]) + + return fair_metrics + + + +def plot_fair_metrics(fair_metrics): + import matplotlib.patches as patches + plt.style.use('default') + + import seaborn as sns + fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1) + + plt.subplots_adjust( + left = 0.125, + bottom = 0.1, + right = 0.9, + top = 0.9, + wspace = .5, + hspace = 1.1 + ) + + y_title_margin = 1.2 + + plt.suptitle(""Fairness metrics"", y = 1.09, fontsize=20) + sns.set(style=""dark"") + + cols = fair_metrics.columns.values + obj = fair_metrics.loc['objective'] + if metricvalue == ""Theil Index"": + size_rect = [0.5] + rect = [-0.1] + bottom = [-0.1] + top = [2] + bound = [[-0.1,0.1]] + elif metricvalue == ""Disparate Impact"": + size_rect = [0.4] + rect = [0.8] + bottom = [0] + top = [2] + bound = [[-0.1,0.1]] + elif metricvalue == ""Statistical Parity Difference"": + size_rect = [0.2] + rect = [-0.1] + bottom = [-1] + top = [1] + bound = [[-0.1,0.1]] + + #display(Markdown(""### Check bias metrics :"")) + #display(Markdown(""A model can be considered bias if just one of these five metrics show that this model is biased."")) + + for attr in fair_metrics.index[0:len(fair_metrics)].values: + #display(Markdown(""#### For the %s attribute :""%attr)) + check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)] + #display(Markdown(""With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics""%(5 - sum(check)))) + for i in range(0,1): + plt.subplot(1, 1, i+1) + xx = fair_metrics.index[1:len(fair_metrics)].values.tolist() + yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist() + + palette = sns.color_palette('husl', len(xx)) + ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx) + + index = 0 + for p in zip(ax.get_xticks(), yy): + if (p[1] > 2.0): + _color = palette.as_hex()[index] + _val = 'Outlier(' + str(round(p[1],3)) + ')' + ax.text(p[0]-0.5, 0.02, _val, color=_color) + else: + 
ax.text(p[0], p[1]+0.05, round(p[1],3), color='k') + index = index + 1 + + plt.ylim(bottom[i], top[i]) + plt.setp(ax.patches, linewidth=0) + ax.get_xaxis().set_visible(False) + + ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1) + ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=""green"", linewidth=1, linestyle='solid')) + # plt.axhline(obj[i], color='black', alpha=0.3) + + plt.title(cols[i], fontname=""Times New Roman"", size=20,fontweight=""bold"") + ax.set_ylabel('') + ax.set_xlabel('') + + return fig +# ---------------------------- + + +def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): + try: + import numpy as np + import pandas as pd + import seaborn as sns + import matplotlib.pyplot as plt + import scipy + from scipy import stats + from scipy.stats import norm + import matplotlib.gridspec as gridspec + import math + import io, base64, urllib + np.seterr(divide='ignore', invalid='ignore') + from appbe.eda import ux_eda + eda_obj = ux_eda() + try: + plt.clf() + except: + pass + plt.rcParams.update({'figure.max_open_warning': 0}) + sns.set(color_codes=True) + pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + if len(feature) > 4: + numneroffeatures = len(feature) + plt.figure(figsize=(10, numneroffeatures*2)) + else: + plt.figure(fig" +"size=(10,5)) + + for i in enumerate(feature): + + dataType = dataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) + dataframe[i[1]] = dataframe[i[1]].cat.codes + dataframe[i[1]] = dataframe[i[1]].astype(int) + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) + else: + dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) + + plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) + plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) + + distname, sse = eda_obj.DistributionFinder(dataframe[i[1]]) + try: + ax = sns.distplot(dataframe[i[1]], label=distname) + + ax.legend(loc='best') + if newdataframe.empty == False: + dataType = newdataframe[i[1]].dtypes + if dataType not in pandasNumericDtypes: + newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) + newdataframe[i[1]] = newdataframe[i[1]].cat.codes + newdataframe[i[1]] = newdataframe[i[1]].astype(int) + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) + else: + newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) + distname, sse = distribution.DistributionFinder(newdataframe[i[1]]) + ax = sns.distplot(newdataframe[i[1]], label=distname) + ax.legend(loc='best') + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + pass + buf = io.BytesIO() + plt.savefig(buf, format='png') + buf.seek(0) + string = base64.b64encode(buf.read()) + uri = urllib.parse.quote(string) + return uri + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + +def getCategoryWordCloud(df): + labels = df.Label.unique() + df_output = pd.DataFrame() + tcolumns=['text'] + for label in labels: + df2 = df[df['Label'] == label] + df2 = df2.reset_index() + + wordcloud,df_text = getWordCloud(df2,tcolumns) + newrow = {'Label':label,'wordCloud':wordcloud} + df_output = 
df_output.append(newrow,ignore_index=True) + return(df_output) +def getHighlyCorrelatedFeatureCharts(df, df_top): + numOfRows = df.shape[0] + cratio = 0.01 + if (numOfRows < 1000): + cratio = 0.2 + elif (numOfRows < 10000): + cratio = 0.1 + elif (numOfRows < 100000): + cratio = 0.01 + barcolor = [""red"", ""green"", ""blue"", ""goldenrod"", ""magenta""] + ffig = make_subplots(rows=2, cols=3) + height = 800 + rowno = 1 + colno = 1 + featureCharts = [] + try: + for index, row in df_top.iterrows(): + feature1 = row['FEATURE_1'] + feature2 = row['FEATURE_2'] + df_temp = df[[feature1, feature2]] + feature1data = df_temp[feature1] + feature2data = df_temp[feature2] + nUnique = len(feature1data.unique().tolist()) + if nUnique / numOfRows >= cratio: + feature1type = 'Continous' + else: + feature1type = 'Category' + nUnique = len(feature2data.unique().tolist()) + if nUnique / numOfRows >= cratio: + feature2type = 'Continous' + else: + feature2type = 'Category' + + charttype = 0 + if feature1type == 'Continous' and feature2type == 'Continous': + df_temp[feature1] = pd.qcut(df_temp[feature1], q=8, duplicates='drop',precision=0) + df_temp[feature1] = df_temp[feature1].astype(str).str.strip('()[]') + feature1type = 'Category' + xaxis = feature1 + yaxis = feature2 + charttype = 1 + if feature1type == 'Category' and feature2type == 'Continous': + xaxis = feature1 + yaxis = feature2 + charttype = 1 + + + if feature1type == 'Continous' and feature2type == 'Category': + xaxis = feature1 #xaxis = feature2 + yaxis = feature2 #yaxis = feature1 + charttype = 1 + + if feature1type == 'Category' and feature2type == 'Category': + if (len(feature1data.unique().tolist()) < len(feature2data.unique().tolist())): + xaxis = feature1 #xaxis = feature2 + yaxis = feature2 #yaxis = feature1 + else: + xaxis = feature1 + yaxis = feature2 + + if (len(df_temp[xaxis].unique().tolist()) > 5): + df_temp[xaxis] = pd.qcut(df_temp[xaxis], q=5, duplicates='drop',precision=0) + df_temp[xaxis] = df_temp[xaxis].astype(str).str.strip('()[]') + + if (len(df_temp[yaxis].unique().tolist()) > 5): + df_temp[yaxis] = pd.qcut(df_temp[yaxis], q=3, duplicates='drop',precision=0) + df_temp[yaxis] = df_temp[yaxis].astype(str).str.strip('()[]') + charttype = 2 + # if feature1type == 'Category' and feature2type == 'Category': + if charttype == 2: + uniqueclasses = df_temp[yaxis].unique().tolist() + cfig = go.Figure() + i = 1 + for x in uniqueclasses: + df_temp3 = df_temp.loc[df_temp[yaxis] == x] + df_temp2 = df_temp3.groupby(xaxis, as_index=False)[yaxis].count() + if df_temp2[xaxis].dtypes == ""object"": + df_temp2 = df_temp2.set_index(xaxis).reindex( + natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index() + + xaxis_data = df_temp2[xaxis].tolist() + + yaxis_data = df_temp2[yaxis].tolist() + + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name=x, marker_color=barcolor[i])) + i = i + 1 + if i == 5: + break + cfig.update_layout(barmode='stack', xaxis_title=xaxis, yaxis_title=yaxis) + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400) + featureCharts.append(bargraph) + + if charttype == 1: + df_temp2 = df_temp.groupby(xaxis, as_index=False)[yaxis].mean() + if df_temp2[xaxis].dtypes == ""object"": + df_temp2 = df_temp2.set_index(xaxis).reindex( + natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index() + + xaxis_data = df_temp2[xaxis].tolist() + yaxis_data = df_temp2[yaxis].tolist() + + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Primary Product', 
marker_color='blue')) + cfig.update_layout(xaxis_title=xaxis, yaxis_title=yaxis) + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400) + featureCharts.append(bargraph) + colno += 1 + if colno > 3: + colno = 1 + rowno += 1 + except Exception as e: + print(e) + return (featureCharts) + + +# EDA Visualization changes +# ---------------------------- +def getFeatureCharts(df, graphType, d3_url,mpld3_url): + featureCharts = [] + feature1 = df.columns[0] + feature2 = df.columns[1] + + import seaborn as sns + import mpld3 + fig, ax = plt.subplots(figsize=[10,5]) + + if graphType == 'marker': + df.plot(ax=ax, marker='o') + # df[['age','education-num']].plot(ax=ax, marker='o') + + if graphType == 'area': + df.plot(ax=ax, kind =""area"") + # df[['education-num','age']].plot(ax=ax, kind =""area"") # UIprb + + if graphType == 'hexbin': + df.plot.hexbin(ax=ax, x=feature1, y=feature2, gridsize=2) + + if graphType == 'boxplot': + plt.boxplot(df) + + if graphType == 'scatter': + ax.scatter(df[feature1], df[feature2]) + + if graphType == 'regplot': + ax = sns.regplot(x= feature1, y=feature2, data= df, fit_reg = False, scatter_kws={""alpha"": 0.5}) + + if graphType == 'lineplot': + ax = sns.lineplot(x= feature1, y=feature2, data= df) + + if graphType == 'barplot': + ax = sns.barplot(x= feature1, y=feature2, data= df) + # ax = sns.barplot(x= 'age', y='fnlwgt', data= df) #Start_prb + + ax.legend() + ax.set_xlabel(feature1) + ax.set_ylabel(feature2) + #print(d3_url) + #print(mpld3_url) + html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url) + + if graphType == 'kde': + ax = sns.pairplot(df, kind=""kde"", height=4, x_vars=feature1,y_vars = feature2) + # ax = sns.pairplot(df[['age','fnlwgt']], kind=""kde"") + html_graph = mpld3.fig_to_html(ax.fig) + + if graphType == 'relplot': + sns.set(style =""darkgrid"") + ax = sns.relplot(x =feature1, y =feature2, data = df) + html_graph = mpld3.fig_to_html(ax.fig) + + + + featureCharts.append(html_graph) + return (featureCharts) +# ---------------------------- + + + + +def MostCommonWords(stopwords, inputCorpus, num_of_words=10): + try: + from collections import Counter + new = inputCorpus.str.split() + new = new.values.tolist() + corpus = [word for i in new for word in i if word not in stopwords] + counter = Counter(corpus) + most = counter.most_common() + x, y = [], [] + for word, count in most[: num_of_words + 1]: + x.append(word) + y.append(count) + return pd.DataFrame([x, y], index=['most_common_words', 'freq']).T + except: + print(""exception"", sys.exc_info()) + return False + +def removeFeature(df): + featuresList = df.columns.values.tolist() + modelFeatures = featuresList.copy() + datetimeFeatures = [] + sequenceFeatures = [] + unimportantFeatures = [] + featuresRatio = {} + for i in featuresList: + check = match_date_format(df[i]) + if check == True: + modelFeatures.remove(i) + continue + seq_check = check_seq_feature(df[i]) + if seq_check == True: + modelFeatures.remove(i) + continue + ratio = check_category(df[i]) + if ratio != 0: + featuresRatio[i] = ratio + else: + modelFeatures.remove(i) + return featuresList, modelFeatures + +def check_category(data): + total_record = len(data) + nUnique = len(data.unique().tolist()) + if nUnique == 1: + return 0 + ratio = nUnique / total_record + return (ratio) + +def check_seq_feature(data): + if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + total_record = data.count() + count = (data - data.shift() == 1).sum() + if ((total_record - count) == 1): 
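+ # All consecutive values differ by exactly 1 (only the first row has no predecessor), so treat the column as a sequence feature.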
+ return True + return False + +def match_date_format(data): + data = data.astype(str) + beforecheckcount = (data.count()*80)/100 + #####YYYY-MM-DD HH:MM:SS#### + check1 = data[data.str.match( + r'(^\\d\\d\\d\\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check1.count() + if (beforecheckcount <= aftercheckcount): + return True + #####MM/DD/YYYY HH:MM#### + check2 = data[data.str.match( + r'(^(0?[1-9]|1" +"[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount <= aftercheckcount): + return True + + #####DD-MM-YYYY HH:MM#### + check2 = data[data.str.match( + r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount <= aftercheckcount): + return True + + #####YYYY/MM/DD#### + check2 = data[data.str.match(r'(^\\d\\d\\d\\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount <= aftercheckcount): + return True + #####MM/DD/YYYY#### + check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d)$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount <= aftercheckcount): + return True + return False + +def check_text_features(df, modelFeatures): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + textFeature = [] + for i in enumerate(modelFeatures): + dataType = df[i[1]].dtypes + numOfRows = df.shape[0] + if dataType not in aionNumericDtypes: + if dataType != 'bool': + nUnique = len(df[i[1]].unique().tolist()) + textnumbericratio = 0.01 + if (numOfRows < 1000): + textnumbericratio = 0.2 + elif (numOfRows < 10000): + textnumbericratio = 0.1 + elif (numOfRows < 100000): + textnumbericratio = 0.01 + if nUnique / numOfRows >= textnumbericratio: + textFeature.append(i[1]) + return (textFeature) + +def getWordCloud(df, text_columns): + df_text = pd.DataFrame() + stopwords = set(STOPWORDS) + if (len(text_columns) > 1): + df_text['combined'] = df[text_columns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + features = ['combined'] + else: + df_text[['combined']] = df[text_columns] + features = ['combined'] + df_text[features[0]] = df_text[features[0]].fillna(""NA"") + textCorpus = df_text[features[0]] + from text import TextProcessing + tp = TextProcessing.TextProcessing() + preprocessed_text = tp.transform(textCorpus) + df_text['combined'] = preprocessed_text + df_text_list = df_text.values.tolist() + comment_words = """" + for val in df_text_list: + val = str(val) + tokens = val.split() + for i in range(len(tokens)): + tokens[i] = tokens[i].lower() + + comment_words += "" "".join(tokens) + "" "" + wordcloud = WordCloud(stopwords=stopwords).generate(comment_words) + try: + plt.clf() + except: + pass + try: + plt.imshow(wordcloud, interpolation='bilinear') + plt.axis(""off"") + plt.tight_layout(pad=0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image_64='' + return (image_64, df_text) + 
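+# --- Illustrative usage (not part of the original module) -----------------------------------
+# A minimal sketch, assuming a dataframe with a free-text column named 'review' (hypothetical
+# name), of how the two text-EDA helpers around this point fit together: getWordCloud() above
+# returns a base64-encoded PNG plus the preprocessed text frame, and getTopTextFeatures() below
+# turns that frame into a word-frequency bar chart rendered as HTML.
+def _text_eda_example(df):
+    wordcloud_b64, df_text = getWordCloud(df, ['review'])
+    frequency_chart_html = getTopTextFeatures(df_text)
+    return wordcloud_b64, frequency_chart_html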
+def getTopTextFeatures(df_text): + stopwords = set(STOPWORDS) + commonfeatures = MostCommonWords(stopwords, df_text['combined']) + xaxis_data = commonfeatures['most_common_words'].tolist() + yaxis_data = commonfeatures['freq'].tolist() + import plotly.graph_objects as go + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) + cfig.update_layout(barmode='stack', xaxis_title='Features') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) + return (bargraph) + + + +def getPCATop10Features(df, modelFeatures): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + categorial_features = [] + for i in enumerate(modelFeatures): + dataType = df[i[1]].dtypes + if dataType not in aionNumericDtypes: + categorial_features.append(i[1]) + df[i[1]] = pd.Categorical(df[i[1]]) + df[i[1]] = df[i[1]].cat.codes + df[i[1]] = df[i[1]].astype(int) + df[i[1]] = df[i[1]].fillna(df[i[1]].mode()[0]) + else: + df[i[1]] = df[i[1]].fillna(df[i[1]].mean()) + from sklearn.decomposition import PCA + pca = PCA(n_components=2).fit(df) + map = pd.DataFrame(pca.components_, columns=modelFeatures) + map = map.diff(axis=0).abs() + map = map.iloc[1] + map = map.sort_values(ascending=False).head(10) + yaxis_data = map.tolist() + xaxis_data = map.index.values.tolist() + import plotly.graph_objects as go + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) + cfig.update_layout(barmode='stack', xaxis_title='Features') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) + + + return (bargraph) + +def getCorrelationMatrix(df): + try: + #from dython.nominal import associations + if len(df.columns) > 25: + df3 = df[df.columns[0:24]] + else: + df3 = df.copy() + cor_mat= associations(df3,compute_only=True) + cor_mat=cor_mat['corr'] + #cor_mat = df3.corr() + cor_mat = cor_mat.astype(float).round(2) + #print(cor_mat) + z = cor_mat.values.tolist() + fig = ff.create_annotated_heatmap(z, x=cor_mat.columns.tolist(), y=cor_mat.index.tolist(), annotation_text=z, + colorscale='Blues') + fig.layout.yaxis.automargin = True + correlationgraph = fig.to_html(full_html=True, default_height=450, default_width=1000) + except Exception as e: + print(e) + correlationgraph = '' + return (correlationgraph) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import kfp +import kfp.dsl as dsl +import json +from pathlib import Path + + +class aionpipelinets(): + + containerRegistry = str() + containerLabel = str() + containerSecret = str() + + pipelineName = 'AION MLOps Pipeline {0}' + exeCmd = 'python' + codeFile = 'aionCode.py' + mntPoint = '/aion' + inputArg = '-i' + msIP = '0.0.0.0' + port = '8094' + cachingStrategy = 'P0D' + + deafultVolume = '2Gi' + volName = 'aion-pvc' + volMode = 'ReadWriteMany' + fileExt = '.tar.gz' + fileName = 'aion_mlops_pipeline_{0}' + + containerMM = 'modelmonitoring' + containerDI = 'dataingestion' + containerDT = 'datatransformation' + containerFE = 'featureengineering' + containerMR = 'modelregistry' + containerMS = 'modelserving' + containerImage = '{0}/{1}:{2}' + + models = {} + nameSeprator = '-' + modelsLiteral = 'models' + modelNameLiteral = 'modelname' + msTemplate = '{""apiVersion"": ""v1"", ""kind"": ""Pod"", ""metadata"": {""name"": ""{{workflow.name}}-{0}""}, ""spec"": {""containers"": [{""name"": ""{0}"", ""image"": ""{1}"", ""command"": [""python""], ""args"": [""aionCode.py"", ""-ip"", ""{2}"", ""-pn"", ""{3}""],""volumeMounts"": [{""name"": ""aion-pvc"", ""mountPath"": ""{4}""}], ""ports"": [{""name"": ""http"", ""containerPort"": {3}, ""protocol"": ""TCP""}]}], ""imagePullSecrets"": [{""name"": ""{5}""}], ""volumes"": [{""name"": ""aion-pvc"", ""persistentVolumeClaim"": {""claimName"": ""{{workflow.name}}-{6}""}}]}}' + + def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()): + self.models = models + self.containerRegistry = containerRegistry + self.containerLabel = containerLabel + self.containerSecret = containerSecret + + @dsl.pipeline( + name=pipelineName.format(containerLabel), + description=pipelineName.format(containerLabel), + ) + def aion_mlops(self, inputUri=str(), volSize=deafultVolume): + vop = dsl.VolumeOp( + name=self.volName + self.nameSeprator + self.containerLabel, + resource_name=self.volName, + modes=[self.volMode], + size=volSize + ) + + mm = dsl.ContainerOp( + name=self.containerMM, + image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + self.inputArg, + inputUri, + ], + pvolumes={self.mntPoint: vop.volume} + ) + mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + di = dsl.ContainerOp( + name=self.containerDI, + image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: mm.pvolume} + ) + di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + dt = dsl.ContainerOp( + name=self.containerDT, + image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: di.pvolume} + ) + dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + fe = dsl.ContainerOp( + name=self.containerFE, + image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: dt.pvolume} + ) + fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + + dictMT = {} + listMTOps = [] + + for model in self.models[self.modelsLiteral]: + modelName = model[self.modelNameLiteral] + mt=dsl.ContainerOp( + name=modelName, + 
image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes={self.mntPoint: fe.pvolume}) + mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + listMTOps.append(mt) + dictMT[self.mntPoint]=mt.pvolume + + + mr = dsl.ContainerOp( + name=self.containerMR, + image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel), + command=self.exeCmd, + arguments=[ + self.codeFile, + ], + pvolumes=dictMT + ).after(*tuple(listMTOps)) + + mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy + + msJson = self.msTemplate.replace(str({0}),self.containerMS).replace(str({1}),self.containerImage.format(self.containerRegistry,self.containerMS,self.containerLabel)).replace(str({2}),self.msIP).replace(str({3}),self.port).replace(str({4}),self.mntPoint).replace(str({5}),self.containerSecret).replace(str({6}),self.volName) + ms = dsl.ResourceOp( + name=self.containerMS + self.nameSeprator + self.containerLabel, + k8s_resource=json.loads(msJson), + ) + ms.after(mr) +" +" + + def compilepl(self, targetPath=str()): + filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt + if targetPath != str(): + filePath = Path(targetPath, filePath) + kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath)) + + + def executepl(self, kfhost=str()): + client = kfp.Client(kfhost) + client.create_run_from_pipeline_func(self.aion_mlops,arguments={}) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +from pathlib import Path +def label_filename(request): + filename = 'LabeledData.csv' + labelPath = os.path.join(request.session['datalocation'],'AION','Labels') + Path(labelPath).mkdir(parents=True, exist_ok=True) + filePath = os.path.join(labelPath,filename) + return filePath + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time +def add_new_azureStorage(request): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + f.close() + if data == '': + data = [] + except: + data = [] + if request.POST[""azurename""] =='' or request.POST[""azureaccountkey""] == '' or request.POST[""containername""] == '' : + return 'error' + newdata = {} + newdata['azurename'] = request.POST[""azurename""] + newdata['azureaccountkey'] = request.POST[""azureaccountkey""] + newdata['containername'] = request.POST[""containername""] + data.append(newdata) + with open(file_path, 'w') as f: + json.dump(data, f) + f.close() + return 'success' + +def get_azureStorage(): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + return data +def read_azureStorage(name,directoryname,DATA_FILE_PATH): + try: + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) + with open(file_path, 'r') as f: + data = json.load(f) + except: + data = [] + found = False + for x in data: + if x['azurename'] == name: + storage_account_name = str(x['azurename']) + storage_account_key = str(x['azureaccountkey']) + azure_container_name = x['containername'] + found = True + break + try: + if found: + root_dir = str(directoryname) + from azure.storage.filedatalake import DataLakeServiceClient + import io + import pandavro as pdx + from detect_delimiter import detect + try: + service_client = DataLakeServiceClient(account_url=""{}://{}.dfs.core.windows.net"".format(""https"", storage_account_name), credential=storage_account_key) + print(azure_container_name) + file_system_client = service_client.get_file_system_client(azure_container_name) + print(root_dir) + file_paths = file_system_client.get_paths(path=root_dir) + main_df = pd.DataFrame() + for path in file_paths: + if not path.is_directory: + file_client = file_system_client.get_file_client(path.name) + file_ext = os.path.basename(path.name).split('.', 1)[1] + if file_ext in [""csv"", ""tsv""]: + with open(csv_local, ""wb"") as my_file: + download = file_client.download_file() + download.readinto(my_file) + with open(csv_local, 'r') as file: + data = file.read() + row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t']) + processed_df = pd.read_csv(csv_local, sep=row_delimiter) + if file_ext == ""parquet"": + download = file_client.download_file() + stream = io.BytesIO() + download.readinto(stream) + processed_df = pd.read_parquet(stream, engine='pyarrow') + if file_ext == ""avro"": + with open(avro_local, ""wb"") as my_file: + download = file_client.download_file() + download.readinto(my_file) + processed_df = pdx.read_avro(avro_local) + if not main_df.empty: + main_df = main_df.append(processed_df, ignore_index=True) + else: + main_df = pd.DataFrame(processed_df) + except Exception as e: + print(e) + return 'Success',main_df + except Exception as e: + print(e) + return 'Error', pd.DataFrame() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import time +import subprocess +import sys +import json +import pandas as pd +def getDataSetRecordsCount(datalocation): + try: + records = 0 + if os.path.isfile(datalocation): + for chunk in pd.read_csv(datalocation, chunksize=20000): + records = records+len(chunk) + if records == 0: + records = 'NA' + except Exception as e: + print(e) + records = 'NA' + return records +def get_train_model_details(deploy_location,request): + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + usename = request.session['usecaseid'].replace("" "", ""_"") + outputfile = os.path.join(deploy_location,usename,str(request.session['ModelVersion']),'etc','output.json') + if os.path.isfile(outputfile): + f1 = open(outputfile, ""r+"", encoding=""utf-8"") + outputStr = f1.read() + f1.close() + resultJsonObj = json.loads(outputStr) + trainingStatus = resultJsonObj['status'] + if trainingStatus.lower() == 'success': + details = resultJsonObj['data'] + modelType = details['ModelType'] + bestModel = details['BestModel'] + return trainingStatus,modelType,bestModel + else: + return trainingStatus,'NA','NA' + else: + return 'Not Trained','NA','NA' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from appbe import exploratory_Analysis as ea +import pandas as pd +from appbe.checkConfiguration import start_check +import json +import os +import ast +import time +import numpy as np +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +# from modelTraining.models import view +from appbe.aion_config import kafka_setting +from appbe.aion_config import running_setting +from appbe.s3buckets import get_s3_bucket +from appbe.gcsbuckets import get_gcs_bucket +from appbe import help_Text as ht + +def is_value_na( value): + if isinstance( value, str): + return value.strip().lower() in ['','na','none'] + return not value + +def set_ts_preprocessing(request,configSettingsJson): #Task 13052 Timeseries Preprocessing + + interpolationType = request.POST.get('interpolationType') + ts_config = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting'] + for key in ts_config['interpolation']: + configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][ + key] = 'False' + if interpolationType != 'na': + configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][ + interpolationType] = 'True' + ts_config['rollingWindow'] = request.POST.get('rollingWindow') + if ts_config['rollingWindow'] == 'True': + ts_config['rollingWindowSize'] = request.POST.get('rollWindowsize') + + aggregation = request.POST.get('aaggregationType') + for key in ts_config['aggregation']['type']: + ts_config['aggregation']['type'][key]='False' + if is_value_na(aggregation) == False: + ts_config['aggregation']['type'][aggregation] = 'True' + granularityType = request.POST.get('unitType') + granularitySize = request.POST.get('garnularitysize') + for key in ts_config['aggregation']['granularity']['unit']: + ts_config['aggregation']['granularity']['unit'][key] = 'False' + ts_config['aggregation']['granularity']['unit'][granularityType]='True' + ts_config['aggregation']['granularity']['size'] = granularitySize + configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']= ts_config + return configSettingsJson + +def update_granularity(configSettingsJson,datapath=None): + try: + from AION.appbe.utils import set_true_option + import pandas as pd + from pathlib import Path + MINUTES = 60 + if not is_value_na(configSettingsJson['basic']['dateTimeFeature']): + if not datapath: + datapath = configSettingsJson['basic']['dataLocation'] + if Path( datapath).exists(): + df = pd.read_csv(datapath, nrows=2) + if isinstance( configSettingsJson['basic']['dateTimeFeature'], list): + datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0] + else: + datetime_feature = configSettingsJson['basic']['dateTimeFeature'] + datetime = pd.to_datetime(df[ datetime_feature]) + if len(datetime) > 1: + time_delta = (datetime[1] - datetime[0]).total_seconds() + granularity_unit = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit'] + if time_delta < (1 * MINUTES): + set_true_option(granularity_unit, key='second') + elif time_delta < (60 * MINUTES): + set_true_option(granularity_unit, key='minute') + elif time_delta < (24 * 60 * MINUTES): + set_true_option(granularity_unit, key='hour') + elif time_delta < (7 * 24 * 60 * MINUTES): + set_true_option(granularity_unit, key='day') + elif time_delta < (30 * 24 * 60 * MINUTES): + set_true_option(granularity_unit, key='week') + elif time_delta < (365 * 24 * 60 * MINUTES): + set_true_option(granularity_unit, key='month') + 
else: + set_true_option(granularity_unit, key='year') + return configSettingsJson + except Exception as e: + print(f'\\nIgnoring error during granularity unit conversion\\n:{str(e)}') + return configSettingsJson + +def save(request): + + try: + status = 'pass' + msg = """" + DEPLOY_LOCATION = request.session['deploylocation'] + if request.method == 'POST': + submittype = request.POST.get('BasicSubmit') + if submittype != 'BasicDefault': + filterjson = 'NA' + timegroupingjson = 'NA' + groupingjson = 'NA' + if request.POST.get('filters') != '': + filterjson = str(json.loads(request.POST.get('filters'))) + if request.POST.get('timegroup') != '': + timegroupingjson = str(json.loads(request.POST.get('timegroup'))) + if request.POST.get('idgroup') != '': + groupingjson = str(json.loads(request.POST.get('idgroup'))) + + configFile = request.session['config_json'] + f = open(configFile," +"""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + temp = {} + # Retraing settings changes + # -------- S T A R T -------- + prbType = request.POST.get('ProblemType') + if prbType is None: + prbType = request.POST.get('tempProblemType') + # temp['ProblemType'] = request.POST.get('ProblemType') + # request.session['Problem'] = request.POST.get('ProblemType') + temp['ProblemType'] = prbType + request.session['Problem'] = request.POST.get('ProblemType') + # --------------------------- + + + temp['ModelName'] = request.session['usecaseid'] + temp['Version'] = str(request.session['ModelVersion']) + temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') + temp['dataLocation'] = str(request.session['datalocation']) + onlinelearning=request.POST.get('onlineLearning',None) + if (onlinelearning is not None): + if onlinelearning.lower() == 'onlinelearning': + configSettingsJson['basic']['onlineLearning'] = 'True' + if onlinelearning.lower() == 'distributedlearning': + configSettingsJson['basic']['distributedLearning'] = 'True' + temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') + temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') + temp['DateTimeFeatures'] = '' + temp['IndexFeatures'] = '' + for x in configSettingsJson['advance']['profiler']['normalization'].keys(): + configSettingsJson['advance']['profiler']['normalization'][x] = 'False' + configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'True' + for x in configSettingsJson['advance']['profiler']['numericalFillMethod'].keys(): + configSettingsJson['advance']['profiler']['numericalFillMethod'][x] = 'False' + configSettingsJson['advance']['profiler']['numericalFillMethod']['Mean'] = 'True' + if onlinelearning.lower() == 'distributedlearning': + for x in configSettingsJson['advance']['profiler']['categoricalFillMethod'].keys(): + configSettingsJson['advance']['profiler']['categoricalFillMethod'][x] = 'False' + configSettingsJson['advance']['profiler']['categoricalFillMethod']['MostFrequent'] = 'True' + for x in configSettingsJson['advance']['profiler']['categoryEncoding'].keys(): + configSettingsJson['advance']['profiler']['categoryEncoding'][x] = 'False' + configSettingsJson['advance']['profiler']['categoryEncoding']['OneHotEncoding'] = 'True' + configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'False' + for x in configSettingsJson['advance']['selector']['featureEngineering'].keys(): + if x != 'numberofComponents': + configSettingsJson['advance']['selector']['featureEngineering'][x] = 'False' + elif prbType == 'llmFineTuning': + if 
configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False': + temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') + temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') + contextFeatures = request.POST.getlist('contextFeatures') + configSettingsJson['basic']['contextFeature'] = "","".join([model for model in contextFeatures]) + temp['DateTimeFeatures'] = '' + temp['IndexFeatures'] = '' + if request.POST.get('promptfriendlyname') != '': + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = request.POST.get('promptfriendlyname') + else: + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = 'Instruction' + if request.POST.get('responsefriendlyname') != '': + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = request.POST.get('responsefriendlyname') + else: + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = '' + else: + if request.session['datatype'] == 'LLM_Document': + for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'].keys(): + configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][x] = 'False' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][request.POST.get('dataPreprocessing')] = 'True' + if request.session['datatype'] == 'LLM_Code': + for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'].keys(): + configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][x] = 'False' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][request.POST.get('llmObjective')] = 'True' + + for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'].keys(): + configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][x] = 'False' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][request.POST.get('dataPreprocessing')] = 'True' + + else: + configSettingsJson['basic']['onlineLearning'] = 'False' + configSettingsJson['basic']['distributedLearning'] = 'False' + temp['InputFeatures'] = request.POST.getlist('InputFeatures') + temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') + temp['DateTimeFeatures'] = request.POST.getlist('DateTimeFeatures') + temp['IndexFeatures'] = request.POST.getlist('IndexFeatures') + if (configSettingsJson['basic']['algorithms']['timeSeriesAnomalyDetection']['AutoEncoder'] == 'True'):#task 11997 + + if (request.POST.get('analysis') == 'MultiVariate'): + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True' #task 11997 + + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False' #task 11997 + else: + #print(configSettingsJson) + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True' + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False' #task 11997 + + + temp['UserID'] = '' + temp['ItemID'] = '' + temp['rating'] = '' + + temp['secondDocFeature'] = '' + temp['firstDocFeature'] = '' + temp['invoiceNoFeature'] = '' + temp['itemFeature'] = '' + model = '' + + if temp['ProblemType'].lower() == 'recommendersystem': + model = request.POST.get('MachineLearningModels') + + if model == 'ItemRating': + temp['ProblemType'] = 
'RecommenderSystem' + temp['MachineLearningModels'] = ['ItemRating'] + temp['DeepLearningModels'] = '' + temp['UserID'] = request.POST.get('UserID') + temp['ItemID'] = request.POST.get('ItemID') + temp['rating'] = request.POST.get('rating') + temp['InputFeatures'] = [] + temp['InputFeatures'].append(temp['UserID']) + temp['InputFeatures'].append(temp['ItemID']) + temp['InputFeatures'].append(temp['rating']) + if model == 'TextSimilarity-Siamese': + temp['ProblemType'] = 'recommenderSystem' + temp['MachineLearningModels'] = ['TextSimilarity-Siamese'] + temp['secondDocFeature'] = request.POST.get('secondDocFeature') + temp['firstDocFeature'] = request.POST.get('firstDocFeature') + temp['InputFeatures'] = [] + temp['InputFeatures'].append(temp['secondDocFeature']) + temp['InputFeatures'].append(temp['firstDocFeature']) + if model == 'AssociationRules-Apriori': + temp['ProblemType'] = 'recommenderSystem' + temp['DeepLearningModels'] = '' + temp['MachineLearningModels'] = ['AssociationRules-Apriori'] + temp['invoiceNoFeature'] = request.POST.get('associationRuleInvoiceNo') + temp['itemFeature'] = request.POST.get('associationRuleItem') + temp['InputFeatures'] = [] + temp['InputFeatures'].append(temp['invoiceNoFeature']) + temp['InputFeatures'].append(temp['itemFeature']) + + temp['ScoringCriteria'] = request.POST.get('ScoringCriteria') + + if temp['ProblemType'].lower() not in ['recommendersystem','textsimilarity','associationrules','llmfinetuning']: + temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels') + temp['DeepLearningModels'] = request.POST.getlist('SelectDeepLearningModels') + elif temp['ProblemType'].lower() == 'llmfinetuning': + temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels') + model = temp['MachineLearningModels'][0] + supportedModelsSize = configSettingsJson['basic']['modelSize'][temp['ProblemType']][model] + selectedModelSize = request.POST.get('modelSize') + for x in supportedModelsSize.keys(): + configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][x] = 'False' + configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][selectedModelSize] = 'True' + + temp['noofforecasts'] = request.POST.get('noofforecasts') + temp['inlierLabels'] = request.POST.get('inlierLabels') + #temp['filterExpression'] = request.POST.get('filterExpression') + if temp['ProblemType'].lower() in ['clustering','topicmodelling','similarityidentification','contextualsearch']: + temp['TargetFeatures'] = '' + configSettingsJson['basic']['modelName'] = temp['ModelName'] + configSettingsJson['basic']['modelVersion'] = temp['Version'] + configSettingsJson['basic']['dataLocation'] = str(temp['dataLocation']) + configSettingsJson['basic']['deployLocation'] = DEPLOY_LOCATION + if configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False': + configSettingsJson['basic']['trainingFeatures'] = "","".join([model for model in temp['InputFeatures']]) + configSettingsJson['basic']['dateTimeFeature'] = "","".join([model for model in temp['DateTimeFeatures']]) + configSettingsJson['basic']['targetFeature'] = "","".join([model for model in temp['TargetFeatures']]) + configSettingsJson['basic']['indexFeature'] = "","".join([model for model in temp['IndexFeatures']]) + + if filterjson == 'NA': + configSettingsJson['basic']['filter'] = 'NA' + else: + configSettingsJson['basic']['filter'] = eval(filterjson) + + if timegroupingjson == 'NA': + configSettingsJson['basic']['timegrouper'] = 'NA' + else: + 
configSettingsJson['basic']['timegrouper'] = eval(timegroupingjson) + + if groupingjson == 'NA': + configSettingsJson['basic']['group'] = 'NA' + else: + configSettingsJson['basic']['group'] = eval(groupingjson) + + problemtyp = configSettingsJson['basic']['analysisType'] + + for i in list(problemtyp.keys()): + configSettingsJson['basic']['analysisType'][i]='False' + + algorithm = configSettingsJson['basic']['algorithms'] + for i in list(algorithm.keys()): + for x in list(configSettingsJson['basic']['algorithms'][i].keys()): + if x not in ['textSimilarityConfig','itemRatingConfig','associationRulesConfig','textSummarization']: + configSettingsJson['basic']['algorithms'][i][x] = 'False' + + configSettingsJson['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True' + + # configSettingsJson['basic']['problem_type'] = temp['ProblemType'] + scoring = configSettingsJson['basic']['scoringCriteria'] + for i in list(scoring.keys()): + for x in list(configSettingsJson['basic']['scoringCriteria'][i].keys()): + configSettingsJson['basic']['scoringCriteria'][i][x] = 'False' + if temp['ProblemType'].lower() in [""classification"",""regression"",""survivalanalysis"",""similarityidentification"",""timeseriesforecasting"",""contextualsearch""]: #task 11997 + configSettingsJson['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True' + # configSettingsJson['basic']['problem_type'] = temp['ProblemType'] + # configSettingsJson['basic']['scoringCriteria'] = temp['ScoringCriteria'] + configSettingsJson['basic']['noofforecasts'] = temp['noofforecasts'] + configSettingsJson['basic']['inlierLabels'] = temp['inlierLabels'] + #configSettingsJson['basic']['filterExpression'] = temp['filterExpression'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['userID'] = temp['UserID'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['itemID'] = temp['ItemID'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['rating'] = temp['rating'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature'] = temp['firstDocFeature'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature'] = temp['secondDocFeature'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] = temp['invoiceNoFeature'] + configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'] = temp['itemFeature'] + for x in temp['MachineLearningModels']: + + if temp['ProblemType'].lower() =='associationrules' or temp['ProblemType'].lower() == 'textsimilarity': + temp['ProblemType'] = 'recommenderSystem' + if request.POST.get('SearchType') != 'NAS' and request.POST.get('SearchType') != 'GoogleModelSearch'and request.POST.get('SearchType') != 'AutoGluon': + configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][x] = 'True' + + #for y in temp['DeepLearningModels']: + # configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][y] = 'True' + " +" + configSettingsJson['basic']['output']['profilerStage'] = 'True' + configSettingsJson['basic']['output']['selectorStage'] = 'True' + for key in configSettingsJson['advance']['profiler']['textConversionMethod']: + 
configSettingsJson['advance']['profiler']['textConversionMethod'][key] = 'False' + if temp['ProblemType'].lower() != 'topicmodelling': + configSettingsJson['advance']['profiler']['textConversionMethod']['TF_IDF'] ='True' + else: + configSettingsJson['advance']['profiler']['textConversionMethod']['CountVectors'] ='True' + #print('============================') + #print(temp['ProblemType'].lower()) + #print('============================') + if temp['ProblemType'].lower() == 'textsummarization': + configSettingsJson['basic']['algorithms']['textSummarization']['Text Summarization'] = 'True' + configSettingsJson['basic']['textSummarization']['KeyWords'] = str(request.POST.get('addKeywordsForSummarization')) + configSettingsJson['basic']['textSummarization']['pathForKeywordFile'] = str(request.POST.get('DataFilePath')) + + if temp['ProblemType'].lower() not in ['recommendersystem','textsummarization','llmfinetuning']: + + if configSettingsJson['basic']['onlineLearning'] != 'True' and configSettingsJson['basic']['distributedLearning'] != 'True': + jsonarr =request.POST.get('jsonarr') + + res = ast.literal_eval(jsonarr) + for x in res: + if x['type'].lower() == 'text': + configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False' + configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True' + configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False' + if len(request.POST.get('traindfeatures').split(',')) > 30: + configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False' + configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True' + configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False' + + configSettingsJson['advance']['profiler']['featureDict'] = res + + configSettingsJson['basic']['indexFeature'] = request.POST.get('indexfeatures') + configSettingsJson['basic']['trainingFeatures'] = request.POST.get('traindfeatures') + configSettingsJson['basic']['dateTimeFeature'] = request.POST.get('datefeatures') + + if request.POST.get('SearchType') == 'GoogleModelSearch': + configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]]['GoogleModelSearch_DNN'] = 'True' + configSettingsJson['basic']['output']['profilerStage']= 'True' + + #---------- Time series Changes Task 13052 ----------------- + + if temp['ProblemType'].lower() == 'timeseriesforecasting': + configSettingsJson = set_ts_preprocessing(request,configSettingsJson) + status,msg= start_check(configSettingsJson) + updatedConfigSettings = json.dumps(configSettingsJson) + updatedConfigFile = request.session['config_json'] + with open(updatedConfigFile, ""w"") as fpWrite: + fpWrite.write(updatedConfigSettings) + fpWrite.close() + request.session['ModelStatus'] = 'Not Trained' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 1 + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'ProblemType',prbType) + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Configured') + context = {'tab': 'configure', 'temp': temp,'advconfig': configSettingsJson, + 'basic_status_msg': 'Configuration Done', + 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': 
ModelVersion, + 'currentstate': request.session['currentstate'], 'selected': 'modeltraning','training':True,'basic_help':ht.basic_help} + + # return render(request, 'basicconfig.html', context) + if submittype == 'BasicDefault': + + temp = {} + temp['ModelName'] = request.session['UseCaseName'] + temp['Version'] = request.session['ModelVersion'] + + dataLocation = str(request.session['datalocation']) + df = pd.read_csv(dataLocation, encoding='latin1') + featuresList = df.columns.values.tolist() + datetimeFeatures = [] + sequenceFeatures = [] + unimportantFeatures = [] + featuresRatio = {} + for i in featuresList: + check = ea.match_date_format(df[i]) + if check == True: + datetimeFeatures.append(i) + unimportantFeatures.append(i) + seq_check = ea.check_seq_feature(df[i]) + if seq_check == True: + sequenceFeatures.append(i) + unimportantFeatures.append(i) + ratio = ea.check_category(df[i]) + if ratio != 0: + featuresRatio[i] = ratio + else: + unimportantFeatures.append(i) + + targetFeature = min(featuresRatio, key=featuresRatio.get) + unimportantFeatures.append(targetFeature) + config = {} + config['modelName'] = request.session['UseCaseName'] + config['modelVersion'] = request.session['ModelVersion'] + config['datetimeFeatures'] = datetimeFeatures + config['sequenceFeatures'] = sequenceFeatures + config['FeaturesList'] = featuresList + config['unimportantFeatures'] = unimportantFeatures + config['targetFeature'] = targetFeature + request.session['currentstate'] = 1 + context = {'tab': 'configure', 'temp': temp, 'config': config, + 'currentstate': request.session['currentstate'], 'selected': 'modeltraning'} + except Exception as e: + print(e) + import sys + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return status,msg,context + +def openbasicconf(request): + # 10012:Decision Threshold related Changes + data_is_under_RAM_threshold = True + + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + temp = {} + # temp['ModelName'] = request.session['UseCaseName'] + # temp['Version'] = request.session['ModelVersion'] + if request.session['datatype'] == 'Video' or request.session['datatype'] == 'Image' or request.session['datatype'] == 'Document': + folderLocation = str(request.session['datalocation']) + dataFile = os.path.join(folderLocation, request.session['csvfullpath']) + else: + dataFile = str(request.session['datalocation']) + # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- + from appbe.dataIngestion import checkRAMThreshold + data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) + # ------------------------------------------------------ E N D ------------------------------------------------------ + + # Retraing settings changes + # -------- S T A R T -------- + IsReTrainingCase = False + if request.session['IsRetraining'] == 'Yes': + IsReTrainingCase = True + IsSameFeatures = True + # --------------------------- + + + featuresList = configSettingsJson['basic']['featureList'] + unimportantFeatures = [] + modelfeatures = configSettingsJson['basic']['trainingFeatures'] + for x in featuresList: + if x not in modelfeatures: + unimportantFeatures.append(x) + config = {} + config['ModelName'] = request.session['usecaseid'] + config['Version'] = 
request.session['ModelVersion'] + config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature'] # .split("","") + if configSettingsJson['basic']['indexFeature']: + config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature'] # .split("","") + config['FeaturesList'] = featuresList + config['unimportantFeatures'] = unimportantFeatures + config['targetFeature'] = configSettingsJson['basic']['targetFeature'].split("","") + problemtypes = configSettingsJson['basic']['analysisType'] + onlineLearning = configSettingsJson['basic']['onlineLearning'] + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + #print('123',problem_type) + config['ProblemType'] = problem_type + # config['ProblemType'] = configSettingsJson['basic']['problem_type'] + + scoring = configSettingsJson['basic']['scoringCriteria'] + scoringCriteria = """" + for k in scoring.keys(): + if configSettingsJson['basic']['scoringCriteria'][k] == 'True': + scoringCriteria = k + break + config['ScoringCriteria'] = scoringCriteria + + # config['ProblemType'] = configSettingsJson['basic']['problem_type'] + # config['ScoringCriteria'] = configSettingsJson['basic']['scoringCriteria'] + + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + if 'NoOfRecords' in request.session: + records = request.session['NoOfRecords'] + else: + records = 'NA' + if request.session['finalstate'] <= 1: + request.session['finalstate'] = 1 + request.session['currentstate'] = 1 + # dataFile = str(request.session['datalocation']) + # df = pd.read_csv(dataFile,encoding='utf8') + if 'NoOfRecords' in request.session: + noofforecast = 20 + else: + noofforecast = 20 + config['noofforecasts'] = noofforecast + if 'numericFeature' in request.session: + numericFeature = request.session['numericFeature'] + else: + numericFeature = '' + + problemType = 'classification' + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + scoringCreteria = 'NA' + if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997 + for key in configSettingsJson['basic']['scoringCriteria'][problemType]: + if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True': + scoringCreteria = key + break + selectAlgo = """" + if problemType in ['classification','regression','timeSeriesForecasting', + 'timeSeriesAnomalyDetection', + 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997 + for key in configSettingsJson['basic']['algorithms'][problemType]: + if configSettingsJson['basic']['algorithms'][problemType][key] == 'True': + if selectAlgo != """": + selectAlgo += ',' + selectAlgo += key + modelSize = '' + if problemType == 'llmFineTuning': + for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys(): + if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True': + modelSize = key + break + + + featuresdict = [feature['feature'] for feature in configSettingsJson['advance']['profiler']['featureDict']] + context = {'tab': 'tabconfigure','modelSize':modelSize,'featuresdict':featuresdict, 'configsettings': configSettingsJson, 'temp': temp, 'config': 
config,'numericFeature':numericFeature,'onlineLearning':onlineLearning, + 'noOfRecords': records, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo, + 'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'], + 'finalstate': request.session['finalstate'], 'selected': 'modeltraning','IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase,'basic_help':ht.basic_help + + # 10012:Decision Threshold related changes + , 'DLCheckpoint':data_is_under_RAM_threshold} + return context + +def gotoconf(request): + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + try: + # 10012:Decision Threshold related Changes + data_is_under_RAM_threshold = True + + ModelName = usecasedetails.objects.get(id=request.session['ModelName']) + Version = request.session['ModelVersion'] + import os + + if request.session['datatype'] in ['Video', 'Image','Document','Object']: + folderLocation = str(request.session['datalocation']) + dataFile = os.path.join(folderLocation, request.session['csvfullpath']) + else: + dataFile = str(request.session['datalocation']) + # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- + from appbe.dataIngestion import checkRAMThreshold + data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) + # ------------------------------------------------------ E N D ------------------------------------------------------ + if request.session['datatype'] not in ['LLM_Document','LLM_Code']: + from appbe.eda import ux_eda + if 'delimiter' not in request.session: + request.session['delimiter'] = ',' + if 'textqualifier' not in request.session: + request.session['textqualifier'] = '""' + eda_obj = ux_eda(dataFile,request.session['delimiter'],request.session['textqualifier'],optimize=1) + featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeatures = eda_obj.getFeatures() + else: + featuresList = [] + featuresList.append('Instruction') + datetimeFeatures=[] + sequenceFeatures=[] + constantFeature=[] + textFeature=[] + targetFeature='Response' + numericCatFeatures = [] + numericFeature=[] + catFeatures=[] + featuresListJson = [] + for x in featuresList: + featureOperation={} + featureOperation['feature'] = x + if x in datetimeFeatures: +" +" featureOperation['type'] = 'date' + featureOperation['fillMethod'] = 'na' + featureOperation['categoryEncoding'] = 'na' + elif x in textFeature: + featureOperation['type'] = 'text' + featureOperation['fillMethod'] = 'na' + featureOperation['categoryEncoding'] = 'na' + elif x in sequenceFeatures: + featureOperation['type'] = 'index' + featureOperation['fillMethod'] = 'median' + featureOperation['categoryEncoding'] = 'na' + elif (x in catFeatures) or (x in constantFeature): + featureOperation['type'] = 'categorical' + featureOperation['fillMethod'] = 'mode' + featureOperation['categoryEncoding'] = 'targetEncoding' + else: + featureOperation['type'] = 'numerical' + featureOperation['fillMethod'] = 'medium' + featureOperation['categoryEncoding'] = 'na' + featureOperation['outlierDetection'] = 'disable' + featureOperation['outlierOperation'] = 'nochange' + featureOperation['normalizer'] = 'none' + featuresListJson.append(featureOperation) + + request.session['numericFeature'] = 
numericFeature + records = 0 + import os + if os.path.isfile(dataFile): + for chunk in pd.read_csv(dataFile, chunksize=20000,encoding=""utf-8"",encoding_errors= 'replace'): + records = records+len(chunk) + request.session['NoOfRecords'] = records + + filetimestamp = str(int(time.time())) + CONFIG_FILE_PATH = request.session['configfilepath'] + config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json') + outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json') + request.session['outputfilepath'] = str(outputfile) + modelname = request.session['usecaseid'] + modelname = modelname.replace("" "", ""_"") + DEPLOY_LOCATION = request.session['deploylocation'] + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION, modelname,str(Version),'log','model_training_logs.log') + request.session['config_json'] = config_json_filename + #request.session['ModelVersion'] = Version + request.session['ModelStatus'] = 'Not Trained' + + + + # p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained', + # ConfigPath=config_json_filename, Version=Version, ModelName=ModelName, + # TrainOuputLocation=outputfile) + # p.save() + + # from AION_UX import telemetry + # telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated') + # request.session['modelid'] = p.id + + + temp = {} + temp['ModelName'] = request.session['usecaseid'] + temp['Version'] = request.session['ModelVersion'] + ''' + featuresList = features #df.columns.values.tolist() + datetimeFeatures = + + datetimeFeatures = [] + sequenceFeatures = [] + unimportantFeatures = [] + featuresRatio = {} + for i in featuresList: + check = ea.match_date_format(df[i]) + if check == True: + datetimeFeatures.append(i) + unimportantFeatures.append(i) + seq_check = ea.check_seq_feature(df[i]) + if seq_check == True: + sequenceFeatures.append(i) + unimportantFeatures.append(i) + ratio = ea.check_category(df[i]) + if ratio != 0: + featuresRatio[i] = ratio + else: + unimportantFeatures.append(i) + targetFeature = min(featuresRatio, key=featuresRatio.get) + unimportantFeatures.append(targetFeature) + ''' + unimportantFeatures = list(datetimeFeatures) + unimportantFeatures.extend(sequenceFeatures) + #unimportantFeatures = list(set(unimportantFeatures) + set(sequenceFeatures)) + unimportantFeatures.append(targetFeature) + + config = {} + noofforecast = 20 + config['ModelName'] = request.session['usecaseid'] + config['Version'] = request.session['ModelVersion'] + config['datetimeFeatures'] = datetimeFeatures + config['sequenceFeatures'] = sequenceFeatures + config['FeaturesList'] = featuresList + config['unimportantFeatures'] = unimportantFeatures + config['targetFeature'] = targetFeature + config['noofforecasts'] = noofforecast + DEFAULT_FILE_PATH = request.session['defaultfilepath'] + + # Retraing settings changes + # -------- S T A R T -------- + + IsReTrainingCase = False + if request.session['IsRetraining'] == 'Yes': + id = request.session['ModelName'] + p = usecasedetails.objects.get(id=id) + model = Existusecases.objects.filter(ModelName=p) + + indexVal = model.count() - 1 + configFile = str(model[indexVal].ConfigPath) + # configFile = str(model[0].ConfigPath) + + # request.session['IsRetraining'] = 'No' + IsReTrainingCase = True + # --------------------------- + + else: + configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json') + + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = 
json.loads(configSettings) + + # Retraing settings changes + # -------- S T A R T -------- + pickDefaultSettings = False + IsSameFeatures = False + if 'featureList' not in configSettingsJson['basic']: + pickDefaultSettings = True + IsSameFeatures = True + else: + if configSettingsJson['basic']['featureList'] == featuresList: + pickDefaultSettings = False + IsSameFeatures = True + else: + pickDefaultSettings = True + + if pickDefaultSettings: + # --------------------------- + + configSettingsJson['basic']['featureList'] = featuresList + configSettingsJson['basic']['dateTimeFeature'] = "","".join([feature for feature in datetimeFeatures]) + configSettingsJson['basic']['indexFeature'] = sequenceFeatures + trainingFeatures = list(set(featuresList) - set(unimportantFeatures)) + configSettingsJson['basic']['trainingFeatures'] = "","".join([feature for feature in trainingFeatures]) + configSettingsJson['basic']['targetFeature'] = targetFeature + + if request.session['datatype'].lower() in ['video','image','object','document','llm_document','llm_code']: + for x in configSettingsJson['basic']['analysisType'].keys(): + configSettingsJson['basic']['analysisType'][x] = 'False' + configSettingsJson['basic']['folderSettings']['fileType'] = request.session['datatype'] + configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['csvfullpath'] + configSettingsJson['basic']['folderSettings']['fileExtension'] = request.session['fileExtension'] + if request.session['datatype'] in ['LLM_Document','LLM_Code']: + configSettingsJson['basic']['analysisType']['llmFineTuning'] = 'True' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt']='Instruction' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']='Response' + configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] = 'True' + + elif request.session['datatype'] == 'Video': + configSettingsJson['basic']['analysisType']['videoForecasting'] = 'True' + elif request.session['datatype'] == 'Image': + configSettingsJson['basic']['analysisType']['imageClassification'] = 'True' + elif request.session['datatype'] == 'Object': + configSettingsJson['basic']['analysisType']['objectDetection'] = 'True' + elif request.session['datatype'].lower() == 'document': + df = pd.read_csv(dataFile, encoding='utf8',sep=request.session['delimiter'],quotechar=request.session['textqualifier'],nrows=100) + noOfEmotyLevels = 0 + shape = df.shape + if shape[1] == 2: + noOfEmotyLevels = df['Label'].isnull().sum() + #print(noOfEmotyLevels) + if noOfEmotyLevels == 100: + configSettingsJson['basic']['analysisType']['topicModelling'] = 'True' + else: + configSettingsJson['basic']['analysisType']['classification'] = 'True' + else: + if 'uploadfiletype' in request.session: + configSettingsJson['basic']['folderSettings']['fileType'] = request.session['uploadfiletype'] + configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['uploadLocation'] + try: + if isinstance(datetimeFeatures, list): + if len(datetimeFeatures) != 0: + configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile) + elif isinstance(datetimeFeatures, str): + if datetimeFeatures != '': + configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile) + except: + pass + # Retraing settings changes + # -------- S T A R T -------- + + tot_count=len(numericCatFeatures) + #task 11997 + if (tot_count > 1): + 
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True' + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False' + else: + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True' + configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False' + if 'delimiter' in request.session: + configSettingsJson['basic']['fileSettings']['delimiters'] = request.session['delimiter'] + else: + configSettingsJson['basic']['fileSettings']['delimiters'] = ',' + if 'textqualifier' in request.session: + configSettingsJson['basic']['fileSettings']['textqualifier'] = request.session['textqualifier'] + else: + request.session['textqualifier'] = '""' + + configSettingsJson['advance']['profiler']['featureDict'] = featuresListJson + configSettingsJson['basic']['onlineLearning'] = 'False' + configSettingsJson['basic']['dataLocation'] = request.session['datalocation'] + configSettingsJson['basic']['noOfRecords'] = request.session['NoOfRecords'] + onlineLearning = configSettingsJson['basic']['onlineLearning'] + updatedConfigSettings = json.dumps(configSettingsJson) + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(updatedConfigSettings) + fpWrite.close() + + ''' + p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained', + ConfigPath=config_json_filename, Version=Version, ModelName=ModelName, + TrainOuputLocation=outputfile) + p.save() + ''' + p = Existusecases.objects.get(ModelName=ModelName,Version=Version) + p.DataFilePath = dataFile + p.DeployPath = DEPLOY_LOCATION + p.ConfigPath = config_json_filename + p.TrainOuputLocation = outputfile + p.save() + #from appbe import telemetry + #telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated') + request.session['modelid'] = p.id + # --------------------------- + + from appbe.compute import selectedInfratructure + infra = selectedInfratructure() + if infra.lower() in ['aws','gcp']: + problemType = 'llmFineTuning' + else: + problemType = 'classification' + #print(problemType) + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + scoringCreteria = 'NA' + if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997 + for key in configSettingsJson['basic']['scoringCriteria'][problemType]: + if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True': + scoringCreteria = key + break + selectAlgo = """" + if problemType in ['classification','regression','timeSeriesForecasting','timeSeriesAnomalyDetection', + 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997 + for key in configSettingsJson['basic']['algorithms'][problemType]: + if configSettingsJson['basic']['algorithms'][problemType][key] == 'True': + if selectAlgo != """": + selectAlgo += ',' + selectAlgo += key + modelSize = '' + if problemType == 'llmFineTuning': + for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys(): + if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True': + modelSize = key + break + + movenext = True + 
request.session['finalstate'] = 1 + request.session['currentstate'] = 1 + context = {'tab': 'tabconfigure','modelSize':modelSize,'tot_count':tot_count, 'temp': temp, 'configsettings': configSettingsJson, 'config': config,'numericFeature':numericFeature,'onlineLearning':onlineLearning, + 'noOfRecords': records, 'selected_use_case': selected_use_case,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'movenext': movenext, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'selected': 'modeltraning','advance':True,'basic_help':ht.basic_help + # Retraing settings changes + ,'IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase + + # 10012:Decision Threshold related + ,'DLCheckpoint':data_is_under_RAM_threshold} + return context + except UnicodeDecodeError as e: + print(e) + context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'File Reading Error: '+str(e)} + return context + except Exception as e: + print(e) + import sys,os + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))" +" + context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'Config Error: '+str(e)} + return context ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
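+# Much of the configuration handling above flips nested dictionaries of
+# 'True'/'False' strings so that exactly one option remains enabled (analysis
+# type, scoring criteria, granularity unit, model size, ...). The helper below
+# is a minimal stand-in for that pattern; the behaviour of
+# AION.appbe.utils.set_true_option imported in update_granularity() is assumed
+# here from the way it is called, so treat this as a sketch rather than the
+# actual implementation.
+def set_single_true_option(options, key):
+    # Reset every flag to the string 'False', then enable only the requested key.
+    for k in options:
+        options[k] = 'False'
+    if key in options:
+        options[key] = 'True'
+    return options
+
+granularity_unit = {'second': 'False', 'minute': 'False', 'hour': 'True', 'day': 'False'}
+print(set_single_true_option(granularity_unit, 'minute'))
+# {'second': 'False', 'minute': 'True', 'hour': 'False', 'day': 'False'}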
+* +''' +import pandas as pd +import numpy as np +import json +import os + +def downloadtrainingfile(request,Existusecases): + usename = request.session['UseCaseName'].replace("" "", ""_"") + '_' + str(request.session['ModelVersion']) + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + modelName = request.session['UseCaseName'] + modelVersion = request.session['ModelVersion'] + modelStatus = request.session['ModelStatus'] + model = Existusecases.objects.get(ModelName=request.session['ModelName'],Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + dict = {'Attribute':[], + 'Value':[] + } + training_output = json.loads(training_output) + dfdashbord = pd.DataFrame(dict) + dfdashbord.loc[len(dfdashbord.index)] = ['UseCaseName',modelName] + dfdashbord.loc[len(dfdashbord.index)] = ['ProblemType',training_output['data']['ModelType']] + + dfdashbord.loc[len(dfdashbord.index)] = ['Version',str(modelVersion)] + dfdashbord.loc[len(dfdashbord.index)] = ['Status',modelStatus] + if 'vmDetails' in training_output['data']: + dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation', training_output['data']['vmDetails']] + else: + dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation',training_output['data']['deployLocation']] + dfdashbord.loc[len(dfdashbord.index)] = ['BestModel',training_output['data']['BestModel']] + dfdashbord.loc[len(dfdashbord.index)] = ['BestScore',training_output['data']['BestScore']] + dfdashbord.loc[len(dfdashbord.index)] = ['ScoringParam',training_output['data']['ScoreType']] + + if training_output['data']['ModelType'] != 'LLM Fine-Tuning': + dfdashbord.loc[len(dfdashbord.index)] = ['Test%',configSettingsJson['advance']['testPercentage']] + dfdashbord.loc[len(dfdashbord.index)] = ['FeaturesUsed',training_output['data']['featuresused']] + from io import BytesIO as IO + excel_file = IO() + edaFileName = usename + '_training.xlsx' + excel_writer = pd.ExcelWriter(excel_file, engine=""xlsxwriter"") + dfdashbord.to_excel(excel_writer, sheet_name='Dashboard',index=False) + if training_output['data']['ModelType'].lower() != 'multimodellearning' and training_output['data']['ModelType'].lower() != 'multilabelprediction': + EvaluatedModels = training_output['data']['EvaluatedModels'] + EvaluatedModels = pd.DataFrame(EvaluatedModels) + EvaluatedModels.to_excel(excel_writer, sheet_name='EvaluatedModels',startrow=0 , startcol=0) + if training_output['data']['ModelType'].lower() == 'classification': + #print(training_output['data']['matrix']) + row1 = 10 + row2 = 10 + if 'ConfusionMatrix' in training_output['data']['matrix']: + confusionMatrix = training_output['data']['matrix']['ConfusionMatrix'] + confusionMatrix = pd.DataFrame(confusionMatrix) + confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) + row1 =confusionMatrix.shape[0]+5 + if 'ConfusionMatrix' in training_output['data']['trainmatrix']: + confusionMatrix = training_output['data']['trainmatrix']['ConfusionMatrix'] + confusionMatrix = pd.DataFrame(confusionMatrix) + confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) + if 'ClassificationReport' in training_output['data']['matrix']: + confusionMatrix = training_output['data']['matrix']['ClassificationReport'] + 
confusionMatrix = pd.DataFrame(confusionMatrix) + confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=row1 , startcol=0) + if 'ClassificationReport' in training_output['data']['trainmatrix']: + confusionMatrix = training_output['data']['trainmatrix']['ClassificationReport'] + confusionMatrix = pd.DataFrame(confusionMatrix) + confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=row2 , startcol=0) + if training_output['data']['ModelType'].lower() == 'regression': + dict = {'Attribute':[],'Value':[]} + testingDF = pd.DataFrame(dict) + try: + testingDF.loc[len(testingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']] + testingDF.loc[len(testingDF.index)] = ['R2Score',training_output['data']['matrix']['R2Score']] + testingDF.loc[len(testingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']] + testingDF.loc[len(testingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']] + testingDF.loc[len(testingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']] + except: + pass + testingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) + trainingDF = pd.DataFrame(dict) + try: + trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['trainmatrix']['MAE']] + trainingDF.loc[len(trainingDF.index)] = ['R2Score',training_output['data']['trainmatrix']['R2Score']] + trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['trainmatrix']['MSE']] + trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['trainmatrix']['MAPE']] + trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['trainmatrix']['RMSE']] + except: + pass + trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) + if training_output['data']['ModelType'].lower() == 'clustering': + dict = {'Attribute':[],'Value':[]} + trainingDF = pd.DataFrame(dict) + try: + trainingDF.loc[len(trainingDF.index)] = ['SilHouette_Avg',round(training_output['data']['trainmatrix']['SilHouette_Avg'],2)] + trainingDF.loc[len(trainingDF.index)] = ['DaviesBouldinScore',round(training_output['data']['trainmatrix']['DaviesBouldinScore'],2)] + trainingDF.loc[len(trainingDF.index)] = ['CalinskiHarabazScore',round(training_output['data']['trainmatrix']['CalinskiHarabazScore'],2)] + except: + pass + trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) + centroidpath = os.path.join(training_output['data']['deployLocation'],'centers.csv') + if(os.path.isfile(centroidpath)): + df_center = pd.read_csv(centroidpath) + df_center = df_center.rename(columns={""Unnamed: 0"": ""Cluster""}) + df_center.to_excel(excel_writer, sheet_name='Centroid',startrow=0 , startcol=0) + if training_output['data']['ModelType'].lower() == 'timeseriesforecasting': #task 11997 + if training_output['data']['BestModel'].lower() == 'var': + dict = {'Features':[],'Attribute':[],'Value':[]} + trainingDF = pd.DataFrame(dict) + FeaturesMatrix = training_output['data']['matrix'] + for x in FeaturesMatrix: + try: + trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAE',x['MAE']] + trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MSE',x['MSE']] + trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAPE',x['MAPE']] + trainingDF.loc[len(trainingDF.index)] = [x['Features'],'RMSE',x['RMSE']] + except: + pass + trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) + else: + dict = {'Attribute':[],'Value':[]} + trainingDF = 
pd.DataFrame(dict) + try: + trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']] + trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']] + trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']] + trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']] + except: + pass + trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) + workbook = excel_writer.book + + #excel_writer.save() + excel_writer.close() + excel_file.seek(0) + return edaFileName,excel_file + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import pyodbc as pyodbc +import pandas as pd +import json +def simple_select(c, sql_query, bind_params=None, display_sql=False): + """"""where c is a cursor"""""" + if bind_params is None: + c.execute(sql_query) + else: + if display_sql: + c.execute(sql_query, bind_params) + headers = [] + if c.description is not None: + # We have a SELECT statement + for x in c.description: + headers.append(x[0]) + row_count = 0 + row = c.fetchone() + data=[] + while row: + row_count += 1 + xrow={} + for i in range(len(row)): + xrow[headers[i]] = row[i] + data.append(xrow) + row = c.fetchone() + #df = pd.DataFrame(data) + return(data) + +def validatequery(request,query): + resultdata = [] + try: + server_url = request.session['server_url'] + username_actian = request.session['username'] + password_actian = request.session['password'] + database_actian = request.session['database'] + conn = get_connection(server_url,username_actian,password_actian,database_actian) + sql_text = query + cur = conn.cursor() + resultdata = simple_select(cur, query) + cur.close() + + if len(resultdata) > 0: + return ""Query executed successfully"" + else: + return ""No rows returned"" + except Exception as e: + print(e) + return str(e) + +def executequery(request,query): + resultdata = [] + try: + server_url = request.session['server_url'] + username_actian = request.session['username'] + password_actian = request.session['password'] + database_actian = request.session['database'] + conn = get_connection(server_url,username_actian,password_actian,database_actian) + sql_text = query + cur = conn.cursor() + resultdata = simple_select(cur, query) + cur.close() + return(resultdata) + except Exception as e: + print(e) + return(resultdata) + +def list_tables_fields(request,table_list): + table_field_obj = {} + table_field_obj['data'] = [] + try: + server_url = request.session['server_url'] + username_actian = request.session['username'] + password_actian = request.session['password'] + database_actian = request.session['database'] + + table_list = json.loads(table_list) + conn = get_connection(server_url,username_actian,password_actian,database_actian) + for table in table_list: + tf_obj = {} + tf_obj['TableName'] = str(table).strip() + tf_obj['Fields']= [] + + field_list = [] + sql_text = ""SELECT column_name, false as is_select FROM iicolumns WHERE 
table_name='""+table+""'"" + cur = conn.cursor() + field_list = simple_select(cur, sql_text) + cur.close() + print(field_list) + tf_obj['Fields'] = field_list + table_field_obj['data'].append(tf_obj) + print(""----------------------"") + print(table_field_obj) + print(json.dumps(table_field_obj)) + print(""----------------------"") + return json.dumps(table_field_obj) + except Exception as e: + print(""Something went wrong ""+str(e)) + return table_field_obj + +def list_tables(request): + server_url = request.session['server_url'] + username_actian = request.session['username'] + password_actian = request.session['password'] + database_actian = request.session['database'] + dt_list = [] + try: + conn = get_connection(server_url,username_actian,password_actian,database_actian) + sql_text = ""select table_name from iitables where table_type='T' and table_owner='""+username_actian+""'"" + cur = conn.cursor() + + dt_list = simple_select(cur, sql_text) + cur.close() + return dt_list + except: + print(""Something went wrong"") + return dt_list + +def get_connection(server_url,username_actian,password_actian,database_actian): + conn = pyodbc.connect(""" +"driver=Ingres;servertype=ingres;server=@""+str(server_url)+"",tcp_ip,VW;uid=""+str(username_actian)+"";pwd=""+str(password_actian)+"";database=""+str(database_actian)) + print(""connected"") + return conn + + +def getDataFromActianAvalanche(request): + server_url = request.POST.get('server_url') + username_actian = request.POST.get('username') + password_actian = request.POST.get('password') + database_actian = request.POST.get('database') + table_actian = request.POST.get('table') + conn = get_connection(server_url,username_actian,password_actian,database_actian) + c = conn.cursor() + sql_text = ""select * from ""+str(table_actian) + data = simple_select(c, sql_text) + df = pd.DataFrame(data) + return(df) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
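+# simple_select() above only needs a DB-API cursor, so its row-to-dict
+# behaviour can be exercised without an Actian/Ingres DSN. The sketch below
+# uses an in-memory sqlite3 database purely for illustration (production code
+# obtains its cursor from get_connection()) and assumes simple_select is in
+# scope, e.g. defined in the same module. Note that simple_select, as written,
+# executes a parameterised query only when display_sql is truthy, which is why
+# bind parameters are passed together with display_sql=True here.
+import sqlite3
+
+conn = sqlite3.connect(':memory:')
+cur = conn.cursor()
+cur.execute('CREATE TABLE usecases (id INTEGER, name TEXT)')
+cur.executemany('INSERT INTO usecases VALUES (?, ?)', [(1, 'churn'), (2, 'forecast')])
+rows = simple_select(cur, 'SELECT id, name FROM usecases WHERE id > ?', bind_params=(0,), display_sql=True)
+print(rows)   # [{'id': 1, 'name': 'churn'}, {'id': 2, 'name': 'forecast'}]
+cur.close()
+conn.close()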
+* +''' +import os.path +from pathlib import Path +import time +import subprocess +import sys +import shutil +from appbe.aion_config import kafka_setting +from appbe.aion_config import running_setting +from appbe.publish import chech_publish_info +from llm.llm_tuning import update_sqllite_data +from appbe.data_io import sqlite_db +from appbe.dataPath import DATA_DIR +from appbe import installPackage +from appbe import compute +import json +import os +import signal +from os.path import expanduser +import platform +import pandas as pd +LOG_FILE_PATH = os.path.join(DATA_DIR,'logs') +GITHUB_FILE_PATH = os.path.join(DATA_DIR,'github') +PUBLISH_PATH = os.path.join(DATA_DIR,'target') +DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite') +os.makedirs(LOG_FILE_PATH, exist_ok=True) +''' +def check_publish_info(usecase,version): + sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + if sqlite_dbObj.table_exists('publish'): + + publishState= 'Published' +''' +def get_instance(modelID): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) + + if len(data) > 0: + return (data[3],data[2],data[5],data[6],data[4]) + else: + return '','','','','' + else: + return '','','','','' +def startServices(request,usecasedetails,Existusecases): + try: + models = Existusecases.objects.filter(publishStatus='Published') + print(models) + if len(models) > 0: + for model in models: + try: + portNo = model.portNo + ppid = model.publishPID + if ppid == 0: + continue + + try: + os.kill(int(model.publishPID), signal.SIGTERM) + except Exception as e: + print(e) + scriptPath = os.path.join(PUBLISH_PATH,model.ModelName.usecaseid,'aion_publish_service.py') + + if os.path.exists(scriptPath): + outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) + model.publishStatus = 'Published' + model.publishPID = outputStr.pid + model.portNo = portNo + model.save() + else: + print(""Pass"") + pass + except Exception as e: + print(e) + except Exception as e: + print(e) + +def publishmodel(request,usecaseid,version,Existusecases,usecasedetails): + portNo=0 + usecased = usecasedetails.objects.get(usecaseid=usecaseid) + models = Existusecases.objects.filter(ModelName=usecased,publishStatus='Published') + if len(models) > 0: + for model in models: + try: + portNo = model.portNo + try: + os.kill(int(model.publishPID), signal.SIGTERM) + except Exception as e: + print(e) + mod = Existusecases.objects.get(id=model.id) + mod.publishStatus = '' + mod.publishPID = 0 + mod.portNo = 0 + mod.save() + except Exception as e: + print(e) + pass + missingNumbers = [] + if portNo == 0: + models = Existusecases.objects.filter(publishStatus='Published') + usedPortNo=[] + for model in models: + usedPortNo.append(model.portNo) + startPortNo = 8091 + endPortNo = 8091+5 + missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo] + if len(missingNumbers) > 0: + portNo = missingNumbers[0] + if portNo != 0: + scriptPath = os.path.join(PUBLISH_PATH,usecaseid,'aion_publish_service.py') + + model = Existusecases.objects.get(ModelName=usecased,Version=version) + isExist = os.path.exists(scriptPath) + if isExist: + configfile = os.path.join(PUBLISH_PATH,usecaseid,'config.json') + configdata = {'version': str(version)} + with open(configfile, ""w"") as outfile: + 
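+                # aion_publish_service.py reads this config.json at startup to know which
+                # trained model version to serve; only the 'version' key is written here.
+                # The port used below is the first free slot in the 8091-8095 range computed
+                # above, so at most five models can be published locally at the same time.
+                # e.g. config.json ends up as {'version': '1'} and the service is launched as:
+                #   python aion_publish_service.py -ip 0.0.0.0 -p 8091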
json.dump(configdata, outfile) + outfile.close() + outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) + model.publishStatus = 'Published' + model.publishPID = outputStr.pid + model.portNo = portNo + model.save() + Status = 'SUCCESS' + hosturl =request.get_host() + hosturl = hosturl.split(':') + url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecaseid)+'/predict' + Msg = 'Model Published Successfully' + else: + Status = 'Error' + Msg = 'Model Published Error' + url = '' + else: + Status = 'Error' + Msg = 'All ports are utilized' + url='' + return Status,Msg,url +def get_published_models(instanceid): + from appbe.sqliteUtility import sqlite_db + + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + condition = f'""instance""==""{instanceid}"" AND ""status""==""Published""' + datas = sqlite_obj.read_data('LLMTuning',condition) + + if len(datas)>0: + return True,datas[0][0] + return False,'' + +def maac_command(request,Existusecases,usecasedetails): + command = request.POST.get('maacsubmit') + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + modelID = request.POST.get('modelID') + Version = request.POST.get('Version') + p = Existusecases.objects.get(id=modelID,Version=Version) + usecasename = p.ModelName.usecaseid #bugid 13339 + usecaseid = p.ModelName.id + + # runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) + # installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) + usecasedetail = usecasedetails.objects.get(id=p.ModelName.id) + usecase = usecasedetails.objects.all() + problemType = p.ProblemType + score = 0 + scoreType = '' + deployedModel = '' + deployedModelVersion = p.Version + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') + computeinfrastructure = compute.readComputeConfig() + for model in models: + model.scoringCreteria = 'NA' + model.score = 'NA' + model.deploymodel = 'NA' + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath),'etc','output.json') + try: + with open(modelPath) as file: + outputconfig = json.load(file) + file.close() + if outputconfig['status'] == 'SUCCESS': + if deployedModelVersion == model.Version: + problemType = outputconfig['data']['ModelType'] + scoreType = outputconfig['data']['ScoreType'] + score = outputconfig['data']['BestScore'] + deployedModel = outputconfig['data']['BestModel'] + model.scoringCreteria = outputconfig['data']['ScoreType'] + model.score = outputconfig['data']['BestScore'] + model.deploymodel = outputconfig['data']['BestModel'] + model.maacsupport = 'True' + model.flserversupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + supportedmodels = [""Extreme Gradient Boosting (XGBoost)""] + if model.deploymodel in supportedmodels: + model.encryptionsupport = 'True' + else: + model.encryptionsupport = 'False' + except Exception as e: + print(e) + pass + MLaaC_output = '' + + if command == 'generatemaac': + deployPath = str(p.DeployPath) + codeconfig = os.path.join(deployPath,'etc','code_config.json') + if os.path.isfile(codeconfig): + with open(codeconfig,'r') as f: + cconfig = json.load(f) + f.close() + dbserver = 
request.POST.get('productiondb') + db_config = {} + if dbserver.lower() == 'influxdb': + cconfig['prod_db_type'] = 'influx' + db_config['host'] = request.POST.get('influxdbhost') + db_config['port'] = request.POST.get('influxdbportno') + db_config['user'] = request.POST.get('influxdbuser') + db_config['password'] = request.POST.get('influxpassword') + db_config['database'] = 'production' + db_config['measurement'] = usecasename + tags = {} + db_config['tags']=tags + cconfig['db_config'] = db_config + else: + cconfig['prod_db_type'] = 'sqlite' + cconfig['db_config'] = db_config + dbserver = request.POST.get('mlflowserver') + mlflow_config = {} + if dbserver.lower() == 'local': + cconfig['mlflow_config'] = mlflow_config + else: + mlflow_config['tracking_uri_type'] = request.POST.get('mlflowserverurl') + mlflow_config['tracking_uri'] = request.POST.get('mlflowserverurl') + mlflow_config['registry_uri'] = request.POST.get('mlflowserverurl') + mlflow_config['artifacts_uri'] = request.POST.get('mlflowserverurl') + cconfig['mlflow_config'] = mlflow_config + with open(codeconfig,'w') as f: + json.dump(cconfig, f) + f.close() + from bin.aion_mlac import generate_mlac_code + outputStr = generate_mlac_code(codeconfig) + output = json.loads(outputStr) + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'MLaC','Yes') + if output['Status'] == 'SUCCESS': + Status = 'SUCCESS' + MLaaC_output = output['MLaC_Location'].replace('\\\\', '\\\\\\\\') + Msg = 'MLaC code successfully generated' + else: + Status = 'Failure' + Msg = output['msg'] + else: + Status = 'Failure' + Msg = 'Code Config Not Present' + if command == 'buildContainer': + deployPath = str(p.DeployPath) + maac_path = os.path.join(deployPath,'publish','MLaC') + if os.path.isdir(maac_path): + config={'usecase':str(usecasename),'version':str(p.Version),'mlacPath':maac_path} + config = json.dumps(config) + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) + + if platform.system() == 'Windows': + outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE) + else: + outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config]) + #cmd = scriptPath+"" ""+str(usecasename)+"" ""+str(p.Version)+"" ""+str(maac_path) + #subprocess.Popen(cmd,shell=True) + Status = 'SUCCESS' + Msg = 'Build Container Started' + else: + Status = 'Failure' + Msg = 'Run Code Generator' + if command == 'runpipeline': + deployPath = str(p.DeployPath) + dockerlist = os.path.join(deployPath,'publish','MLaC','dockerlist.json') + if os.path.isfile(dockerlist): + persistancevolume = request.POST.get('persistancevolume') + datasetpath = request.POST.get('dataset') + filetimestamp = str(int(time.time())) + logfilepath = os.path.join(LOG_FILE_PATH,'AIONPipeline_'+str(filetimestamp)+'.log') + config={'usecase':str(usecasename),'version':str(p.Version),'persistancevolume':persistancevolume,'datasetpath':datasetpath,'dockerlist':str(dockerlist)," +"'logfilepath':logfilepath} + config = json.dumps(config) + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) + if platform.system() == 'Windows': + outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','runpipelinelocal','-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE) + else: + outputStr = 
subprocess.Popen([sys.executable, scriptPath, str(usecasename),str(p.Version),persistancevolume,datasetpath,str(dockerlist),logfilepath]) + Status = 'SUCCESS' + Msg = 'Pipeline Started' + MLaaC_output = 'Check log file for pipeline execution status: ' + str(logfilepath) + else: + Status = 'Failure' + Msg = 'Not found container information' + if command == 'generateyaml': + deployPath = str(p.DeployPath) + maac_path = os.path.join(deployPath,'publish','MLaC') + if os.path.isdir(maac_path): + persistancevolume = request.POST.get('persistancevolume') + datasetpath = request.POST.get('dataset') + supported_urls_starts_with = ('gs://','https://','http://') + if datasetpath.startswith(supported_urls_starts_with): + datasetpath = request.POST.get('dataset') + else: + datasetpath = '/aion/'+request.POST.get('dataset') + serviceport = request.POST.get('serviceport') + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_generateyaml.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath, str(usecasename),str(p.Version),persistancevolume,datasetpath,maac_path,serviceport]) + outputStr = outputStr.decode('utf-8') + outputStr=outputStr.strip() + print(outputStr) + output = json.loads(outputStr) + if output['Status'] == 'SUCCESS': + Status = 'SUCCESS' + MLaaC_output = output['location'] + Msg = 'MLaaC dockerfile successfully generated' + else: + Status = 'Failure' + Msg = output['msg'] + else: + Status = 'Failure' + Msg = 'Execute generate code first' + if command == 'githubupload': + if shutil.which('git') is None: + Status = 'Failure' + Msg = 'Git is not installed, Please install Git first.' + else: + try: + deployPath = str(p.DeployPath) + maac_path = os.path.join(deployPath,'publish','MLaC') + if os.path.isdir(maac_path): + githuburl = request.POST.get('githuburl') + githubusername = request.POST.get('githubusername') + githubtoken = request.POST.get('githubtoken') + githubemail = request.POST.get('githubemail') + githubconfig = {""url_type"":""https"",""url"":githuburl,""username"":githubusername,""email"":githubemail,""token"":githubtoken,""location"":maac_path,""modelName"":usecasename,""gitFolderLocation"":GITHUB_FILE_PATH} + from mlops import git_upload + outputStr = git_upload.upload(githubconfig) + print(outputStr) + output = json.loads(outputStr) + if output['Status'] == 'SUCCESS': + Status = 'SUCCESS' + MLaaC_output = githuburl + Msg = 'Code Uploaded to GitHub Successfully' + else: + Status = 'Failure' + Msg = output['msg'] + else: + Status = 'Failure' + Msg = 'GitHub Upload failed' + except Exception as e: + print(e) + Status = 'Failure' + Msg = 'GitHub Upload failed' + if command == 'unpublishmodel': + try: + models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published') + if len(models) > 0: + for model in models: + try: + + if problemType.lower() == ""llm fine-tuning"": + cloudconfig = os.path.normpath( + os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) + modelid = usecasename + '_' + str(Version) + usecasename = usecasename.replace("" "", ""_"") + + hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version)) + from llm.llm_inference import kill_inference_server + kill_inference_server(cloudconfig,instanceid,hypervisor,region,image) + + update_sqllite_data(modelid,'status','Success') + else: + try: + os.kill(int(model.publishPID), signal.SIGTERM) + mod.publishPID = 0 + except Exception as e: + print(e) + mod = 
Existusecases.objects.get(id=model.id) + mod.publishStatus = '' + + mod.portNo = 0 + mod.save() + Status = 'SUCCESS' + Msg = 'Model Unpublished Successfully' + except Exception as e: + print(e) + Status = 'Error' + Msg = 'Model Unpublished Error' + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(e) + pass + + if command == 'publishmodel': + + try: + portNo=0 + models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published') + if len(models) > 0: + for model in models: + try: + portNo = model.portNo + try: + os.kill(int(model.publishPID), signal.SIGTERM) + except Exception as e: + print(e) + mod = Existusecases.objects.get(id=model.id) + mod.publishStatus = '' + mod.publishPID = 0 + mod.portNo = 0 + mod.save() + except Exception as e: + print(e) + pass + missingNumbers = [] + + if problemType.lower() == ""llm fine-tuning"": + model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version) + try: + usecasename = usecasename.replace("" "", ""_"") + hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version)) + if status.lower() in ['published','success'] : + if status.lower() == 'published': + from llm.llm_inference import kill_inference_server + kill_inference_server('',instanceid, hypervisor, region, image) + update_sqllite_data(usecasename + '_' + str(Version), 'status', 'Success') + already_published,published_usecase = get_published_models(instanceid) + if already_published: + Status = 'Error' + Msg = f'{published_usecase} is published at the same id, Please Unpublish mentioned model to proceed.' + else: + if not region: + region = '' + cloudconfig = os.path.normpath( + os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) + usecase = usecasename + '_' + str(Version) + #modelid = usecasename + '_' + str(Version) + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'aion.py')) + cmd = [sys.executable, scriptPath, '-m', 'llmpublish', '-cc', cloudconfig, '-i',instanceid,'-hv',hypervisor,'-md',deployedModel,'-uc',usecase,'-r',region,'-im',image ] + outputStr = subprocess.Popen(cmd) + model.publishStatus = 'Published' + model.publishPID = 0 + model.portNo = 8000 + model.save() + Status = 'SUCCESS' + from llm.llm_inference import get_ip + instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image) + print(instanceip) + url = 'http://' + instanceip + ':' + str(model.portNo) + '/generate' + Msg = 'Model Published Successfully, Server will take few minutes to be ready for Inferencing. URL: ' + url + update_sqllite_data(usecase,'status','Published') + else: + Status = 'Error' + Msg = 'Only Trained models are availble for Publish.' 
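+                        # Publish flow for LLM fine-tuned models (summarised from the code
+                        # above): any inference server already running on the target instance
+                        # is stopped via kill_inference_server(), publishing is refused when
+                        # another usecase is already published on the same instance, and
+                        # otherwise aion.py is launched roughly as:
+                        #   python aion.py -m llmpublish -cc <compute_conf.json> -i <instanceid>
+                        #                  -hv <hypervisor> -md <model> -uc <usecase> -r <region> -im <image>
+                        # The served endpoint is http://<instance-ip>:8000/generate and the
+                        # LLMTuning sqlite entry is updated to 'Published'.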
+ + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + Status = 'Error' + Msg = 'Model Published Error' + + else: + if portNo == 0: + models = Existusecases.objects.filter(publishStatus='Published') + usedPortNo=[] + for model in models: + usedPortNo.append(model.portNo) + startPortNo = 8091 + endPortNo = 8091+5 + missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo] + if len(missingNumbers) > 0: + portNo = missingNumbers[0] + if portNo != 0: + model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version) + scriptPath = os.path.join(PUBLISH_PATH,usecasename,'aion_publish_service.py') + isExist = os.path.exists(scriptPath) + if isExist: + configfile = os.path.join(PUBLISH_PATH,usecasename,'config.json') + configdata = {'version': str(Version)} + with open(configfile, ""w"") as outfile: + json.dump(configdata, outfile) + outfile.close() + outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) + model.publishStatus = 'Published' + model.publishPID = outputStr.pid + model.portNo = portNo + model.save() + Status = 'SUCCESS' + hosturl =request.get_host() + hosturl = hosturl.split(':') + url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecasename)+'/predict' + Msg = 'Model Published Successfully URL: '+url + else: + Status = 'Error' + Msg = 'Model Published Error' + else: + Status = 'Error' + Msg = 'All ports are utilized' + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(e) + pass + if command == 'generatekubeflowyaml': + try: + if problemType.lower() == 'timeseriesforecasting': #task 11997 + from appbe.aionpipelinets import aionpipelinets + else: + from appbe.aionpipeline import aionpipeline + deployPath = str(p.DeployPath) + codeconfig = os.path.join(deployPath,'etc','code_config.json') + featuresmapping = {'modelBased':'mlbased','statisticalBased':'statisticalBased'} + if os.path.isfile(codeconfig): + with open(codeconfig,'r') as f: + codeconfig = json.load(f) + f.close() + modelsarray=[] + for featureselection in codeconfig['feature_selector']: + for algo in codeconfig['algorithms'].keys(): + if problemType.lower() == 'timeseriesforecasting': #task 11997 + modelname = 'modeltraining_'+algo.lower() + else: + modelname = 'modeltraining_'+algo.lower()+'_'+featuresmapping[featureselection] + modelx = {'modelname':modelname} + modelsarray.append(modelx) + modelsjson = {'models':modelsarray} + kubeflowhost= request.POST.get('kubeflowhost') + containerregistry= request.POST.get('containerregistry') + containerlabel= request.POST.get('containerlabel') + containersecret= request.POST.get('containersecret') + if problemType.lower() == 'timeseriesforecasting': #task 11997 + ap = aionpipelinets(modelsjson,containerregistry,containerlabel,containersecret) + else: + ap = aionpipeline(modelsjson,containerregistry,containerlabel,containersecret) + ap.aion_mlops() + ap.compilepl() + ap.executepl(kubeflowhost) + Status = 'SUCCESS' + MLaaC_output = '' + Msg = 'MLOps pipeline executed successfully' + except Exception as e: + print(e) + Status = 'Failure' + Msg = 'Error in pipeline execution' + from appbe.pages import get_usecase_page + if command in ['publishmodel','unpublishmodel']: + status,context,action = 
get_usecase_page(request,usecasedetails,Existusecases,usecaseid) + context['Status'] = Status + context['MLaaC_output'] = MLaaC_output + context['Msg'] = Msg + return(context,'usecasedetails.html') + else: + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = Status + context['MLaaC_output'] = MLaaC_output + context['Msg'] = Msg + return(context,'usecases.html') + +def getusercasestatus(request): + if 'UseCaseName' in request.session: + selected_use_case = request.session['UseCaseName'] + else: + selected_use_case" +"= 'Not Defined' + + if 'ModelVersion' in request.session: + ModelVersion = request.session['ModelVersion'] + else: + ModelVersion = 0 + + if 'ModelStatus' in request.session: + ModelStatus = request.session['ModelStatus'] + else: + " +"itask-qa-qg"", model=""valhalla/t5-base-qa-qg-hl"") + + for _text in docs: + res = nlp(_text) + print(res) + extracted_QnAList.extend(res) + + for _record in extracted_QnAList: + extracted_QnA.append({'question': _record['question'], 'answer': _record['answer'].replace('', '')}) + + quesCount = len(extracted_QnA) + context = {'extracted_QnA':extracted_QnA, 'quesCount':quesCount} + + filetimestamp = str(int(time.time())) + output_filepath = os.path.join(DATA_FILE_PATH,'AION_QnA' + filetimestamp+'.txt') + + # Save the extracted questions as a JSON file + with open(output_filepath, 'w') as output_file: + json.dump(extracted_QnA, output_file, indent=4) + print(f""T5 based QnAs have been saved to {output_filepath}."") + request.session['QnAfilepath'] = output_filepath + + return context + + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + errormsg = str(e) + + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + context = {'error': 'Failed to generate QnA List using T5','LLM' : 'T5', 'selected':'DataOperations', 'errormessage':errormsg} + log.info('generateQA_Offline -- Error : Failed to generate QnA List using T5.. '+str(e)) + log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return context + + +def split_text_for_Offline(text, model_name): + lines = text.split('\\n') + current_section = '' + sections = [] + _lastsection = 0 + + for line in lines: + num_tokens = count_tokens_text_offline(''.join([current_section,line]), model_name) + + if num_tokens < set_tokens_limit_offline: + current_section = ''.join([current_section,line]) + else: + sections.append(current_section) + current_section = line + _lastsection = 1 + + if _lastsection == 1: + sections.append(current_section) + + return sections + + +def count_tokens_text_offline(text, model_name): + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(model_name) + + inputs = tokenizer(text, return_tensors=""pt"") + input_ids = inputs[""input_ids""] + _token_count = len(input_ids[0]) + + return _token_count + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +# def exploratorory_help(): +# +# +# +# return (data_overview_tip, feature_importance_tip, correlation_analysis_tip, exploratory_analysis_tip, data_deep_drive_tip, drift_tip) + +drift_tip = 'A data distribution represents a list of all of the possible values of each of the variables as provided in the data. Based on how the data values are distributed, it can be mapped to some well-known distribution curves so that the nature of the distribution can be shown.' + +data_overview_tip = 'Data Overview give users a quick understanding of the distribution of values across the features and provides summary statistics of the features. It helps to uncover several uncommon and common issues such as unexpected feature values, missing feature values and data skew.' + +timeseries_analysis_tip = ""Time Series Analysis provides information about the stationarity and seasonality of each of the features in the ingested data."" + +feature_importance_tip = 'Feature Importance provides a features and grades the features on a scale of relative importance' + +correlation_analysis_tip = 'Correlation Analysis provides the strength of relationships among various features. Values range from 0 (least correlation) to 1 (highest correlation). A high correlation means that two or more variables have a strong relationship with each other, while a weak correlation means that the variables are hardly related.' + +exploratory_analysis_tip = 'This provides an unsupervised clustering view of the data and provides insights on how the data is distributed. It helps profile the attributes of different clusters and gives insight into underlying patterns of different clusters and find similarities in the data points.' + +data_deep_drive_tip = 'Data Deep Dive provides an interactive interface for exploring the relationship between data points across all the different features of a dataset. Each individual item in the visualization represents a data point. Data can be grouped and binned in multiple dimensions based on their feature values.' + +pair_graph_tip = 'It is used to present the correlations between two selected features.' + +fair_metrics_tip = 'It provides interface to detect the bias in data associated with a sensitive or protected attribute and used for training.' + +hopkins_tip =['Since the value is in between (0.0, 0.3), it indicates that the data has a high tendency to cluster.','Since the value is around 0.5, it indicates that the data distriution is random.','Since the value is in between (0.7, 0.99), it indicates that the data is regularly spaced.'] + +basic_help={'RowFiltering':'You can easily filter rows based on whether the column match a condition or not'} +advance_help = {'NumericFillMethod':'This is used to handle the null values present in the numerical dataset.','NumericFillMethod_Median':'Replace with middle value of the data set. Efficient and not affected by outliers.','NumericFillMethod_Mean':'Replace with average value of the columns. Affected by outliers.','NumericFillMethod_Max':'Replace all nulls with maximum value in the column.','NumericFillMethod_KNN':'This implements KNN algorithm to replace the null','NumericFillMethod_Zero':'Replace the null with 0 value','NumericFillMethod_Drop':'To remove all the null values in the dataset','NumericFillMethod_Min':'Replace all null with minimum value present in the column','CategoricalFillMethod':'This is used to handle the null values present in the categorical dataset.','CategoricalFillMethod_Mode':'Replace with most common values in the dataset. 
Suggested for categorical columns.','CategoricalFillMethod_Zero':'Replace the null with 0 value.','CategoricalFillMethod_KNN':'This implements KNN algorithm to replace the null','CategoricalFillMethod_Drop':'To remove all the null values in the dataset.','OutlierDetection':'An unusual data point that differs significantly from other data points.','OutlierDetection_IQR':'Identifying the outliers with interquatile range by dividing the data into quartiles.','OutlierDetection_Zscore':'If the z score of a data point is more than 3, it indicates that the data point is an outlier.','OutlierDetection_Isolation':'Randomly sub-sampled data is processed in a tree structure based on randomly selected features.','MissValueRatio':'Permitted Missing Value Ratio i.e., Number of missing values by total number of obervation. If the number of missing value in a columns is more than ratio than the columns will be assumped as empty column','NumericFeatureRatio':'In case column is mix of number and text value. If the number of numeric columns to number of rows ratio is greator than the value mentioned it is assumed as numeric columns and remaining rows which have text values will be removed','NormalStandard':'Standardize features by removing the mean and scaling to unit variance.','NormalMinMax':'This scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.','NormalLogNormal':'When a feature does not follow a linear distributio, that helps minimize skewness and map any distribution to a normal one as close as possible.','RemoveNoise':'Used to remove the noise present in the text data. Noise like special characters, unicode, emojis, hyperlinks,hashtags, html parameters etc.','ExpandContractions':'Contractions are words or combinations of words that are shortened by dropping letters and replacing them by an apostrophe.','Normalize':'Normalization is the process of converting a token into its base form. In the normalization process, the inflectional form of a word is removed so that the base form can be obtained.','Lemmatization':'It is a more effective option than stemming because it converts the word into its root word, rather than just stripping the suffices.','Stemming':'It refers to the removal of suffices, like ing,ly,s etc. by a simple rule-based approach.','NGrams':'The combination of multiple words used together.','PosTags':'The process of classifying words into their parts of speech and labeling them accordingly is known as part-of-speech tagging, or simply POS-tagging.','FeatureSelection':'Feature selection is for filtering irrelevant or redundant features from your dataset. The key difference between feature selection and extraction is that feature selection keeps a subset of the original features while feature extraction creates brand new ones.','FeatureEngineering':'Feature extraction is for creating a new, smaller set of features that stills captures most of the useful information. Again, feature selection keeps a subset of the original features while feature extraction creates new ones.','PCA':'Principle Component Analysis (PCA) is a common feature extraction method in data science. Technically, PCA finds the eigenvectors of a covariance matrix with the highest eigenvalues and then uses those to project the data into a new subspace of equal or less dimensions.','StatisticalBased':'Features are selected on the basis of statistics measures. 
This method does not depend on the learning algorithm and chooses the features as a pre-processing step. The filter method filters out the irrelevant feature and redundant columns from the model by using different metrics through ranking.','ModelBased':'Different tree-based methods of feature selection help us with feature importance to provide a way of selecting features. Here, feature importance specifies which feature has more importance in model building or has a great impact on the target variable.','CorrelationThreshold':'Correlation Threshold for Statistican Based Feature Selection. Correlation relation analysis done on input features vs target feature and features having correlation value grather then threshold picks for training','PValue':'P Value again for Statistical Based Feature Selection','Variance':'For Feature Selection, features should have higher variance from threshold.','Normalization':'The goal of normalization is to change the values of numeric columns in the dataset to use a common scale , without distoring differences in the ranges of values or losing information.','SVD':'The singular value decomposition (SVD) provides another way to factorize a matrix, into singular vectors and singular values. The SVD allows us to discover some of the same kind of information as the eigendecomposition.','ReplaceAcro':'Replace any abrivations into its full form Eg:{""DM"":""DirectMessage""}', +'Factoranalysis':' This algorithm creates factors from the observed variables to represent the common variance i.e. variance due to correlation among the observed variables.','ICA':'ICA stands for Independent Components Analysis and it is a linear dimension reduction method, which transforms the dataset into columns of independent components.','optimizationmethod':'Optimization is the process where we train the model iteratively that results in a maximum and minimum function evaluation.','Random':'Random search is a method in which random combinations of hyperparameters are selected and used to train a model. The best random hyperparameter combinations are used. Random search bears some similarity to grid search.','Grid':'Grid search is essentially an optimization algorithm which lets to select the best parameters for your optimization problemfrom a list of parameter options that provided, hence automating the trial-and-error method.','Bays':'Bayesian optimisation in turn takes into account past evaluations when choosing the hyperparameter set to evaluate next. This approach typically requires less iterations to get to the optimal set of hyperparameter values.','Stopwords':'Stop words are commonly eliminated which are commonly used that they carry very little useful information. They are passed in a list [""Stopword1"",""Stopword2""]','Tokenization':'It is essentially splitting a phrase, sentence, paragraph, or an entire text document into smaller units, such as individual words or terms. Choose the library for tokenization','Lemma':'In lemmatization, the transformation uses a dictionary to map different variants of a word back to its root format.','Stopwords1':'Stop words are commonly eliminated which are commonly used that they carry very little useful information.Select from the below library to remove them', +'Genetic':'The genetic algorithm repeatedly modifies a population of individual solutions. At each step, the genetic algorithm selects individuals at random from the current population to be parents and uses them to produce the children for the next generation. 
Over successive generations, the population evolves toward an optimal solution.','CV':'Cross-validation is a resampling procedure used to evaluate machine learning models on a limited data sample. The procedure has a single parameter called k that refers to the number of groups that a given data sample is to be split into.','Ensemble':'Ensemble learning is a general meta approach to machine learning that seeks better predictive performance by combining the predictions from multiple models.','EnsembleStatus':'Enable or disable according to the preference','TargetEncoding':'Target encoding is the process of replacing a categorical value with the mean of the target variable','OneHotEndoding':'Encode categorical features as a one-hot numeric array.','LabelEncoding':'Encode target labels with value between 0 and n_classes-1.','SMCStrategy':'A most_frequent model - The default. In regression the prediction is equal to the mean value, in classification the prediction is equal to the most common value.\\n A uniform model - In regression, selects a random value from the y range. In classification, selects one of the labels by random.\\n A stratified model - Draws the prediction from the distribution of the labels in the train.\\n A tree model - Trains a simple decision tree with a given depth. The depth can be customized using the max_depth parameter.','SMCGain':'The gain is calculated as:\\ngain = (model score - simple score)/(perfect score - simple score)','SMCTreeDepth':'the max depth of the tree (used only if simple model type is tree).','MIcondition':'Measure model average inference time (in seconds) per sample'} ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import sqlite3 +from pathlib import Path +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + + def table_exists(self, name): + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + return len(listOfTables) > 0 + + def read_data(self, table_name): + query = f""SELECT * FROM {table_name}"" + row = self.cursor.execute(query).fetchall() + return list(row) + #return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,""" +" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def delete_record(self,table_name,col_name, col_value): + try: + query = f""DELETE FROM {table_name} WHERE {col_name}='{col_value}'"" + self.conn.execute(query) + self.conn.commit() + return 'success' + except Exception as e : + print(str(e)) + print(""Deletion Failed"") + return 'error' + + + def get_data(self,table_name,col_name,col_value): + + query = f""SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"" + row = self.cursor.execute(query).fetchone() + if(row == None): + return [] + return list(row) + + def write_data(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def close(self): + self.conn.close() + +def add_new_s3bucket(request): + + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + + if request.POST[""aionreferencename""] =='' or request.POST[""s3bucketname""] == '' or request.POST[""awsaccesskey""] == '' : + return 'error' + pkeydata='''-----BEGIN RSA PUBLIC KEY----- + MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv + fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e + wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re + n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq + i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/ + lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB + -----END RSA PUBLIC KEY-----''' + + pubkey = rsa.PublicKey.load_pkcs1(pkeydata) + awssecretaccesskey = rsa.encrypt(request.POST[""awssecretaccesskey""].encode(), pubkey) + newdata = {} + newdata['Name'] = [request.POST[""aionreferencename""]] + newdata['AWSAccessKeyID'] = [request.POST[""awsaccesskey""]] + newdata['AWSSecretAccessKey'] = [str(awssecretaccesskey)] + newdata['S3BucketName'] = [request.POST[""s3bucketname""]] + name = request.POST[""aionreferencename""] + + if sqlite_obj.table_exists(""s3bucket""): + if(len(sqlite_obj.get_data(""s3bucket"",""Name"",name)) > 0): + return 'error1' + 
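+        # The AWS secret access key reaches this point RSA-encrypted with the hard-coded
+        # public key above; read_s3_bucket() later reverses this with the matching private
+        # key before building the boto3 client. A minimal sketch of that decryption path
+        # (variable names here are illustrative only):
+        #   privkey_obj = rsa.PrivateKey.load_pkcs1(privkey_pem, 'PEM')
+        #   secret = rsa.decrypt(eval(stored_secret), privkey_obj).decode('utf-8')
+        #   s3 = boto3.client('s3', aws_access_key_id=key_id, aws_secret_access_key=secret)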
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'s3bucket') + + except Exception as e: + print(e) + return 'error' + +def get_s3_bucket(): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + temp_data = sqlite_obj.read_data('s3bucket') + + data = [] + for x in temp_data: + data_dict = {} + data_dict['Name'] = x[0] + data_dict['AWSAccessKeyID'] = x[1] + data_dict['AWSSecretAccessKey'] = x[2] + data_dict['S3BucketName'] = x[3] + data.append(data_dict) + except Exception as e: + print(e) + data = [] + return data +def remove_s3_bucket(name): + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + return sqlite_obj.delete_record('s3bucket','Name',name) + + +def read_s3_bucket(name,filename,DATA_FILE_PATH): + privkey = '''-----BEGIN RSA PRIVATE KEY----- +MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af +nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw +0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P +M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy +Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx +Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f +VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd +D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT +KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF +DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF +ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ +H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ +4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ +xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww +AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C +cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13 +r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G +NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q +FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET +aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi +M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb +JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ +y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx +QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj +rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL +-----END RSA PRIVATE KEY-----''' + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + data = sqlite_obj.get_data(""s3bucket"",'Name',name) + except: + data = [] + awssecretaccesskey = '' + found = False + + if len(data)!=0: + aws_access_key_id = data[1] + awssecretaccesskey = data[2] + bucketName = data[3] + found = True + + if found: + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + awssecretaccesskey = eval(awssecretaccesskey) + awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey) + awssecretaccesskey = awssecretaccesskey.decode('utf-8') + #awssecretaccesskey = 'SGcyJavYEQPwTbOg1ikqThT+Op/ZNsk7UkRCpt9g'#rsa.decrypt(awssecretaccesskey, privkey) + client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(awssecretaccesskey)) + #print(bucketName,filename) + try: + response = client_s3.get_object(Bucket=bucketName, 
Key=filename) + df = pd.read_csv(response['Body']) + except Exception as e: + print(str(e))#usnish + return 'Error',str(e), pd.DataFrame() + + #return 'Error', pd.DataFrame() + return 'Success','',df + return 'Error',""Please check bucket configuration"", pd.DataFrame() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import math +import sys,os +import pandas as pd +from sklearn.cluster import KMeans +from sklearn.decomposition import PCA +import numpy as np +import scipy.stats as st +from sklearn.preprocessing import StandardScaler +from dython.nominal import associations +class ux_eda (): + + def __init__(self, dataPath=pd.DataFrame(),delimiter=',',textqualifier='""',optimize=None,): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.dataFrame = pd.DataFrame() + + if isinstance(dataPath, pd.DataFrame): + self.dataFrame = dataPath + if optimize == 1: + self.dataFrame = self.dataFrame.sample(n=1000, random_state=1) + else: + if optimize == 1: + self.dataFrame = pd.read_csv(dataPath,nrows=1000,encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') + else: + self.dataFrame = pd.read_csv(dataPath, encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') + self.dataFrame.rename(columns=lambda x: x.strip(), inplace=True) + + self.features = self.dataFrame.columns.tolist() + self.indexFeature = [] + self.dateFeature = [] + self.categoricalFeature = [] + self.constantFeature = [] + self.textFeature = [] + self.numericFeature = [] + self.numericAndCatFeature = [] + + for feature, featureType in zip(self.features, self.dataFrame.dtypes): + + if self.__check_seq_feature(self.dataFrame[feature]): + self.indexFeature.append(feature) + elif self.__match_date_format(self.dataFrame[feature]): + self.dateFeature.append(feature) + elif self.__check_constant_features(self.dataFrame[feature]): + self.constantFeature.append(feature) + elif self.__check_category_features(self.dataFrame[feature]): + self.categoricalFeature.append(feature) + elif feature" +"Type == 'object': + ''' + numOfRows = self.dataFrame.shape[0] + distinctCount = len(self.dataFrame[feature].unique()) + tempDff = self.dataFrame[feature] + self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x)) + tempDf = self.dataFrame[feature] + tempDf = tempDf.dropna() + numberOfNonNullVals = tempDf.count() + numericRatio = 0.8 + if(numberOfNonNullVals > int(numOfRows * numericRatio)): + self.numericFeature.append(feature) + else: + self.dataFrame[feature] = tempDff + ''' + self.textFeature.append(feature) + elif featureType in aionNumericDtypes: + self.numericFeature.append(feature) +# self.dataFrame[self.categoricalFeature] = self.dataFrame[self.categoricalFeature].apply(lambda x: x.cat.codes) + + self.numericAndCatFeature = self.numericFeature + self.categoricalFeature + + + # EDA Performance change + # ---------------------------- + def subsampleData(self, 
subsampleData): + self.dataFrame = self.dataFrame.sample(n=subsampleData, random_state=1) + + def get_features_datatype(self,v,num_list,cat_list,text_list): + """""" To get exact datatype of the feature in Data Overview."""""" + if v in cat_list: + return 'Categorical' + elif v in num_list: + return 'Numerical' + elif v in text_list: + return 'Text' + + def getCorrelationMatrix(self): + try: + if len(self.dataFrame.columns) > 25: + df3 = df[self.dataFrame.columns[0:24]] + else: + df3 = self.dataFrame.copy() + cor_mat= associations(self.dataFrame,compute_only=True) + cor_mat=cor_mat['corr'] + cor_mat = cor_mat.astype(float).round(2) + cor_mat.replace(np.nan, 0, inplace=True) + cor_mat.fillna('None',inplace=True) + return cor_mat + except Exception as e: + print(e) + correlationgraph = pd.DataFrame() + return (correlationgraph) + + def dataDistribution(self): + df_eda_actual = self.dataFrame.copy() + des1 = df_eda_actual.describe(include='all').T + des1['missing count %'] = df_eda_actual.isnull().mean() * 100 + des1['zero count %'] = df_eda_actual.isin([0]).mean() * 100 + dataColumns = list(self.dataFrame.columns.values) + des1.insert(0, 'Features', dataColumns) + actual_df_numerical_features = df_eda_actual.select_dtypes(exclude='object') + actual_df_categorical_features = df_eda_actual.select_dtypes(include='object') + #For text features + textFeature_df = df_eda_actual.filter(self.textFeature) + actual_df_categorical_features = actual_df_categorical_features.drop(self.textFeature, axis=1) + for i in des1['Features']: + num_cols = actual_df_numerical_features.columns.to_list() + cat_cols = actual_df_categorical_features.columns.to_list() + text_cols = self.textFeature + des1['Features Type'] = des1['Features'].apply(lambda x: self.get_features_datatype(x, num_cols,cat_cols,text_cols)) + curr_columns = des1.columns.to_list() + curr_columns.remove('Features Type') + insert_i = curr_columns.index('Features')+1 + curr_columns.insert(insert_i,'Features Type') + des1 = des1[curr_columns] + return des1 + # ---------------------------- + def subsetFeatures(self, edaFeatures): + print(self.dataFrame.columns) + self.dataFrame = self.dataFrame[edaFeatures] + self.features = edaFeatures + + self.indexFeature = [] + self.dateFeature = [] + self.categoricalFeature = [] + self.constantFeature = [] + self.textFeature = [] + self.numericFeature = [] + self.numericAndCatFeature = [] + print('abc') + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + + for feature, featureType in zip(self.features, self.dataFrame.dtypes): + if self.__check_seq_feature(self.dataFrame[feature]): + self.indexFeature.append(feature) + elif self.__match_date_format(self.dataFrame[feature]): + self.dateFeature.append(feature) + elif self.__check_constant_features(self.dataFrame[feature]): + self.constantFeature.append(feature) + elif self.__check_category_features(self.dataFrame[feature]): + self.categoricalFeature.append(feature) + elif featureType == 'object': + ''' + numOfRows = self.dataFrame.shape[0] + distinctCount = len(self.dataFrame[feature].unique()) + tempDff = self.dataFrame[feature] + self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x)) + tempDf = self.dataFrame[feature] + tempDf = tempDf.dropna() + numberOfNonNullVals = tempDf.count() + numericRatio = 0.8 + if(numberOfNonNullVals > int(numOfRows * numericRatio)): + self.numericFeature.append(feature) + else: + self.dataFrame[feature] = tempDff + ''' + self.textFeature.append(feature) + elif featureType in 
aionNumericDtypes: + self.numericFeature.append(feature) + print('def') + self.numericAndCatFeature = self.numericFeature + self.categoricalFeature + # ---------------------------- + + def testNum(self,value): + try: + x=eval(value) + return x + except: + return np.nan + + def getFeatures(self): + leastRatioFeature = self.__LeastfeatureRatio() + return (self.features, self.dateFeature, self.indexFeature, self.constantFeature, self.textFeature, leastRatioFeature,self.numericAndCatFeature,self.numericFeature,self.categoricalFeature) + def getNumericFeatureCount(self): + return(len(self.numericAndCatFeature)) + def calculateNumberofCluster(self): + df = self.dataFrame[self.numericFeature] + return self.__NumberofCluster(df) + def getTopTextFeatures(self,topn): + df_text = pd.DataFrame() + if (len(self.textFeature) > 1): + df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + features = ['combined'] + else: + df_text[['combined']] = self.dataFrame[self.textFeature] + features = ['combined'] + + df_text[features[0]] = df_text[features[0]].fillna(""NA"") + textCorpus = df_text[features[0]] + from text import eda + texteda_obj = eda.ExploreTextData() + df = texteda_obj.MostCommonWords(textCorpus,topn) + return df + + def __NumberofCluster(self, featureData): + Sum_of_squared_distances = [] + K = range(1, 15) + for k in K: + km = KMeans(n_clusters=k) + km = km.fit(featureData) + Sum_of_squared_distances.append(km.inertia_) + x1, y1 = 1, Sum_of_squared_distances[0] + x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1] + distances = [] + for inertia in range(len(Sum_of_squared_distances)): + x0 = inertia + 2 + y0 = Sum_of_squared_distances[inertia] + numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) + denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2) + distances.append(numerator / denominator) + n_clusters = distances.index(max(distances)) + 2 + return (n_clusters) +#13841 : TrustedAI: hopkins stat + def getHopkinsVal(self,df): + + try: + from appbe.hopkinsStat import hopkins + from sklearn.preprocessing import StandardScaler,OneHotEncoder + from sklearn.compose import ColumnTransformer + from sklearn.pipeline import Pipeline + from sklearn.impute import SimpleImputer + numeric_transformer = Pipeline( + steps=[(""imputer"", SimpleImputer(missing_values=np.nan,strategy=""mean"")), + (""standard_scaler"", StandardScaler())] + ) + categorical_transformer = Pipeline( + steps=[ + (""imputer"", SimpleImputer(missing_values=np.nan,strategy=""most_frequent"")), + (""encoder"", OneHotEncoder(handle_unknown=""ignore"")) + ] + ) + + preprocessor = ColumnTransformer( + transformers=[ + (""num"", numeric_transformer, self.numericFeature), + (""cat"", categorical_transformer, self.categoricalFeature) + ] + ) + pipe = Pipeline([('scaler',preprocessor)]) + scaled_df = pipe.fit_transform(df) + if type(scaled_df) != np.ndarray: + scaled_df = scaled_df.toarray() + score = round(hopkins(scaled_df,scaled_df.shape[0]),2) + + return str(score) + except Exception as e: + print(e) + return '' + + def getClusterDetails(self): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + df_clus = pd.get_dummies(self.dataFrame[self.numericAndCatFeature], prefix_sep='####') + for i in df_clus.columns: + dataType = df_clus[i].dtypes + if dataType not in aionNumericDtypes: + df_clus[i] = df_clus[i].fillna(df_clus[i].mode()[0]) + else: + df_clus[i] = df_clus[i].fillna(df_clus[i].mean()) + + n = 
self.__NumberofCluster(df_clus) + n = n - 1 + kmeans = KMeans(n_clusters=n, init='k-means++', max_iter=10, n_init=10, random_state=0) + # Fit and predict + y_means = kmeans.fit_predict(df_clus) + centroids = kmeans.cluster_centers_.squeeze() + labels = kmeans.labels_ + features = df_clus.columns + cluster_details = [] + for j in range(len(features)): + cluster = {} + feature = features[j] + perflag = 0 + if '####' in feature: + x = features[j].split('####') + feature = x[0] + ' ' + x[1] + '(%)' + perflag = 1 + else: + feature = feature + '(AVG)' + cluster['label'] = feature + total_sum = 0 + if perflag == 1: + for i in range(n): + centroid = centroids[i] + value = round(centroid[j], 2) + total_sum = total_sum + value + + for i in range(n): + centroid = centroids[i] + value = round(centroid[j], 2) + if perflag == 1: + value = (value / total_sum) * 100 + value = round(value, 2) + cluster['Cluster ' + str(i + 1)] = value + + cluster_details.append(cluster) + + hopkins_val = self.getHopkinsVal(self.dataFrame,) + return cluster_details,hopkins_val + + + def getHighlyCorrelatedFeatures(self,noOfTop): + df_corr = abs(self.dataFrame[self.numericAndCatFeature].corr()).stack().reset_index() + df_corr.columns = ['FEATURE_1', 'FEATURE_2', 'CORRELATION'] + mask_dups = (df_corr[['FEATURE_1', 'FEATURE_2']].apply(frozenset, axis=1).duplicated()) | ( + df_corr['FEATURE_1'] == df_corr['FEATURE_2']) + + df_corr = df_corr[~mask_dups] + df_corr = df_corr.sort_values(by='CORRELATION', ascending=False) + df_top = df_corr.head(n=noOfTop) + return(df_top) + + # ---------------------- 12686:Data Distribution related Changes S T A R T ---------------------- + def word_token_for_feature(self, selectedFeature, dataframe): + comment_words = """" + try: + df_text = pd.DataFrame() + df_text[[selectedFeature]] = dataframe + features = [selectedFeature] + + df_text[features[0]] = df_text[features[0]].fillna(""NA"") + textCorpus = df_text[features[0]] + from text import TextProcessing + tp = TextProcessing.TextProcessing() + preprocessed_text = tp.transform(textCorpus) + df_text[selectedFeature] = preprocessed_text + df_text_list = df_text.values.tolist() + + for val in df_text_list: + val = str(val) + tokens = val.split() + for i in range(len(tokens)): + tokens[i] = tokens[i].lower() + comment_words += "" "".join(tokens) + "" "" + except: + comment_words = """" + + return comment_words + # -------------------------------------------- E N D -------------------------------------------- + + + def word_token(self): + df_text = pd.DataFrame() + if (len(self.textFeature) > 1): + df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + features = ['combined'] + else: + df_text[['combined']] = self.dataFrame[self.textFeature] + features = ['combined'] + df_text[features[0]] = df_text[features[0]].fillna(""NA"") + textCorpus = df_text[features[0]] + from text import TextProcessing + tp = TextProcessing.TextProcessing() + preprocessed_text = tp.transform(textCorpus) + df_text['combined'] = preprocessed" +"_text + df_text_list = df_text.values.tolist() + comment_words = """" + for val in df_text_list: + val = str(val) + tokens = val.split() + for i in range(len(tokens)): + tokens[i] = tokens[i].lower() + comment_words += "" "".join(tokens) + "" "" + if comment_words == """": + comment_words = 'Not found any token' + return comment_words + def getdata(self): + return self.dataFrame + + def getPCATop10Features(self): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 
'float32', 'float64'] + df = self.dataFrame[self.numericAndCatFeature] + for feature in self.numericAndCatFeature: + if feature in self.categoricalFeature: + df[feature] = pd.Categorical(df[feature]) + df[feature] = df[feature].cat.codes + df[feature] = df[feature].fillna(df[feature].mode()[0]) + else: + df[feature] = df[feature].fillna(df[feature].mean()) + pca = PCA(n_components=2).fit(StandardScaler().fit_transform(df)) + mapping = pd.DataFrame(pca.components_, columns=self.numericAndCatFeature) + mapping = mapping.diff(axis=0).abs() + mapping = mapping.iloc[1] + mapping = mapping.sort_values(ascending=False).head(10) + return mapping + + def getTopRows(self, rows=5): + return self.dataFrame.head(rows) + + def __check_seq_feature(self, data): + if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + total_record = data.count() + count = (data - data.shift() == 1).sum() + if ((total_record - count) == 1): + return True + return False + + def __match_date_format(self, data): + try: + ## Using regex lib, we are check if any col contains datetime format like yyyy-mm-dd or yyyy/mm/dd format. if it finds return true. + import re + u_data = data.to_string() + date_find = (re.findall(r""[0-9]{1,4}[\\_|\\-|\\/|\\|][0-9]{1,2}[\\_|\\-|\\/|\\|][0-9]{1,4}"", u_data) or re.findall(r'\\d{,2}\\-[A-Za-z]{,9}\\-\\d{,4}', u_data) or re.findall(r""[0-9]{1,4}[\\_|\\-|\\/|\\|][0-9]{1,2}[\\_|\\-|\\/|\\|][0-9]{1,4}.\\d"" , u_data) or re.findall(r""[0-9]{1,4}[\\_|\\-|\\/|\\|][A-Za-z]{,9}[\\_|\\-|\\/|\\|][0-9]{1,4}"", u_data)) + if (date_find): + try: + data = pd.to_datetime(data, utc=True) + return True + except Exception as e: + ##If not a datetime col, just pass to return false statement. + pass + except Exception as e: + data = data.astype(str) + beforecheckcount = data.count() + #####YYYY-MM-DD HH:MM:SS#### + check1 = data[data.str.match( + r'(^\\d\\d\\d\\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check1.count() + if (beforecheckcount == aftercheckcount): + return True + #####MM/DD/YYYY HH:MM#### + check2 = data[data.str.match( + r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount == aftercheckcount): + return True + + #####DD-MM-YYYY HH:MM#### + check2 = data[data.str.match( + r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount == aftercheckcount): + return True + + #####YYYY/MM/DD#### + check2 = data[data.str.match(r'(^\\d\\d\\d\\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount == aftercheckcount): + return True + #####MM/DD/YYYY#### + check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d)$)') == True] + aftercheckcount = check2.count() + if (beforecheckcount == aftercheckcount): + return True + #####YYYY-MM-DD HH:MM:SS.fff#### + check11 = data[data.str.match( + r'(^\\d\\d\\d\\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])\\.(\\d{3})$)') == True] + aftercheckcount = check11.count() + if (beforecheckcount == aftercheckcount): + return True + + return False + + def __check_category_features(self, modelFeatures): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 
'float32', 'float64'] + dataType = modelFeatures.dtypes + numOfRows = len(modelFeatures) + if dataType not in aionNumericDtypes: + if dataType != 'bool': + nUnique = len(modelFeatures.unique().tolist()) + if nUnique <= 30: + return True + return False + + def __check_constant_features(self, modelFeatures): + return len(modelFeatures.unique().tolist()) == 1 + + def __featureRatio(self, modelFeatures): + if len(modelFeatures): + return len(modelFeatures.unique().tolist()) / len(modelFeatures) + return 0 + + def __LeastfeatureRatio(self): + ratio = 1 + feat = """" + for feature in (self.numericAndCatFeature + self.textFeature): + r = self.__featureRatio(self.dataFrame[feature]) + if r < ratio: + ratio = r + feat = feature + return feat + def getDistribution(self): + aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + df = self.dataFrame[self.numericAndCatFeature] + dist={} + for feature in self.numericAndCatFeature: + if feature in self.categoricalFeature: + df[feature] = pd.Categorical(df[feature]) + df[feature] = df[feature].cat.codes + df[feature] = df[feature].fillna(df[feature].mode()[0]) + else: + df[feature] = df[feature].fillna(df[feature].mean()) + distributionname,sse = self.DistributionFinder(df[feature]) + if distributionname == '': + dist[feature] = 'Unknown' + else: + dist[feature] = distributionname + return dist + + def DistributionFinder(self,data): + try: + distributionName = """" + sse = 0.0 + KStestStatic = 0.0 + dataType = """" + if (data.dtype == ""float64""): + dataType = ""Continuous"" + elif (data.dtype == ""int""): + dataType = ""Discrete"" + elif (data.dtype == ""int64""): + dataType = ""Discrete"" + if (dataType == ""Discrete""): + distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] + index, counts = np.unique(data.astype(int), return_counts=True) + + if (len(index) >= 2): + best_sse = np.inf + y1 = [] + total = sum(counts) + mean = float(sum(index * counts)) / total + variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) + dispersion = mean / float(variance) + theta = 1 / float(dispersion) + r = mean * (float(theta) / 1 - theta) + datamin = data.min() + datamax = data.max() + for j in counts: + y1.append(float(j) / total) + + pmf1 = st.bernoulli.pmf(index, mean) + pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) + pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) + pmf4 = st.nbinom.pmf(index, mean, r) + pmf5 = st.poisson.pmf(index, mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1 - pmf5, 2.0)) + + sselist = [sse1, sse2, sse3, sse4, sse5] + best_distribution = 'NA' + for i in range(0, len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName = best_distribution + sse = best_sse + + elif (dataType == ""Continuous""): + + distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, + st.gamma, st.beta] + best_distribution = st.norm.name + best_sse = np.inf + datamin = data.min() + datamax = data.max() + nrange = datamax - datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + params = 
distribution.fit(data.astype(float)) + arg = params[:-2] + loc = params[-2] + scale = params[-1] + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if (best_sse > sse > 0): + best_distribution = distribution.name + best_sse = sse + distributionName = best_distribution + sse = best_sse + except: + response = str(sys.exc_info()[0]) + message = 'Job has Failed' + response + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + + return distributionName, sse + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import sqlite3 +from pathlib import Path +import json +import os +import rsa +import boto3 #usnish +import pandas as pd +import time + +import sqlite3 + +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + + def table_exists(self, name): + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + return len(listOfTables) > 0 + + def read_data(self, table_name): + query = f""SELECT * FROM {table_name}"" + row = self.cursor.execute(query).fetchall() + return list(row) + #return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query)" +" + return True + def delete_record(self,table_name,col_name, col_value): + try: + query = f""DELETE FROM {table_name} WHERE {col_name}='{col_value}'"" + self.conn.execute(query) + self.conn.commit() + return 'success' + except Exception as e : + print(str(e)) + print(""Deletion Failed"") + return 'error' + + def get_data(self,table_name,col_name,col_value): + query = f""SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"" + row = self.cursor.execute(query).fetchone() + if(row == None): + return [] + return list(row) + + def write_data(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def close(self): + self.conn.close() + + +def add_new_azureStorage(request): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + 
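# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal round trip
# through the sqlite_db helper defined above. The directory, database name,
# table name and column values below are invented for this sketch. Because the
# helper interpolates table and column names straight into the SQL text, those
# names must come from trusted code rather than from user input.
# ---------------------------------------------------------------------------
import pandas as pd
from pathlib import Path

def _sqlite_db_roundtrip_demo(tmp_dir='.'):
    db = sqlite_db(Path(tmp_dir), 'demo_config.db')      # helper class defined above
    frame = pd.DataFrame({'azurename': ['acct1'],
                          'azureaccountkey': ['key1'],
                          'containername': ['container1']})
    db.write_data(frame, 'azurebucket_demo')             # first write creates the table
    row = db.get_data('azurebucket_demo', 'azurename', 'acct1')
    db.close()
    return row                                           # ['acct1', 'key1', 'container1']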
if request.POST[""azurename""] =='' or request.POST[""azureaccountkey""] == '' or request.POST[""containername""] == '' : + return 'error' + newdata = {} + newdata['azurename'] = [request.POST[""azurename""]] + newdata['azureaccountkey'] = [request.POST[""azureaccountkey""]] + newdata['containername'] = [request.POST[""containername""]] + name = request.POST[""azurename""] + if sqlite_obj.table_exists(""azurebucket""): + if(len(sqlite_obj.get_data('azurebucket','azurename',name))>0): + return 'error1' + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'azurebucket') + + except: + return 'error' + +def get_azureStorage(): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + temp_data = sqlite_obj.read_data('azurebucket') + data = [] + for x in temp_data: + data_dict = {} + data_dict['azurename'] = x[0] + data_dict['azureaccountkey'] = x[1] + data_dict['containername'] = x[2] + data.append(data_dict) + except Exception as e: + print(e) + data = [] + return data +def read_azureStorage(name,directoryname,DATA_FILE_PATH): + try: + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + data = sqlite_obj.get_data('azurebucket','azurename',name) + except: + data = [] + found = False + + if len(data)!=0: + storage_account_name = str(data[0]) + storage_account_key = str(data[1]) + azure_container_name = data[2] + found = True + + try: + if found: + root_dir = str(directoryname) + from azure.storage.filedatalake import DataLakeServiceClient + import io + import pandavro as pdx + from detect_delimiter import detect + try: + service_client = DataLakeServiceClient(account_url=""{}://{}.dfs.core.windows.net"".format(""https"", storage_account_name), credential=storage_account_key) + print(azure_container_name) + file_system_client = service_client.get_file_system_client(azure_container_name) + print(root_dir) + file_paths = file_system_client.get_paths(path=root_dir) + main_df = pd.DataFrame() + for path in file_paths: + if not path.is_directory: + file_client = file_system_client.get_file_client(path.name) + file_ext = os.path.basename(path.name).split('.', 1)[1] + if file_ext in [""csv"", ""tsv""]: + with open(csv_local, ""wb"") as my_file: + download = file_client.download_file() + download.readinto(my_file) + with open(csv_local, 'r') as file: + data = file.read() + row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t']) + processed_df = pd.read_csv(csv_local, sep=row_delimiter) + if file_ext == ""parquet"": + download = file_client.download_file() + stream = io.BytesIO() + download.readinto(stream) + processed_df = pd.read_parquet(stream, engine='pyarrow') + if file_ext == ""avro"": + with open(avro_local, ""wb"") as my_file: + download = file_client.download_file() + download.readinto(my_file) + processed_df = pdx.read_avro(avro_local) + if not main_df.empty: + main_df = main_df.append(processed_df, ignore_index=True) + else: + main_df = pd.DataFrame(processed_df) + except Exception as e: + + msg = str(e).split(""."")[0] + print(msg) + return 'Error',str(msg), pd.DataFrame() + return ""Success"","""",main_df + except: + return 'Error',""Please check bucket configuration"", pd.DataFrame() + +def remove_azure_bucket(name): + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + return 
sqlite_obj.delete_record('azurebucket','azurename',name) from typing import Union + +import numpy as np +import pandas as pd +from sklearn.neighbors import BallTree + + +def hopkins(data_frame: Union[np.ndarray, pd.DataFrame], sampling_size: int) -> float: + if type(data_frame) == np.ndarray: + data_frame = pd.DataFrame(data_frame) + + data_frame_sample = sample_observation_from_dataset(data_frame, sampling_size) + + sample_distances_to_nearest_neighbours = get_distance_sample_to_nearest_neighbours( + data_frame, data_frame_sample + ) + + uniformly_selected_observations_df = simulate_df_with_same_variation( + data_frame, sampling_size + ) + + df_distances_to_nearest_neighbours = get_nearest_sample( + data_frame, uniformly_selected_observations_df + ) + + x = sum(sample_distances_to_nearest_neighbours) + y = sum(df_distances_to_nearest_neighbours) + + if x + y == 0: + raise Exception(""The denominator of the hopkins statistics is null"") + + return x / (x + y)[0] + + +def get_nearest_sample(df: pd.DataFrame, uniformly_selected_observations: pd.DataFrame): + tree = BallTree(df, leaf_size=2) + dist, _ = tree.query(uniformly_selected_observations, k=1) + uniformly_df_distances_to_nearest_neighbours = dist + return uniformly_df_distances_to_nearest_neighbours + + +def simulate_df_with_same_variation( + df: pd.DataFrame, sampling_size: int +) -> pd.DataFrame: + max_data_frame = df.max() + min_data_frame = df.min() + uniformly_selected_values_0 = np.random.uniform( + min_data_frame[0], max_data_frame[0], sampling_size + ) + uniformly_selected_values_1 = np.random.uniform( + min_data_frame[1], max_data_frame[1], sampling_size + ) + uniformly_selected_observations = np.column_stack( + (uniformly_selected_values_0, uniformly_selected_values_1) + ) + if len(max_data_frame) >= 2: + for i in range(2, len(max_data_frame)): + uniformly_selected_values_i = np.random.uniform( + min_data_frame[i], max_data_frame[i], sampling_size + ) + to_stack = (uniformly_selected_observations, uniformly_selected_values_i) + uniformly_selected_observations = np.column_stack(to_stack) + uniformly_selected_observations_df = pd.DataFrame(uniformly_selected_observations) + return uniformly_selected_observations_df + + +def get_distance_sample_to_nearest_neighbours(df: pd.DataFrame, data_frame_sample): + tree = BallTree(df, leaf_size=2) + dist, _ = tree.query(data_frame_sample, k=2) + data_frame_sample_distances_to_nearest_neighbours = dist[:, 1] + return data_frame_sample_distances_to_nearest_neighbours + + +def sample_observation_from_dataset(df, sampling_size: int): + if sampling_size > df.shape[0]: + raise Exception(""The number of sample of sample is bigger than the shape of D"") + data_frame_sample = df.sample(n=sampling_size) + return data_frame_sample + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
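# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module) of the hopkins() helper
# above. As implemented, the statistic is the sum of nearest-neighbour distances
# inside a sample of the data divided by that sum plus the distances from
# uniformly drawn points to the data, so tightly clustered data drives the value
# toward 0 while uniformly spread data stays near 0.5. The synthetic blobs and
# the sampling size of 50 below are invented for this sketch.
# ---------------------------------------------------------------------------
import numpy as np

def _hopkins_demo():
    rng = np.random.default_rng(0)
    blob_a = rng.normal(loc=0.0, scale=0.2, size=(200, 2))
    blob_b = rng.normal(loc=5.0, scale=0.2, size=(200, 2))
    clustered = np.vstack([blob_a, blob_b])
    uniform = rng.uniform(0.0, 1.0, size=(400, 2))
    # sampling_size must not exceed the number of rows, otherwise hopkins() raises.
    print('clustered:', hopkins(clustered, sampling_size=50))   # expected close to 0
    print('uniform  :', hopkins(uniform, sampling_size=50))     # expected close to 0.5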
+* +''' +import pyodbc as pyodbc +import pandas as pd +import json +import sqlalchemy as db +import pandas as pd +import urllib + + +def get_connection(request): + dbType = request.session['dbType'] + connection_string = """" + if dbType.lower()==""sqlite"": + filepath = request.session['filepath'] + #table = request.session[""tablenamesql""] + connection_string = ""sqlite:///""+str(filepath) + elif dbType.lower() in [""postgresql"",""mysql"",""mssql""]: + db_name = request.session['dbname'] + password = request.session['password'] + user = request.session['username'] + port = request.session['port'] + host = request.session['host'] + password=urllib.parse.quote_plus(password) + if dbType.lower()==""postgresql"": + connection_string = ""postgresql+psycopg2://"" + user + "":"" + password + ""@"" + host + "":"" + port + ""/"" + db_name + if dbType.lower()==""mysql"": + connection_string = ""mysql+pyodbc://"" + user + "":"" + password + ""@"" + host + "":"" + port + ""/"" + db_name + if dbType.lower()==""mssql"": + driver=request.session['driver'] + params = urllib.parse.quote_plus( + 'Driver=%s;' % driver + + 'Server=tcp:%s,' % host + + '%s;' % port + + 'Database=%s;' % db_name + + 'Uid=%s;' % user + + 'Pwd={%s};' % password + + 'Encrypt=yes;' + + 'TrustServerCertificate=no;' + + 'Connection Timeout=30;') + + connection_string = 'mssql+pyodbc:///?odbc_connect=' + params + return connection_string +def list_tables(request): + connection_string = get_connection(request) + + engine = db.create_engine(connection_string) + connection = engine.connect() + metadata = db.MetaData() + metadata.reflect(engine) + dt_list = [] + try: + dt_list= list(metadata.tables.keys()) + print(dt_list) + return dt_list + except: + print(""Something went wrong"") + return dt_list + +def list_tables_fields(request,table_list): + connection_string = get_connection(request) + engine = db.create_engine(connection_string) + connection = engine.connect() + metadata = db.MetaData() + metadata.reflect(engine) + table_field_obj = {} + table_field_obj['data'] = [] + try: + # filepath = request.session['filepath'] + #table = request.session[""tablenamesql""] + + table_list = json.loads(table_list) + for table in table_list: + tf_obj = {} + tf_obj['TableName'] = str(table).strip() + tf_obj['Fields']= [] + + table = db.Table(table, metadata, autoload=True, autoload_with=engine) + col = table.columns.keys() + tempdata = [] + for x in col: + my_list = {""column_name"": x,""is_select"":""false""} + tempdata.append(my_list) + + tf_obj['Fields'] = tempdata + table_field_obj['data'].append(tf_obj) + return json.dumps(table_field_obj) + except Exception as e:" +" + print(""Something went wrong ""+str(e)) + return table_field_obj + +def get_data(connection_string,table): + engine = db.create_engine(connection_string) + connection = engine.connect() + metadata = db.MetaData() + metadata.reflect(engine) + table = db.Table(table,metadata, autoload=True, autoload_with=engine) + query = db.select([table]) + ResultProxy = connection.execute(query) + ResultSet = ResultProxy.fetchall() + col = table.columns.keys() + return pd.DataFrame(ResultSet, columns=col) + +def getDataFromSingleTable(request): + + dbType = request.session['dbType'] + if dbType.lower() == ""sqlite"": + table = request.session[""tablenamesql""] + else: + table = request.session[""tablename""] + connection_string = get_connection(request) + df = get_data(connection_string,table) + return df + +def validatequery(request,table_details,join_details,where_details): + resultdata = 
[] + try: + table_details = json.loads(table_details) + join_details = json.loads(join_details) + where_details = json.loads(where_details) + connection_string = get_connection(request) + engine = db.create_engine(connection_string) + connection = engine.connect() + metadata = db.MetaData() + metadata.reflect(engine) + sel_col = [] + for item in table_details: + table = item[""TableName""] + table = db.Table(table, metadata, autoload=True, autoload_with=engine) + for ele in item[""Fields""]: + if str(ele[""is_select""]).lower() == 'true': + sel_col.append(table.columns[ele[""column_name""]]) + + join_condition = [] + where_clause = """" + for item in join_details: + table1 = item[""Table1Name""] + table1 = db.Table(table1, metadata, autoload=True, autoload_with=engine) + left_join = table1.columns[item[""Table1Field""]] + table2 = item[""Table2Name""] + table2 = db.Table(table2, metadata, autoload=True, autoload_with=engine) + right_join = table2.columns[item[""Table2Field""]] + join_condition = ""{left_join} {Condition}= {right_join}"".format(left_join=left_join, + Condition=item[""Condition""],right_join= right_join) + '''dbType = request.session['dbType'] + if dbType.lower()==""sqlite"": + for item in where_details: + where_clause = ""{table}.'{column}'{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + + if dbType.lower()==""postgresql"": + for item in where_details: + + where_clause = ""{table}.{column}{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + + ''' + + if len(join_details)!=0: + try: + for item in where_details: + where_clause = ""{table}.'{column}'{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + + query =db.select(sel_col).\\ + select_from(table1.join(table2,db.text(join_condition))). \\ + where(db.and_(db.text(where_clause))) + ResultProxy = connection.execute(query) + ResultSet = ResultProxy.fetchall() + except: + for item in where_details: + where_clause = ""{table}.{column}{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + + query =db.select(sel_col).\\ + select_from(table1.join(table2,db.text(join_condition))). \\ + where(db.and_(db.text(where_clause))) + ResultProxy = connection.execute(query) + ResultSet = ResultProxy.fetchall() + + else: + table = table_details[0][""TableName""] + table = db.Table(table, metadata, autoload=True, autoload_with=engine) + try: + for item in where_details: + where_clause = ""{table}.'{column}'{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + query = db.select(sel_col). \\ + select_from(table). \\ + where(db.and_(db.text(where_clause))) + ResultProxy = connection.execute(query) + ResultSet = ResultProxy.fetchall() + except: + for item in where_details: + where_clause = ""{table}.{column}{condition}{value}"".format(table=item[""TableName""],column=str(item[""FieldName""]),condition=item[""Condition""],value=item[""CompareValue""]) + query = db.select(sel_col). \\ + select_from(table). 
\\ + where(db.and_(db.text(where_clause))) + ResultProxy = connection.execute(query) + ResultSet = ResultProxy.fetchall() + + + if len(ResultSet) > 0: + data = pd.DataFrame(ResultSet) + data.columns = ResultSet[0].keys() + print(data) + return data,""query exectuted successfully"" + else: + return pd.DataFrame(),""No rows returned"" + + # conn = get_connection(server_url,username_actian,password_actian,database_actian) + # sql_text = query + # cur = conn.cursor() + # resultdata = simple_select(cur, query) + # cur.close() + #df = pd.DataFrame(resultdata) + #print(df) + + except Exception as e: + print(e) + return pd.DataFrame(), str(e) import json +import os +import pandas as pd +import urllib, base64 +def check_deepCheckPlots(deployedLocation): + deepCheck = 'False' + boostOverfit = 'False' + boostOverfitCond = 'False' + mi='False' + miCond='False' + smc = 'False' + smsCond = 'False' + boostOverfitFile= os.path.join(deployedLocation,'log','boosting_overfit.html') + boostOverfitCondFile= os.path.join(deployedLocation,'log','boosting_overfit_condition.html') + smcFile= os.path.join(deployedLocation,'log','smc.html') + smcCondFile= os.path.join(deployedLocation,'log','smc_condition.html') + miFile= os.path.join(deployedLocation,'log','mi.html') + miConFile= os.path.join(deployedLocation,'log','mi_con.html') + file_exists = os.path.exists(boostOverfitFile) + if file_exists: + deepCheck = 'True' + boostOverfit = 'True' + file_exists = os.path.exists(boostOverfitCondFile) + if file_exists: + deepCheck = 'True' + boostOverfitCond = 'True' + file_exists = os.path.exists(miFile) + if file_exists: + deepCheck = 'True' + mi = 'True' + file_exists = os.path.exists(miConFile) + if file_exists: + deepCheck = 'True' + miCond = 'True' + file_exists = os.path.exists(smcFile) + if file_exists: + deepCheck = 'True' + smc = 'True' + file_exists = os.path.exists(smcCondFile) + if file_exists: + deepCheck = 'True' + smsCond = 'True' + output = {'deepCheck':deepCheck,'boostOverfit':boostOverfit,'boostOverfitCond':boostOverfitCond,'mi':mi,'miCond':miCond,'smc':smc,'smsCond':smsCond} + return output +def FeaturesUsedForTraining(output_json): + resultJsonObj = json.loads(output_json) + result = {} + result['Status'] = resultJsonObj['status'] + result['ModelType'] = resultJsonObj['data']['ModelType'] + result['ScoreType'] = resultJsonObj['data']['ScoreType'] + result['FeaturesUsed'] = resultJsonObj['data']['featuresused'] + result['BestModel'] = resultJsonObj['data']['BestModel'] + return result +def ParseResults(output_json): + msg1 = 'Results...' 
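# ---------------------------------------------------------------------------
# Hedged illustration (values invented for this sketch) of the training-output
# JSON that FeaturesUsedForTraining() above expects. Only the keys actually read
# by that helper are shown: status, data.ModelType, data.ScoreType,
# data.featuresused and data.BestModel.
# ---------------------------------------------------------------------------
import json

_sample_output_json = json.dumps({
    'status': 'SUCCESS',
    'data': {
        'ModelType': 'classification',
        'ScoreType': 'Accuracy',
        'featuresused': ['age', 'income'],
        'BestModel': 'LogisticRegression',
    }
})
# Returns {'Status': 'SUCCESS', 'ModelType': 'classification', 'ScoreType': 'Accuracy',
#          'FeaturesUsed': ['age', 'income'], 'BestModel': 'LogisticRegression'}
print(FeaturesUsedForTraining(_sample_output_json))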
+ resultJsonObj = json.loads(output_json) + result = {} + survical_images = [] + result['Status'] = resultJsonObj['status'] + result['ModelType'] = resultJsonObj['data']['ModelType'] + if 'vmDetails' in resultJsonObj['data']: + result['DeployLocation'] = resultJsonObj['data']['vmDetails'] + else: + result['DeployLocation'] = resultJsonObj['data']['deployLocation'] + result['BestModel'] = resultJsonObj['data']['BestModel'] + if str(resultJsonObj['data']['BestScore']) == ""NA"": + result['BestScore'] = 'NA' + else: + result['BestScore'] = round(float(resultJsonObj['data']['BestScore']), 2) + result['ScoreType'] = resultJsonObj['data']['ScoreType'] + result['FeaturesUsed'] = resultJsonObj['data']['featuresused'] + ##### Training Confusion Matrix + result['problem_type'] = result['ModelType'] + if result['ModelType'].lower() == 'timeseriesanomalydetection': + result['problem_type'] = 'TimeSeriesAnomalydetection' + if result['ModelType'] == 'classification' or result['ModelType'].lower() == 'distributed classification' or (result['ModelType'] == 'anomalydetection' and (result['BestScore']) != 0) or result['ModelType'] == 'ImageClassification': + bestmodel = resultJsonObj['data']['BestModel'] + if bestmodel.lower() == 'nas': + modelSummary= os.path.join(result['DeployLocation'],'summary.txt') + f = open(modelSummary, 'r') + file_content = f.read() + f.close() + #print(file_content) + result['modelSummary'] = file_content + #task 11997 + if result['ModelType'].lower() == 'classification': + result['problem_type'] = 'Classification' + elif result['ModelType'].lower() == 'anomalydetection': + result['problem_type'] = 'AnomalyDetection' + elif result['ModelType'].lower() == 'imageclassification': + result['problem_type'] = 'ImageClassification' + elif result['ModelType'].lower() == 'distributed classification': + result['problem_type'] = 'Distributed Classification' + try: + result['deepCheck'] = check_deepCheckPlots(result['DeployLocation']) + except Exception as e: + print(e) + + if 'ConfusionMatrix' in resultJsonObj['data']['trainmatrix']: + TrainConfusionMatrix = resultJsonObj['data']['trainmatrix']['ConfusionMatrix'] + + numLabels = len(TrainConfusionMatrix) + TrainConfusionMatrixList = [] + for act_key, value in TrainConfusionMatrix.items(): + temp = {} + temp['Label'] = act_key + for pred_key, pred_value in value.items(): + temp[pred_key] = pred_value + TrainConfusionMatrixList.append(temp) + result['TrainConfusionMatrix'] = TrainConfusionMatrixList + + TrainClassificationReport = resultJsonObj['data']['trainmatrix']['ClassificationReport'] + numRows = len(TrainClassificationReport) + TrainClassificationReportList = [] + metrics_keys_list = [] + for key, value in TrainClassificationReport.items(): + temp = {} + temp['Label'] = key + if isinstance( value, dict): + for metricsKey, metricsValue in value.items(): + temp[metricsKey] = round(metricsValue, 4) + if metricsKey not in metrics_keys_list: + metrics_keys_list.append( metricsKey) + else: + if metrics_keys_list: + for key in metrics_keys_list: + temp[key] = round(value, 4) + TrainClassificationReportList.append(temp) + + result['TrainClassificationReport'] = TrainClassificationReportList + result['Train_ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['trainmatrix']['ROC_AUC_SCORE']), 4) + else: + result['TrainClassificationReport'] = '' + result['Train_ROC_AUC_SCORE']='' + + ##### Testing Confusion Matix + if 'ConfusionMatrix' in resultJsonObj['data']['matrix']: + ConfusionMatrix = resultJsonObj['data']['matrix']['ConfusionMatrix'] + 
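# ---------------------------------------------------------------------------
# Hedged, standalone version of the transformation performed just below: the
# nested {actual_label: {predicted_label: count}} confusion-matrix dictionary is
# flattened into one row per actual label for display. The labels and counts in
# the example call are invented for this sketch.
# ---------------------------------------------------------------------------
def _flatten_confusion_matrix(confusion_matrix):
    rows = []
    for actual_label, predictions in confusion_matrix.items():
        row = {'Label': actual_label}
        row.update(predictions)          # one column per predicted label
        rows.append(row)
    return rows

# [{'Label': 'yes', 'yes': 40, 'no': 5}, {'Label': 'no', 'yes': 3, 'no': 52}]
print(_flatten_confusion_matrix({'yes': {'yes': 40, 'no': 5},
                                 'no':  {'yes': 3, 'no': 52}}))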
numLabels = len(ConfusionMatrix) + ConfusionMatrixList = [] + for act_key, value in ConfusionMatrix.items(): + temp = {} + temp['Label'] = act_key + for pred_key, pred_value in value.items(): + temp[pred_key] = pred_value + ConfusionMatrixList.append(temp) + result['ConfusionMatrix'] = ConfusionMatrixList + + ClassificationReport = resultJsonObj['data']['matrix']['ClassificationReport'] + numRows = len(ClassificationReport) + ClassificationReportList = [] + metrics_keys_list = [] + for key, value in ClassificationReport.items(): + temp = {} + temp['Label'] = key + if isinstance( value, dict): + for metricsKey, metricsValue in value.items(): + temp[metricsKey] = round(metricsValue, 4) + if metricsKey not in metrics_keys_list: + metrics_keys_list.append( metricsKey) + else: + if metrics_keys_list: + for key in metrics_keys_list: + temp[key] = round(value, 4) + ClassificationReportList.append(temp) + + result['ClassificationReport'] = ClassificationReportList + result['ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['matrix']['ROC_AUC_SCORE']), 4) + elif result['ModelType'] == 'similarityIdentification': + result['problem_type'] = 'similarityIdentification' + elif result['ModelType'] == 'contextualSearch': + result['problem_type'] = 'contextualSearch' + elif result['ModelType'] == 'MultiLabelPrediction': + result['problem_type'] = 'MultiLabelPrediction' + matrix = resultJsonObj['data']['matrix'] + training_matrix = [] + for x in matrix: + fmatrix = {} + fmatrix['feature'] =" +"x + performance = {} + for y in matrix[x]: + performance[y] = matrix[x][y] + fmatrix['performance'] = performance + training_matrix.append(fmatrix) + testmatrix = resultJsonObj['data']['testmatrix'] + testing_matrix = [] + for x in testmatrix: + fmatrix = {} + fmatrix['feature'] = x + performance = {} + for y in testmatrix[x]: + performance[y] = testmatrix[x][y] + fmatrix['performance'] = performance + testing_matrix.append(fmatrix) + result['testing_matrix'] = testing_matrix + result['training_matrix'] = training_matrix + elif result['ModelType'] == 'regression' or result['ModelType'].lower() == 'distributed regression': + try: + result['deepCheck'] = check_deepCheckPlots(result['DeployLocation']) + except Exception as e: + print(e) + try: + result['problem_type'] = 'Regression' + testing_matrix = {} + if 'MAE' in resultJsonObj['data']['matrix']: + testing_matrix['MAE'] = float(resultJsonObj['data']['matrix'].get('MAE','0')) + testing_matrix['R2Score'] = float(resultJsonObj['data']['matrix'].get('R2Score','0')) + testing_matrix['MSE'] = float(resultJsonObj['data']['matrix'].get('MSE','0')) + testing_matrix['MAPE'] = float(resultJsonObj['data']['matrix'].get('MAPE','0')) + testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix'].get('RMSE','0')) + testing_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['matrix'].get('Normalised RMSE(%)','0')) + result['testing_matrix'] = testing_matrix + training_matrix = {} + training_matrix['MAE'] = float(resultJsonObj['data']['trainmatrix'].get('MAE','0')) + training_matrix['R2Score'] = float(resultJsonObj['data']['trainmatrix'].get('R2Score','0')) + training_matrix['MSE'] = float(resultJsonObj['data']['trainmatrix'].get('MSE','0')) + training_matrix['MAPE'] = float(resultJsonObj['data']['trainmatrix'].get('MAPE','0')) + training_matrix['RMSE'] = float(resultJsonObj['data']['trainmatrix'].get('RMSE','0')) + training_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['trainmatrix'].get('Normalised RMSE(%)','0')) + result['training_matrix'] = 
training_matrix + except Exception as e: + print(e) + + elif result['ModelType'] == 'Text Similarity': + result['problem_type'] = 'textsimilarity' + testing_matrix = {} + testing_matrix['Accuracy'] = float(resultJsonObj['data']['matrix']['Accuracy']) + testing_matrix['ROC_AUC'] = float(resultJsonObj['data']['matrix']['ROC AUC']) + result['testing_matrix'] = testing_matrix + training_matrix = {} + training_matrix['Accuracy'] = float(resultJsonObj['data']['trainmatrix']['Accuracy']) + training_matrix['ROC_AUC'] = float(resultJsonObj['data']['trainmatrix']['ROC AUC']) + result['training_matrix'] = training_matrix + elif result['ModelType'] == 'RecommenderSystem': #taskid 11190 + result['problem_type'] = 'Recommender' + testing_matrix = {} + testing_matrix['RMSE'] = 'NA' + result['testing_matrix'] = testing_matrix + training_matrix = {} + training_matrix['RMSE'] = 'NA' + result['training_matrix'] = training_matrix + elif result['ModelType'] == 'SurvivalAnalysis': + result['problem_type'] = 'SurvivalAnalysis' + survivalProbabilityjson = resultJsonObj['data']['survivalProbability'] + performanceimages = resultJsonObj['data']['imageLocation'] + start = '[' + end = ']' + performanceimages = performanceimages[performanceimages.find(start) + len(start):performanceimages.rfind(end)] + performanceimages = performanceimages.split(',') + for imagefile in performanceimages: + imagefile = imagefile.replace(""'"", """") + string = base64.b64encode(open(imagefile, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + survical_images.append(image_64) + result['survivalProbability'] = survivalProbabilityjson + elif result['ModelType'] == 'StateTransition': + result['problem_type'] = 'StateTransition' + stateprobabilityfile = os.path.join(result['DeployLocation'],'stateTransitionProbability.csv') + clusterfile = os.path.join(result['DeployLocation'],'stateClustering.csv') + if(os.path.isfile(stateprobabilityfile)): + df_prob = pd.read_csv(stateprobabilityfile) + df_prob = df_prob[['State','NextState','Probability']] + result['probability'] = df_prob + if(os.path.isfile(clusterfile)): + df_clus = pd.read_csv(clusterfile) + df_clus = df_clus[['clusterid','clusterlist']] + result['cluster'] = df_clus + elif result['ModelType'].lower() == 'timeseriesforecasting': #task 11997 + result['problem_type'] = 'TimeSeriesForecasting' + if result['BestModel'] == 'FBPROPHET': + imagefile = os.path.join(result['DeployLocation'],'log','img','prophet_fig.png') + string = base64.b64encode(open(imagefile, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + survical_images.append(image_64) + testing_matrix = {} + testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) + testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) + testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) + testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) + result['testing_matrix'] = testing_matrix + forecastjson = resultJsonObj['data']['forecasts'] + result['forecast'] = forecastjson + if result['BestModel'] == 'VAR': + ''' + FeaturesMatrix = resultJsonObj['data']['matrix']['FeaturesMatrix'] + mae = '' + mse = '' + mape = '' + rmse = '' + for x in FeaturesMatrix: + if mae != '': + mae += ',' + if mse != '': + mse += ',' + if R2 != '': + R2 += ',' + if rmse != '': + rmse += ',' + featurename = x['Features'] + mae = mae + featurename + '=' + x['MAE'] + mse = mse + featurename + '=' + x['MSE'] + R2 = R2 + featurename + '=' + 
x['R2'] + rmse = rmse + featurename + '=' + x['RMSE'] + testing_matrix = {} + testing_matrix['MAE'] = mae + testing_matrix['MSE'] = mse + testing_matrix['R2'] = R2 + testing_matrix['RMSE'] = rmse + result['testing_matrix'] = testing_matrix + forecastjson = resultJsonObj['data']['forecasts'] + result['forecast'] = forecastjson + ''' + testing_matrix = {} + testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) + testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) + testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) + testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) + result['testing_matrix'] = testing_matrix + forecastjson = resultJsonObj['data']['forecasts'] + result['forecast'] = forecastjson + elif result['BestModel'] == 'LSTM' or result['BestModel'] == 'MLP': + testing_matrix = {} + testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) + testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) + result['testing_matrix'] = testing_matrix + forecastjson = resultJsonObj['data']['forecasts'] + result['forecast'] = forecastjson + else: + testing_matrix = {} + testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) + testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) + testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) + testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) + result['testing_matrix'] = testing_matrix + forecastjson = resultJsonObj['data']['forecasts'] + result['forecast'] = forecastjson + elif result['ModelType'] == 'topicmodelling': + result['problem_type'] = 'TopicModelling' + topics = resultJsonObj['topics'] + + df_topic = [] + dataDict = {} + for x in topics: + dataDict = {} + words = topics[x] + print(words) + word = '' + for key in words: + print(key) + if word != '': + word = word+', ' + word = word+key+'('+str(round(words[key],2))+')' + dataDict[""ID""] = x + dataDict[""Words""] = word + df_topic.append(dataDict) + result['topics'] = df_topic + elif result['ModelType'].lower() == 'association rule': + result['problem_type'] = 'AssociationRules' + deploy_location = result['DeployLocation'] + freq_item_file = os.path.join(result['DeployLocation'],'frequentItems.csv') + if(os.path.isfile(freq_item_file)): + rules_file = os.path.join(result['DeployLocation'],'associationRules.csv') + if(os.path.isfile(rules_file)): + df_rules = pd.read_csv(rules_file) + df_rules = df_rules[['antecedents','consequents','support','confidence','lift']] + #df_rules['antecedents'] = df_rules['antecedents'] + result['rules'] = df_rules + else: + result['error'] = 'There are no association found in frequent items above that threshold (minThreshold)' + else: + result['error'] = 'There are no frequent items above that threshold (minSupport), try by reducing the minSupport value' + elif result['ModelType'] == 'clustering': + result['problem_type'] = 'Clustering' + testing_matrix = {} + if 'SilHouette_Avg' in resultJsonObj['data']['matrix']: + testing_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2) + else: + testing_matrix['SilHouette_Avg'] = 'NA' + if 'DaviesBouldinScore' in resultJsonObj['data']['matrix']: + testing_matrix['DaviesBouldinScore'] = round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2) + else: + testing_matrix['DaviesBouldinScore'] = 'NA' + if 'CalinskiHarabazScore' in resultJsonObj['data']['matrix']: + testing_matrix['CalinskiHarabazScore'] = 
round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2) + else: + testing_matrix['CalinskiHarabazScore'] = 'NA' + + centroidpath = os.path.join(result['DeployLocation'],'centers.csv') + if(os.path.isfile(centroidpath)): + df_center = pd.read_csv(centroidpath) + df_center = df_center.rename(columns={""Unnamed: 0"": ""Cluster""}) + result['centerpoints'] = round(df_center,2) + result['testing_matrix'] = testing_matrix + training_matrix = {} + if 'SilHouette_Avg' in resultJsonObj['data']['matrix']: + training_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2) + training_matrix['DaviesBouldinScore'] = round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2) + training_matrix['CalinskiHarabazScore'] = round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2) + else: + training_matrix['SilHouette_Avg'] = 'NA' + training_matrix['DaviesBouldinScore'] = 'NA' + training_matrix['CalinskiHarabazScore'] = 'NA' + + result['training_matrix'] = training_matrix + #print(result) + evaluatedModelsList = resultJsonObj['data']['EvaluatedModels'] + #print(evaluatedModelsList) + for index in range(len(evaluatedModelsList)): + if evaluatedModelsList[index]['Score'] == 'NA': + evaluatedModelsList[index]['Score'] = 'NA' + else: + evaluatedModelsList[index]['Score'] = round(float(evaluatedModelsList[index]['Score']), 4) + if result['ModelType'] == 'classification': + evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score'],reverse=True) + else: + evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score']) + result['EvaluatedModels'] = evaluatedModelsList + result['LogFile'] = resultJsonObj['data']['LogFile'] + return result, survical_images ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
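# ---------------------------------------------------------------------------
# Hedged, standalone version of the leaderboard step just above: each evaluated
# model's score is rounded to four decimals (an 'NA' score is left alone) and the
# list is sorted best-first, descending for classification and ascending for
# error-style metrics. The 'Model' key and the scores below are invented for this
# sketch; only 'Score' is read by the original code.
# ---------------------------------------------------------------------------
def _rank_evaluated_models(evaluated_models, model_type):
    for entry in evaluated_models:
        if entry['Score'] != 'NA':
            entry['Score'] = round(float(entry['Score']), 4)
    reverse = (model_type == 'classification')      # higher score is better here
    return sorted(evaluated_models, key=lambda k: k['Score'], reverse=reverse)

print(_rank_evaluated_models([{'Model': 'LogisticRegression', 'Score': '0.91237'},
                              {'Model': 'RandomForest', 'Score': '0.94561'}],
                             'classification'))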
+* +''' +import requests +import json +import os +from datetime import datetime +import socket +import getmac +from appbe.sqliteUtility import sqlite_db +import pandas as pd +from appbe.dataPath import DATA_DIR +def TelemetryCreateSyncState(state): + try: + newdata = {} + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + now = datetime.now() + SyncingTime = int(datetime.timestamp(now)) + newdata.update({'ID':['1'],'state':[state],'syncingTime':[SyncingTime]}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'syncState') + except Exception as e: + print(e) + pass + +def TelemetryUpdateSyncState(state): + try: + newdata = {} + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + now = datetime.now() + " +"SyncingTime = int(datetime.timestamp(now)) + updated_data = '""state""=""'+state+'"",""syncingTime""=""'+str(SyncingTime)+'""' + sqlite_obj.update_data(updated_data,'ID=""1""','syncState') + except Exception as e: + print(e) + pass +def checkTelemtry(): + import subprocess + import sys + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) + if os.path.exists(scriptPath): + outputStr = subprocess.Popen([sys.executable,scriptPath,'-m','pushtelemetry']) + +def SyncTelemetry(): + try: + newdata = {} + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + if sqlite_obj.table_exists('syncState'): + data = sqlite_obj.read_data('syncState')[0] + param_keys = ['ID','state','syncingTime'] + sync_data = dict((x,y) for x,y in zip(param_keys,data)) + #print(sync_data['state'],sync_data['syncingTime']) + if sync_data['state'].lower() != 'syncing': + sync_time = sync_data['syncingTime'] + now = datetime.now() + currTime = datetime.timestamp(now) + diffTime = int(float(currTime)) - int(float(sync_time)) + #print(diffTime) + if int(diffTime) > 86400: + TelemetryUpdateSyncState('Syncing') + SendTelemetryUpdate(sync_time) + TelemetryUpdateSyncState('Done') + else: + TelemetryCreateSyncState('Initialize') + except Exception as e: + print(e) + pass + + +def UseCaseCreated(Usecase): + try: + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + newdata = {} + now = datetime.now() + ID = datetime.timestamp(now) + record_date = int(datetime.timestamp(now)) + computername = socket.getfqdn() + macaddress = getmac.get_mac_address() + try: + user = os.getlogin() + except: + user = 'NA' + newdata.update({'ID':[str(int(ID))],'RecordDate': [record_date],'Usecase': [Usecase],'Operation':['Created'],'User':[str(user)],'HostName' :[computername],'MACAddress':[macaddress],'ProblemType':[''],'Algorithms':[''],'EDA':['No'],'Prediction':['No'],'MLaC':['No'],'Drift':['No'],'TrustedAI':['No']}) + sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'logs') + except Exception as e: + print(e) + pass +def UpdateTelemetry(Usecase,operation,value): + try: + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + data = sqlite_obj.read_data('logs','Usecase=""'+Usecase+'""') + #print(data) + if sqlite_obj.table_exists('logs'): + updated_data = operation+'=""'+value+'""' + now = datetime.now() + ID = datetime.timestamp(now) + record_date = int(datetime.timestamp(now)) + updated_data += ',""RecordDate""=""'+str(record_date)+'""' + sqlite_obj.update_data(updated_data,'Usecase=""'+Usecase+'""','logs') + except Exception as e: + print(e) + pass +def 
SendTelemetryUpdate(sync_time): + file_path = os.path.join(DATA_DIR, 'sqlite') + sqlite_obj = sqlite_db(file_path, 'telemetry.db') + if sqlite_obj.table_exists('logs'): + ddata = sqlite_obj.read_data(""logs"",""RecordDate >= '""+str(sync_time)+""'"") + #print(ddata) + keys = sqlite_obj.column_names('logs') + for data in ddata: + now = datetime.now() + ID = datetime.timestamp(now) + item = {} + item['ID'] = str(int(ID)) + item['RecordID'] = data[ keys.index('ID')] + item['RecordDate'] = data[ keys.index('RecordDate')] + item['Usecase'] = data[ keys.index('Usecase')] + item['Operation'] = data[ keys.index('Operation')] + item['User'] = data[ keys.index('User')] + item['HostName'] = data[ keys.index('HostName')] + item['MACAddress'] = data[ keys.index('MACAddress')] + item['Algorithms'] = data[ keys.index('Algorithms')] + item['ProblemType'] = data[ keys.index('ProblemType')] + item['EDA'] = data[ keys.index('EDA')] + item['Prediction'] = data[ keys.index('Prediction')] + item['MLaC'] = data[ keys.index('MLaC')] + item['Drift'] = data[ keys.index('Drift')] + item['TrustedAI'] = data[ keys.index('TrustedAI')] + url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' + record = {} + record['TableName'] = 'AION_LOGS' + record['Item'] = item + record = json.dumps(record) + #print(record) + try: + response = requests.post(url, data=record,headers={""x-api-key"":""Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK"",""Content-Type"":""application/json"",}) + except Exception as e: + print(e) + +def telemetry_data(operation,Usecase,data): + now = datetime.now() + ID = datetime.timestamp(now) + record_date = now.strftime(""%y-%m-%d %H:%M:%S"") + computername = socket.getfqdn() + macaddress = getmac.get_mac_address() + try: + user = os.getlogin() + except: + user = 'NA' + item = {} + item['ID'] = str(int(ID)) + item['record_date'] = record_date + item['UseCase'] = Usecase + item['operation'] = operation + item['remarks'] = data + item['user'] = str(user) + item['hostname'] = computername + item['macaddress'] = macaddress + url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' + record = {} + record['TableName'] = 'AION_OPERATION' + record['Item'] = item + record = json.dumps(record) + + try: + response = requests.post(url, data=record,headers={""x-api-key"":""Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK"",""Content-Type"":""application/json"",}) + check_telemetry_file() + except Exception as inst: + + filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') + f=open(filename, ""a+"") + f.write(record+'\\n') + f.close() + +def check_telemetry_file(): + file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') + if(os.path.isfile(file_path)): + f = open(file_path, 'r') + url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' + file_content = f.read() + f.close() + matched_lines = file_content.split('\\n') + write_lines = [] + for record in matched_lines: + try: + response = requests.post(url, data=record,headers={""x-api-key"":""Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK"",""Content-Type"":""application/json"",}) + except: + write_lines.append(record) + f = open(file_path, ""a"") + f.seek(0) + f.truncate() + for record in write_lines: + f.write(record+'\\n') + f.close() + + + else: + return True ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* 
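# ---------------------------------------------------------------------------
# Hedged sketch of the store-and-forward pattern used by telemetry_data() and
# check_telemetry_file() above: records that cannot be posted are appended to a
# local queue file, and a later run re-posts each queued line, keeping only the
# ones that fail again. The queue path and the 'send' callable are parameters in
# this sketch; the real module hard-codes the endpoint and its telemetry file.
# ---------------------------------------------------------------------------
import os

def flush_offline_queue(queue_path, send):
    # 'send' is any callable that raises on failure, e.g. a requests.post wrapper.
    if not os.path.isfile(queue_path):
        return
    with open(queue_path, 'r') as f:
        pending = [line for line in f.read().split('\n') if line.strip()]
    still_failing = []
    for record in pending:
        try:
            send(record)
        except Exception:
            still_failing.append(record)
    with open(queue_path, 'w') as f:
        for record in still_failing:
            f.write(record + '\n')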
============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +from typing import Tuple, Union, List +import numpy as np +from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import SGDClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.tree import DecisionTreeClassifier +from flwr.common.logger import log +from logging import INFO + +TRUE_FALSE_MAPPING = {'True':'False','true':'false',True:False,'y':'n','Y':'N','Yes':'No','yes':'no','YES':'NO'} +XY = Tuple[np.ndarray, np.ndarray] +Dataset = Tuple[XY, XY] +LogRegParams = Union[XY, Tuple[np.ndarray]] +XYList = List[XY] +modelUsed=None +modelname=None +def setmodelName(modelselected): + try: + modelname=str(modelselected) + print(""setmodelName ,given modelname: \\n"",modelname) + if (modelname.lower() == 'logisticregression'): + modelUsed=LogisticRegression() + return True + elif (modelname.lower() == ""naivebayes""): + modelUsed = GaussianNB() + return True + elif (modelname.lower() == ""sgdclassifier""): + #from sklearn.linear_model import SGDClassifier + modelUsed=SGDClassifier() + return True + elif (modelname.lower() == ""knn""): + modelUsed = KNeighborsClassifier() + return True + elif (modelname.lower() == ""decisiontreeclassifier""): + modelUsed = DecisionTreeClassifier() + return True + else: + return False + except Exception as e: + log(INFO, ""set fl model name fn issue: "",e) + +def get_model_parameters(model:modelUsed) -> LogRegParams: + """"""Returns the paramters of a sklearn LogisticRegression model."""""" + model_name=model.__class__.__name__ + if model.fit_intercept: + params = (model.coef_, model.intercept_) + else: + params = (model.coef_,) + + return params + + +def set_model_params( + model:modelUsed, params: LogRegParams +) -> modelUsed: + """"""Sets the parameters of a sklean LogisticRegression model."""""" + model.coef_ = params[0] + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = params[1] + except Exception as e: + log(INFO, ""set_model_params fn issue: "",e) + pass + + return model + + +def set_initial_params(model,no_classes,no_features): + """"""Sets initial parameters as zeros Required since model params are + uninitialized until model.fit is called. + + But server asks for initial parameters from clients at launch. Refer + to sklearn.linear_model.LogisticRegression documentation for more + information. 
+ """""" + + n_classes = no_classes + n_features = no_features + model.classes_ = np.array([i for i in range(n_classes)]) + model.coef_ = np.zeros((n_classes, n_features)) + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + except Exception as e: + log(INFO, ""set_initial_params fn issue: "",e) + pass + + + +def shuffle(X: np.ndarray, y: np.ndarray) -> XY: + """"""Shuffle X and y."""""" + rng = np.random.default_rng() + idx = rng.permutation(len(X)) + return X[idx], y[idx] + + +def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: + """"""Split X and y into a number of partitions."""""" + return list( + zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions)) + ) + +def get_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if v in TRUE_FALSE_MAPPING.keys(): + return k + return default_value + +def get_true_options( d): + options = [] + if isinstance(d, dict): + for k,v in d.items(): + if v in TRUE_FALSE_MAPPING.keys(): + options.append(k) + return options + +def set_true_option(d, key=None, value='True'): + if key in d.keys(): + if value in TRUE_FALSE_MAPPING.keys(): + for k in d.keys(): + d[ k] = TRUE_FALSE_MAPPING[ value] + d[key] = value + return d + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +def analysis_images(folder_path): + from AIX import image_eda + qualityscore = image_eda.img_MeasureImageQuality(folder_path) + eda_result = image_eda.img_EDA(folder_path) + #Image Duplicate Finder + duplicate_img = image_eda.img_duplicatefinder(folder_path) + color_plt = image_eda.img_plot_colour_hist(folder_path) + return qualityscore,eda_result,duplicate_img,color_plt ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ==================================================" +"=========================== +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
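# ---------------------------------------------------------------------------
# Hedged illustration (class and feature counts invented) of how the federated-
# learning helpers defined above fit together: a fresh scikit-learn
# LogisticRegression is given zero-valued parameters so it can be serialised
# before any fit() call, and the get/set pair moves those parameters in and out
# as plain numpy arrays, which is the kind of exchange a Flower client performs
# with the server.
# ---------------------------------------------------------------------------
from sklearn.linear_model import LogisticRegression

def _fl_param_roundtrip_demo(n_classes=2, n_features=4):
    model = LogisticRegression()
    set_initial_params(model, n_classes, n_features)    # zeros, helper defined above
    params = get_model_parameters(model)                # (coef_, intercept_)
    # ...parameters would travel to the server, be aggregated and come back...
    model = set_model_params(model, params)
    return model.coef_.shape                            # (n_classes, n_features)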
Copying or reproducing the +* contents of this fil" +"dictionary.get(""number_samples"") + number_numerical_features = dictionary.get(""number_numerical_features"") + number_categorical_features = dictionary.get(""number_categorical_features"") + missing_proportion = dictionary.get(""missing_proportion"") + number_informative = dictionary.get(""number_informative"") + number_target = dictionary.get(""number_target"") + bias = dictionary.get(""bias"") + noise = dictionary.get(""noise"") + value_range_dict = dictionary.get(""value_range_dict"") + gen_data_series(univariate=is_univariate, + number_samples=number_samples, + number_numerical_features=number_numerical_features, + file_name=data_path, + number_categorical_features=number_categorical_features, + # number_text_features=2, + missing_proportion=missing_proportion, + number_informative=number_informative, + number_target=number_target, bias=bias, + noise=noise, + value_range_dict=value_range_dict) + + +if __name__ == ""__main__"": + data_generated_csv() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#Standard Library modules +import sqlite3 +import pandas as pd +from pathlib import Path +class sqlite_writer(): + def __init__(self, target_path): + self.target_path = Path(target_path) + database_file = self.target_path.stem + '.db' + self.db = sqlite_db(self.target_path, database_file) + + def file_exists(self, file): + if file: + return self.db.table_exists(file) + else: + return False + + def read(self, file): + return self.db.read(file) + + def write(self, data, file): + self.db.write(data, file) + + def close(self): + self.db.close() + +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + '.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self, table_name,condition=''): + if condition == '': + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + else: + return pd.read_sql_query(f""SELECT * FROM {table_name} WHERE {condition}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def update(self,table_name,updates,condition): + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + return True + def write(self,data, table_name): + if not 
self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + + def close(self): + self.conn.close() + import json +import os +import sys +import re +import numpy as np + + +def check_unsupported_col(config): #bugId14444 + unsupported_chars = '[]<>#{}@&' + try: + featureList = config['basic']['featureList'] + return any([x in y for x in unsupported_chars for y in featureList]) + except Exception as e: + print(str(e)) + return False + +def check_granularity(configSettingsJson,datapath=None): + try: + from AION.appbe.utils import get_true_option + import pandas as pd + from pathlib import Path + seconds_per_unit = {'second':1,'minute':60,'hour':60 * 60,'day':24 * 60 * 60,'week':7 * 24 * 60 * 60,'month':30 * 24 * 60 * 60,'year':365 * 24 * 60 * 60} + if not get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['type']): + return '' + if isinstance( configSettingsJson['basic']['dateTimeFeature'], list): + datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0] + else: + datetime_feature = configSettingsJson['basic']['dateTimeFeature'] + if get_true_option(configSettingsJson['basic']['analysisType']) == 'timeSeriesForecasting' and datetime_feature: + if not datapath: + datapath = configSettingsJson['basic']['dataLocation'] + if Path( datapath).exists(): + df = pd.read_csv(datapath, nrows=2) + datetime = pd.to_datetime(df[ datetime_feature]) + if len(datetime) > 1: + source_time_delta = (datetime[1] - datetime[0]).total_seconds() + granularity_unit = get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit']) + size = int(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['size']) + target_time_delta = size * seconds_per_unit[granularity_unit] + amplify = int(source_time_delta / target_time_delta) + if amplify > 20: + return f'Current Granularity setting will amplify the data approx {amplify} times. 
Depending on your system configuration, this may cause Memory error' + + return '' + except Exception as e: + return '' + +def getStatusCount(matched_lines,total_steps): + stepsdone = 0 + leaner = True + #print(matched_lines) + for line in matched_lines: + if 'AION feature transformation completed' in line: + stepsdone = stepsdone + 1 + elif 'AION feature engineering completed' in line: + stepsdone = stepsdone + 1 + elif 'AION Association Rule completed' in line: + stepsdone = stepsdone + 1 + elif 'AION Image Classification completed' in line: + stepsdone = stepsdone + 1 + elif 'AION Association Rule completed' in line: + stepsdone = stepsdone + 1 + elif 'AION State Transition completed' in line: + stepsdone = stepsdone + 1 + elif 'AION SurvivalAnalysis completed' in line: + stepsdone = stepsdone + 1 + elif 'AION Recommender completed' in line: + stepsdone = stepsdone + 1 + elif 'AION Gluon Stop' in line: + stepsdone = stepsdone + 1 + elif 'AION Evaluation Stop' in line: + stepsdone = stepsdone + 1 + elif 'AION Object Detection completed' in line: + stepsdone = stepsdone + 1 + elif ('training completed' in line) and leaner: + stepsdone = stepsdone + 1 + leaner = False + elif 'Prediction Service completed' in line: + stepsdone = stepsdone + 1 + elif 'AION TimeSeries Forecasting started' in line: #task 11997 + stepsdone = stepsdone + 1 + elif 'Distributed Learning Completed' in line: + stepsdone = stepsdone + 4 + elif 'AION Batch Deployment completed' in line: + stepsdone = stepsdone + 2 + match_lines = [] + for line in matched_lines: + count = len(line)-len(line.lstrip()) + uline = line.split('...') + uline = uline[1] + if count == 0: + uline = '|... '+uline+'' + elif count == 8 or count == 1: + uline = ' |... '+uline+'' + elif count == 16 or count == 2: + uline = ' |... '+uline+'' + elif count == 32 or count == 3: + uline = ' |... '+uline+'' + else: + uline = line + match_lines.append(uline) + stepline = 'Stage: ' + str(stepsdone) + '/' + str(total_steps) + ' Complete' + match_lines.insert(0, stepline) + #print(match_lines) + output = ""\\n"".join([status_text for status_text in match_lines]) + output = ""
{}
"".format(output) + #print(output) + return(output) + +def calculate_total_interations(config): + try: + noOfIterations = 0 + problemtypes = config['basic']['analysisType'] + problem_type = """" + for key in problemtypes: + if config['basic']['analysisType'][key] == 'True': + problem_type = key + break + if problem_type.lower() in ['classification','regression']: + algorithms = config['basic']['algorithms'][problem_type] + for key in algorithms: + if config['basic']['algorithms'][problem_type][key] == 'True': + if key not in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Deep Q Network','Dueling Deep Q Network']: + if problem_type.lower() == 'classification': + configparam = config['advance']['mllearner_config']['modelParams']['classifierModelParams'][key] + else: + configparam = config['advance']['mllearner_config']['modelParams']['regressorModelParams'][key] + param = paramDefine(configparam,config['advance']['mllearner_config']['optimizationMethod']) + interationsum = 1 + for x in param.values(): + interationsum = interationsum*len(x) + if config['advance']['mllearner_config']['optimizationMethod'].lower() == 'random': + if interationsum > int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations']): + interationsum = int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations']) + noOfIterations = noOfIterations+interationsum + else: + if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: + if problem_type.lower() == 'classification': + configparam = config['advance']['dllearner_config']['modelParams']['classifierModelParams'][key] + else: + configparam = config['advance']['dllearner_config']['modelParams']['regressorModelParams'][key] + interationsum = 1 + for j in list(configparam.keys()): + if isinstance(configparam[j],(list,dict,tuple,str)): + x = configparam[j].split(',') + interationsum = interationsum*len(x) + noOfIterations = noOfIterations+interationsum + elif key in ['Deep Q Network','Dueling Deep Q Network']: + if problem_type.lower() == 'classification': + configparam = config['advance']['rllearner_config']['modelParams']['classifierModelParams'][key] + interationsum = 1 + for j in list(configparam.keys()): + if isinstance(configparam[j],(list,dict,tuple,str)): + x = configparam[j].split(',') + interationsum = interationsum*len(x) + noOfIterations = noOfIterations+interationsum + elif problem_type.lower() in ['llmfinetuning']: + algorithms = config['basic']['algorithms'][problem_type] + for key in algorithms: + if config['basic']['algorithms'][problem_type][key] == 'True': + noOfIterations = configparam = config['advance']['llmFineTuning']['modelParams'][key]['epochs'] + break + else: + noOfIterations= 'NA' + except Exception as e: + print(e) + noOfIterations = 'NA' + pass + return(noOfIterations) +def paramDefine(paramSpace, method): + paramDict = {} + for j in list(paramSpace.keys()): + inp = paramSpace[j] + try: + isLog = False + isLin = False + isRan = False + isList = False + isString = False + try: + # check if functions are given as input and reassign paramspace + v = paramSpace[j] + if 'logspace' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isLog = True + elif 'linspace' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + 
isLin = True + elif 'range' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """")" +" + isRan = True + elif 'list' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isList = True + elif '[' and ']' in paramSpace[j]: + paramSpace[j] = v.split('[')[1].split(']')[0].replace("" "", """") + isList = True + x = paramSpace[j].split(',') + except: + x = paramSpace[j] + str_arg = paramSpace[j] + + # check if arguments are string + try: + test = eval(x[0]) + except: + isString = True + + if isString: + paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x}) + else: + res = eval(str_arg) + if isLin: + y = eval('np.linspace' + str(res)) + paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) + elif isLog: + y = eval('np.logspace' + str(res)) + paramDict.update( + {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y}) + elif isRan: + y = eval('np.arange' + str(res)) + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + # check datatype of argument + elif isinstance(eval(x[0]), bool): + y = list(map(lambda i: eval(i), x)) + paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y}) + elif isinstance(eval(x[0]), float): + res = eval(str_arg) + if len(str_arg.split(',')) == 3 and not isList: + y = eval('np.linspace' + str(res)) + #print(y) + paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) + else: + y = list(res) if isinstance(res, tuple) else [res] + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + else: + res = eval(str_arg) + if len(str_arg.split(',')) == 3 and not isList: + y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res)) + else: + y = list(res) if isinstance(res, tuple) else [res] + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + except Exception as inst: + print(inst) + return paramDict + +def calculate_total_activities(config): + req_step = 0 + problemtypes = config['basic']['analysisType'] + problem_type = """" + for key in problemtypes: + if config['basic']['analysisType'][key] == 'True': + problem_type = key + break + Modelproblem = problem_type + if Modelproblem.lower() in ['classification','regression','clustering','anomalydetection','topicmodelling']: + req_step = req_step+4 + if Modelproblem.lower() in ['timeseriesforecasting','imageclassification','objectdetection','multilabelprediction','similarityidentification','contextualsearch']: #task 11997 + req_step = req_step+2 + if Modelproblem.lower() in ['survivalanalysis']: + req_step = req_step+3 + if Modelproblem.lower() in ['recommendersystem']: + if config['basic']['algorithms']['recommenderSystem']['ItemRating'] == 'True': + req_step = req_step+3 + if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'] == 'True': + req_step = req_step+1 + if Modelproblem.lower() in ['statetransition']: + req_step = req_step+1 + return (req_step) +def getModelStatus(Existusecases,modelid): + model = Existusecases.objects.get(id=modelid) + return(model.Status) +def changeModelStatus(Existusecases,modelid,status,problemType,deployPath): + model = Existusecases.objects.get(id=modelid) + model.Status = status + model.ProblemType = problemType + model.DeployPath = deployPath + model.save() +def checkversionrunningstatus(modelid,usecasedetails,Existusecases): + 
modelx = Existusecases.objects.get(id=modelid) + ConfigPath = str(modelx.ConfigPath) + status = 'Running' + try: + if os.path.exists(ConfigPath): + with open(ConfigPath, 'r') as json_file: + data = json.load(json_file) + json_file.close() + deployPath = str(data['basic']['deployLocation']) + modelName = data['basic']['modelName'] + modelVersion = data['basic']['modelVersion'] + modelName = modelName.replace("" "", ""_"") + logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log') + print(logfile) + if os.path.exists(logfile): + with open(logfile) as f: + contents = f.read() + f.close() + contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1) + contents = contents.strip() + print(contents) + if contents != '': + resultJsonObj = json.loads(contents) + odataFile = str(modelx.TrainOuputLocation) + with open(odataFile, 'w') as json_file: + json.dump(resultJsonObj, json_file) + json_file.close() + modelx.Status = resultJsonObj['status'] + status = modelx.Status + if resultJsonObj['status'] == 'SUCCESS': + modelx.DeployPath = str(resultJsonObj['data']['deployLocation']) + if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']: + modelx.ProblemType = 'unsupervised' + else: + modelx.ProblemType = 'supervised' + modelx.save() + except Exception as e: + pass + return status +def updateLLM_Model_training_logs(deployPath,modelName,modelVersion,model,configPath): + from appbe.prediction import get_instance + hypervisor,instanceid,region,image = get_instance(modelName+'_'+str(modelVersion)) + from llm.llm_tuning import llm_logs + cloudconfig = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json')) + llm_logs(configPath,cloudconfig,instanceid,hypervisor,model) + +def checkModelUnderTraining(request,usecasedetails,Existusecases): + try: + models = Existusecases.objects.filter(Status='Running') + for model in models: + ConfigPath = str(model.ConfigPath) + try: + if os.path.exists(ConfigPath): + with open(ConfigPath, 'r') as json_file: + data = json.load(json_file) + json_file.close() + deployPath = str(data['basic']['deployLocation']) + modelName = data['basic']['modelName'] + modelVersion = data['basic']['modelVersion'] + modelName = modelName.replace("" "", ""_"") + if data['basic']['analysisType']['llmFineTuning'] == 'True': + mlmodels ='' + algorihtms = data['basic']['algorithms']['llmFineTuning'] + for k in algorihtms.keys(): + if data['basic']['algorithms']['llmFineTuning'][k] == 'True': + if mlmodels != '': + mlmodels += ', ' + mlmodels += k + + updateLLM_Model_training_logs(deployPath,modelName,modelVersion,mlmodels,ConfigPath) + logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log') + if os.path.exists(logfile): + with open(logfile,encoding=""utf-8"") as f: + contents = f.read() + f.close() + contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1) + contents = contents.strip() + if contents != '': + resultJsonObj = json.loads(contents) + odataFile = str(model.TrainOuputLocation) + with open(odataFile, 'w') as json_file: + json.dump(resultJsonObj, json_file) + json_file.close() + modelx = Existusecases.objects.get(id=model.id) + modelx.Status = resultJsonObj['status'] + if resultJsonObj['status'] == 'SUCCESS': + modelx.DeployPath = str(resultJsonObj['data']['deployLocation']) + if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']: + modelx.ProblemType = 
'unsupervised' + else: + modelx.ProblemType = 'supervised' + modelx.save() + except Exception as e: + print(ConfigPath) + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + pass + except Exception as e: + print(e) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import shutil +import subprocess +import sys +import time +import glob +import re +from appbe.pages import get_usecase_page +import json +from django.http import FileResponse +def startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH): + try: + modelid = request.POST.get('modelid') + #incfilepath = request.POST.get('incfilepath') + Datapath = request.FILES['incfilepath'] + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv') + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + # destination.close()#bugfix 11656 + incfilepath = dataFile + p = Existusecases.objects.get(id=modelid) + deployPath = str(p.DeployPath) + scriptPath = os.path.abspath(os.path.join(deployPath,'aion_inclearning.py')) + request.session['IsRetraining'] = 'No' + if not os.path.exists(scriptPath): + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['Msg'] = 'Incremental/Online learning not supported for this model.For online training select Online Training in basic configuration page and provide with training' + else: + + outputStr = subprocess.check_output([sys.executable, scriptPath, incfilepath]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + if decoded_data['status'] == 'SUCCESS': + msg = decoded_data['Msg'] + context['Status'] = 'SUCCESS' + context['Msg'] = msg + else: + msg = decoded_data['Msg'] + context['Status'] = 'SUCCESS' + context['Msg'] = msg + except Exception as e: + print(e) + try: + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + except Exception as msg: + context['errorMsg'] = msg + return action,context + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
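+
+# --- Illustrative sketch (not part of the original source) ---
+# Both checkversionrunningstatus() and startIncrementallearning() above recover the
+# training result by grepping the marker 'aion_learner_status:' out of a log file or
+# subprocess output and json-loading the rest of that line. A defensive version of
+# that pattern (helper name is illustrative):
+import json
+import re
+
+def extract_learner_status(text: str):
+    match = re.search(r'aion_learner_status:(.*)', text, re.IGNORECASE)
+    if not match:                      # marker absent: training not finished or log incomplete
+        return None
+    payload = match.group(1).strip()
+    if not payload:
+        return None
+    try:
+        return json.loads(payload)     # e.g. {'status': 'SUCCESS', 'data': {...}}
+    except json.JSONDecodeError:
+        return None
+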
+* +''' +import os +import tensorflow +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense, Dropout, Flatten +from tensorflow.keras.layers import Conv2D, MaxPooling2D +from tensorflow.keras.utils import to_categorical +from tensorflow.keras.preprocessing import image +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.model_selection import train_test_split +from tensorflow.keras.layers import Input +from tensorflow.keras.models import Model +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.applications import VGG16 +from tensorflow.keras.callbacks import EarlyStopping +import logging +from sklearn.preprocessing import LabelEncoder +from statistics import mean +import sys +from learner.machinelearning import machinelearning +from learner.aion_matrix import a" +"ion_matrix +from profiler.imageAug import ImageAugmentation +from pathlib import Path + + +class ImageLearning: + def __init__(self,dataFrame,input_directory,outputdir,modelname,hyperParam, AugEnabled,keepAugImages,operations,augConf): + self.image_list = dataFrame + self.input_directory = input_directory + self.outputdir = outputdir + self.modelname = modelname + self.hyperParam = hyperParam + self.labelMapping={} + self.log = logging.getLogger('eion') + self.AIONNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + self.AugEnabled = AugEnabled + self.keepAugImages = keepAugImages + self.operations = operations + self.augConf = augConf + + def TrainCAST(self,predicted_data_file): + + datatype = self.image_list['Label'].dtypes + if datatype not in self.AIONNumericDtypes: + labelEncode= LabelEncoder() + self.image_list['Label'] = self.image_list['Label'].apply(str) + self.image_list['Label'] = labelEncode.fit_transform(self.image_list['Label']) + self.labelMapping = dict(zip(labelEncode.classes_, labelEncode.transform(labelEncode.classes_))) + self.log.info('\\n-------> First Ten Rows of Input Data After Encoding: ') + self.log.info(self.image_list.head(10)) + self.log.info('Status:- |... 
Target Feature Encoding Done') + if not os.path.exists(self.outputdir): + os.makedirs(self.outputdir) + train_df, test_df = train_test_split(self.image_list, random_state=42, test_size=self.hyperParam['test_split_ratio']) + if self.AugEnabled: + csv_file = ""tempTrainDf.csv"" + train_df.to_csv(csv_file, index=False) + ia = ImageAugmentation(self.input_directory, csv_file) + csv_file = ia.augment(""imageclassification"", self.operations,None,self.augConf) + train_df = pd.read_csv(csv_file) + Path(csv_file).unlink() + train_image = [] + train_df.reset_index(drop=True, inplace=True) + for i in range(train_df.shape[0]): + #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) + img = image.load_img(os.path.join(self.input_directory,str(train_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) + img = image.img_to_array(img) + img = img/255 + train_image.append(img) + test_image = [] + test_df.reset_index(drop=True, inplace=True) + for i in range(test_df.shape[0]): + #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) + img = image.load_img(os.path.join(self.input_directory,str(test_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) + img = image.img_to_array(img) + img = img/255 + test_image.append(img) + self.log.info('Status:- |... Image Loading Done') + X_train = np.array(train_image) + y_train = train_df['Label'] + X_test = np.array(test_image) + y_test = test_df['Label'] + ytrain = y_train.values + ytrain = to_categorical(ytrain) + ytest = y_test.values + ytest = to_categorical(ytest) + #print(y) + + self.log.info(""Loading Imagenet Weights..."") + + if self.modelname == ""densenet"": + self.log.info('Loading Densenet model') + baseModel = tensorflow.keras.applications.DenseNet121(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #98 + elif self.modelname == ""inception"": + self.log.info('Loading Inception model') + baseModel = tensorflow.keras.applications.InceptionV3(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #97 + + headModel = baseModel.output + headModel = Flatten(name=""flatten"")(headModel) + headModel = Dense(1024, activation='relu')(headModel) + headModel = Dropout(0.5)(headModel) + headModel = Dense(2, activation='sigmoid')(headModel) + + model = Model(inputs=baseModel.input, outputs=headModel) + + self.log.info(""[INFO] compiling model..."") + opt = Adam(lr=self.hyperParam['lr']) + model.compile(loss=""binary_crossentropy"", optimizer=opt, metrics=[""accuracy""]) + #early_stop = EarlyStopping(monitor='val_loss',patience=2) + #history = model.fit(X_train, y_train, epochs=hyperparam_config['epochs'], validation_data=(X_test, y_test), callbacks=[early_stop]) + history = model.fit(X_train, ytrain, epochs=self.hyperParam['epochs'], validation_data=(X_test, ytest)) + self.log.info('Status:- |... 
Image Classification Algorithm applied:'+str(self.modelname)) + #Saving trained model weights + model.save_weights(os.path.join(self.outputdir, self.modelname)) + saved_model = self.modelname + modelname = self.modelname + prediction = model.predict(X_train) + predictedData = np.argmax(prediction,axis=1) + mlobj = machinelearning() + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(y_train, predictedData,self.labelMapping) + prediction = model.predict(X_test) + predictedData = np.argmax(prediction,axis=1) + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performancematrix = mlobj.getClassificationPerformaceMatrix(y_test, predictedData,self.labelMapping) + df_test = pd.DataFrame() + df_test['actual'] = y_test + df_test['predict'] = predictedData + df_test.to_csv(predicted_data_file) + objClf = aion_matrix() + scoring_param = 'Accuracy' + score = objClf.get_score(scoring_param,y_test,predictedData) + #score = mean(history.history['accuracy']) + if self.AugEnabled and not self.keepAugImages: + ia.removeAugmentedImages(train_df) + scoredetails = '{""Model"":""'+modelname+'"",""Score"":'+str(round(score,2))+'}' + self.log.info('Status:- |... Score Accuracy: '+str(round(score,2))) + return saved_model,modelname,'ImageClassification',scoring_param,score,scoredetails,self.labelMapping,trainingperformancematrix,performancematrix + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
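+
+# --- Illustrative sketch (not part of the original source) ---
+# The TrainCAST() method above does classic transfer learning: a pretrained ImageNet
+# backbone with include_top=False, plus a small trainable classification head
+# (Flatten -> Dense -> Dropout -> Dense). The same construction as a standalone
+# helper; the input shape, class count and learning rate are example values.
+import tensorflow as tf
+from tensorflow.keras.layers import Dense, Dropout, Flatten, Input
+from tensorflow.keras.models import Model
+from tensorflow.keras.optimizers import Adam
+
+def build_transfer_model(input_shape=(224, 224, 3), n_classes=2, lr=1e-3):
+    base = tf.keras.applications.DenseNet121(weights='imagenet', include_top=False,
+                                             input_tensor=Input(shape=input_shape))
+    x = Flatten(name='flatten')(base.output)
+    x = Dense(1024, activation='relu')(x)
+    x = Dropout(0.5)(x)
+    outputs = Dense(n_classes, activation='sigmoid')(x)   # sigmoid head, as in TrainCAST
+    model = Model(inputs=base.input, outputs=outputs)
+    model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=lr),
+                  metrics=['accuracy'])
+    return model
+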
+* +''' +import os +import keras +from keras.models import Sequential +from keras.layers import Dense, Dropout, Flatten +from keras.layers import Conv2D, MaxPooling2D +from keras.utils import to_categorical +from keras.preprocessing import image +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.model_selection import train_test_split +from keras.utils import to_categorical +from Fkeras.layers import Input +from keras.models import Model +from keras.optimizers import Adam +from keras.applications import VGG16 +from tensorflow.keras.callbacks import EarlyStopping +from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve +import seaborn as sns + + +def PredictCAST(test_csv, test_dataset_directory, load_model_dir, model_name, hparams_config_file): + + hyperparam_config = hparams_config_file['img_classifier'] + + print(""[Info] loading imagenet weights..."") + #baseModel = keras.applications.ResNet101(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(128, 128, 3))) + if model_name == ""densenet"": + print('Loading Densenet model') + baseModel = keras.applications.DenseNet121(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98 + elif model_name == ""inception"": + print('Loading Inception model') + baseModel = keras.applications.InceptionV3(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97 + + headModel = baseModel.output + headModel = Flatten(name=""flatten"")(headModel) + headModel = Dense(1024, activation='relu')(headModel) + headModel = Dropout(0.5)(headModel) + headModel = Dense(2, activation='sigmoid')(headModel) + + model = Model(inputs=baseModel.input, outputs=headModel) + print(""[INFO] compiling model..."") + opt = Adam(lr=hyperparam_config['lr']) + model.compile(loss=""binary_crossentropy"", optimizer=opt, metrics=[""accuracy""]) + + model.load_weights(os.path.join(load_model_dir, model_name)) + #model.load_weights(load_model_dir) + + test_image = [] + + for i in range(test_csv.shape[0]): + img = image.load_img(test_dataset_directory + '/' + str(test_csv['file_name'][i]), target_size=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']), grayscale=False) + img = image.img_to_array(img) + img = img/255 + test_image.append(img) + test_images = np.array(test_image) + + test_labels = test_csv['class'].values + test_labels = to_categorical(test_labels) + + # making predictions + prediction = model.predict(test_images) + prediction = np.argmax(prediction,axis=1) + + print('Classification Report : ') + print(classification_report(test_csv['class'],prediction)) + + sns.heatmap(confusion_matrix(test_csv['class'],prediction),annot=True) + plt.show() + + print('Confusion matrix : ') + print(confusion_matrix(test_csv['class'],prediction)) + + print(""[INFO] Evaluating model accuracy and loss...Take some moment..."") + test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2) + print('\\nTest accuracy:', test_acc) + print('\\nTest loss:', test_loss) + + print(""Prediction Completed..."") + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 
2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +warnings.simplefilter(""ignore"") +import os +import numpy as np +from numpy import asarray +import cv2 +import sys +import random +import glob as glob +import math as m + +# for gamma function, called +from scipy.special import gamma as tgamma + +import matplotlib.image as mpimg +import skimage +from libsvm import svmutil,svm +#import svmutil +from svmutil import * +from svm import * + +from PIL import Image +from collections import Counter +from imutils import paths +import matplotlib.pyplot as plt + +import json + + +################################################################################### +#Input - # AGGD fit model, takes input as the MSCN Image / Pair-wise Product +#Output - best values of image parameters +#Defination - used as internal method to measure_ImageQualityScore +################################################################################### + + +def AGGDfit(structdis): + # variables to count positive pixels / negative pixels and their squared sum + poscount = 0 + negcount = 0 + possqsum = 0 + negsqsum = 0 + abssum = 0 + + poscount = len(structdis[structdis > 0]) # number of positive pixels + negcount = len(structdis[structdis < 0]) # number of negative pixels + + # calculate squared sum of positive pixels and negative pixels + possqsum = np.sum(np.power(structdis[structdis > 0], 2)) + negsqsum = np.sum(np.power(structdis[structdis < 0], 2)) + + # absolute squared sum + abssum = np.sum(structdis[structdis > 0" +"]) + np.sum(-1 * structdis[structdis < 0]) + + # calculate left sigma variance and right sigma variance + lsigma_best = np.sqrt((negsqsum/negcount)) + rsigma_best = np.sqrt((possqsum/poscount)) + + gammahat = lsigma_best/rsigma_best + + # total number of pixels - totalcount + totalcount = structdis.shape[1] * structdis.shape[0] + + rhat = m.pow(abssum/totalcount, 2)/((negsqsum + possqsum)/totalcount) + rhatnorm = rhat * (m.pow(gammahat, 3) + 1) * (gammahat + 1)/(m.pow(m.pow(gammahat, 2) + 1, 2)) + + prevgamma = 0 + prevdiff = 1e10 + sampling = 0.001 + gam = 0.2 + + # vectorized function call for best fitting parameters + vectfunc = np.vectorize(func, otypes = [np.float], cache = False) + + # calculate best fit params + gamma_best = vectfunc(gam, prevgamma, prevdiff, sampling, rhatnorm) + + return [lsigma_best, rsigma_best, gamma_best] + +def func(gam, prevgamma, prevdiff, sampling, rhatnorm): + while(gam < 10): + r_gam = tgamma(2/gam) * tgamma(2/gam) / (tgamma(1/gam) * tgamma(3/gam)) + diff = abs(r_gam - rhatnorm) + if(diff > prevdiff): break + prevdiff = diff + prevgamma = gam + gam += sampling + gamma_best = prevgamma + return gamma_best + +def compute_features(img): + scalenum = 2 + feat = [] + # make a copy of the image + im_original = img.copy() + + # scale the images twice + for itr_scale in range(scalenum): + im = im_original.copy() + # normalize the image + im = im / 255.0 + + # calculating MSCN coefficients + mu = cv2.GaussianBlur(im, (7, 7), 1.166) + mu_sq = mu * mu + sigma = cv2.GaussianBlur(im*im, (7, 7), 1.166) + sigma = (sigma - mu_sq)**0.5 + + # structdis is the MSCN image + structdis = im - mu + structdis /= (sigma + 1.0/255) + + # calculate best fitted parameters from MSCN image + best_fit_params = 
AGGDfit(structdis) + # unwrap the best fit parameters + lsigma_best = best_fit_params[0] + rsigma_best = best_fit_params[1] + gamma_best = best_fit_params[2] + + # append the best fit parameters for MSCN image + feat.append(gamma_best) + feat.append((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2) + + # shifting indices for creating pair-wise products + shifts = [[0,1], [1,0], [1,1], [-1,1]] # H V D1 D2 + + for itr_shift in range(1, len(shifts) + 1): + OrigArr = structdis + reqshift = shifts[itr_shift-1] # shifting index + + # create transformation matrix for warpAffine function + M = np.float32([[1, 0, reqshift[1]], [0, 1, reqshift[0]]]) + ShiftArr = cv2.warpAffine(OrigArr, M, (structdis.shape[1], structdis.shape[0])) + + Shifted_new_structdis = ShiftArr + Shifted_new_structdis = Shifted_new_structdis * structdis + # shifted_new_structdis is the pairwise product + # best fit the pairwise product + best_fit_params = AGGDfit(Shifted_new_structdis) + lsigma_best = best_fit_params[0] + rsigma_best = best_fit_params[1] + gamma_best = best_fit_params[2] + + constant = m.pow(tgamma(1/gamma_best), 0.5)/m.pow(tgamma(3/gamma_best), 0.5) + meanparam = (rsigma_best - lsigma_best) * (tgamma(2/gamma_best)/tgamma(1/gamma_best)) * constant + + # append the best fit calculated parameters + feat.append(gamma_best) # gamma best + feat.append(meanparam) # mean shape + feat.append(m.pow(lsigma_best, 2)) # left variance square + feat.append(m.pow(rsigma_best, 2)) # right variance square + + # resize the image on next iteration + im_original = cv2.resize(im_original, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC) + return feat + +def img_MeasureImageQuality(dataset_directory): + """""" + #################################################################################### + #Input - img_path + #Output - Quality index of input image + #Defination - function to calculate BRISQUE quality score in range of 0 and 100 [0:good;100:bad] + #################################################################################### + """""" + imgfile_dict = {} + for file in os.listdir(dataset_directory): + if (file.endswith("".jfif"") or file.endswith("".png"") or file.endswith("".jpg"") or file.endswith("".jpeg"")): + filename = os.path.join(dataset_directory , file) + if os.path.isfile(filename)==False: + + sys.exit() + + file_extension = os.path.splitext(filename)[1] + + if file_extension=="".jfif"": + extension="".jfif"" + + if file_extension=="".png"": + extension="".png"" + + if file_extension=="".jpg"": + extension="".jpg"" + + if file_extension=="".jpeg"": + extension="".jpeg"" + + + if (extension not in ["".jpg"","".jpeg"","".jfif"","".png""]): + + sys.exit() + + try: + # read image from given path + dis = cv2.imread(filename, 1) + if(dis is None): + sys.exit(0) + # convert to gray scale + dis = cv2.cvtColor(dis, cv2.COLOR_BGR2GRAY) + + # compute feature vectors of the image + features = compute_features(dis) + + # rescale the brisqueFeatures vector from -1 to 1 + x = [0] + + # pre loaded lists from C++ Module to rescale brisquefeatures vector to [-1, 1] + min_= [0.336999 ,0.019667 ,0.230000 ,-0.125959 ,0.000167 ,0.000616 ,0.231000 ,-0.125873 ,0.000165 ,0.000600 ,0.241000 ,-0.128814 ,0.000179 ,0.000386 ,0.243000 ,- 0.133080 ,0.000182 ,0.000421 ,0.436998 ,0.016929 ,0.247000 ,-0.200231 ,0.000104 ,0.000834 ,0.257000 ,-0.200017 ,0.000112 ,0.000876 ,0.257000 ,-0.155072 , 0.000112 ,0.000356 ,0.258000 ,-0.154374 ,0.000117 ,0.000351] + + max_= [9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 
0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087, 1.554016, 0.101000 , 0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398, 1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958, 0.534484] + + # append the rescaled vector to x + for i in range(0, 36): + min = min_[i] + max = max_[i] + x.append(-1 + (2.0/(max - min) * (features[i] - min))) + modelPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'allmodel.txt') + # load model + model = svmutil.svm_load_model(modelPath) + + # create svm node array from python list + x, idx = gen_svm_nodearray(x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED)) + x[36].index = -1 # set last index to -1 to indicate the end. + + # get important parameters from model + svm_type = model.get_svm_type() + is_prob_model = model.is_probability_model() + nr_class = model.get_nr_class() + + if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC): + # here svm_type is EPSILON_SVR as it's regression problem + nr_classifier = 1 + dec_values = (c_double * nr_classifier)() + + # calculate the quality score of the image using the model and svm_node_array + qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values) + imgfile_dict[file] = round(qualityscore,2) + #print (""Quality Score of the given image is: "", qualityscore, ""[0:Good;100:Bad]"") + except: + pass + finally: + warnings.simplefilter(""ignore"") + #print(imgfile_dict) + return imgfile_dict + +# calculate moode +def mode(arr): + if arr==[]: + return None + else: + return max(set(arr), key=arr.count) + +def img_EDA(dataset_directory): + + """""" + #################################################################################### + #Input - dataset_directory with all type of Images + #Output - mean,median and mode image size, channels type, extensions, recommendation of images etc + #Defination - img_EDA takes the all images and print the EDA results + #################################################################################### + + """""" + imgeda_dict = {} + # check input directory + + if os.path.isdir(dataset_directory)==False: + print(""folder does not exist"") + sys.exit() + + width_list=[] + height_list=[] + k=[] + c=[] + cnum=[] + v=[] + ext=[] + cnt=0 + for item in os.listdir(dataset_directory): + if (item.endswith("".jfif"") or item.endswith("".png"") or item.endswith("".jpg"") or item.endswith("".jpeg"")): + if os.path.isfile(os.path.join(dataset_directory , item)): + + im = Image.open(os.path.join(dataset_directory , item)) + c.append(im.mode) + cnum.append(len(im.mode)) + width_list.append(im.width) + height_list.append(im.height) + k.append(im.size) + v.append(im.width*im.height) + f, e = os.path.splitext(os.path.join(dataset_directory , item)) + ext.append(e) + cnt=cnt+1 + + # calculate biggest and smallest image + img_dict={} + for key, val in zip(k, v): + img_dict[key] = val + + max_key = max(img_dict, key=img_dict.get) + #max_key + + min_key = min(img_dict, key=img_dict.get) + #min_key + + imgeda_dict['Channels'] = set(c) + imgeda_dict['Extensions'] = set(ext) + imgeda_dict['Total_Images'] = cnt + imgeda_dict['Smallest_Image'] = min_key + imgeda_dict['Largest_Image'] = max_key + imgeda_dict['Mean_Width'] = int(np.mean(width_list)) + imgeda_dict['Mean_Height'] = int(np.mean(height_list)) + imgeda_dict['Median_Width'] = int(np.median(width_list)) + imgeda_dict['Median_Height'] = int(np.median(height_list)) + imgeda_dict['Mode_Width'] = int(mode(width_list)) + 
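+
+# --- Illustrative sketch (not part of the original source) ---
+# The core of compute_features() above is the MSCN (mean-subtracted, contrast-
+# normalised) transform that BRISQUE is built on: the local mean and local standard
+# deviation are estimated with a Gaussian window and used to normalise each pixel.
+# A compact restatement of that step (helper name is illustrative):
+import cv2
+import numpy as np
+
+def mscn_coefficients(gray_image: np.ndarray) -> np.ndarray:
+    im = gray_image.astype(np.float64) / 255.0
+    mu = cv2.GaussianBlur(im, (7, 7), 1.166)                 # local mean
+    sigma = cv2.GaussianBlur(im * im, (7, 7), 1.166) - mu * mu
+    sigma = np.sqrt(np.maximum(sigma, 0))                    # local standard deviation
+    return (im - mu) / (sigma + 1.0 / 255)                   # MSCN image ('structdis' above)
+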
imgeda_dict['Mode_Height'] = int(mode(height_list)) + imgeda_dict['Recomended_Mean_Width_Height'] = (int(np.mean(width_list)),int(np.mean(height_list))) + imgeda_dict['Recomended_Median_Width_Height'] = (int(np.median(width_list)),int(np.median(height_list))) + imgeda_dict['Recomended_Mode_Width_Height'] = (int(mode(width_list)),int(mode(height_list))) + imgeda_dict['Size_Distribution'] = dict(Counter(k).items()) + imgeda_dict['Channel_Mean'] = np.mean(cnum) + imgeda_dict['Channel_Standard_Deviation'] = np.std(cnum) + ''' + print('*-----------------------<<< RESULTS >>>-------------------------*') + print() + print('%-30s | ' % 'Channels', set(c)) + print('%-30s | ' % 'Extensions', set(ext)) + print('*---------------------------------------------------------------*') + + print('%-30s | ' % 'Total Images', cnt) + + print('%-30" +"s | ' % 'Smallest Image', min_key) + + print('%-30s | ' % 'Largest Image', max_key) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'Mean Width', int(np.mean(width_list))) + + print('%-30s | ' % 'Mean Height', int(np.mean(height_list))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'Median Width', int(np.median(width_list))) + + print('%-30s | ' % 'Median Height', int(np.median(height_list))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'Mode Width', int(mode(width_list))) + + print('%-30s | ' % 'Mode Height', int(mode(height_list))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'recommended size by mean(w,h)',(int(np.mean(width_list)),int(np.mean(height_list)))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'recommended size by median(w,h)',(int(np.median(width_list)),int(np.median(height_list)))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'recommended size by mode(w,h)',(int(mode(width_list)),int(mode(height_list)))) + + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'distribution of sizes',dict(Counter(k).items()) ) + print('*---------------------------------------------------------------*') + print('%-30s | ' % 'channel mean',np.mean(cnum)) + print('%-30s | ' % 'channel standard deviation',np.std(cnum)) + ''' + #print(imgeda_dict) + return imgeda_dict + + +def dhash(image, hashSize=8): + # convert the image to grayscale and resize the grayscale image, + # adding a single column (width) so we can compute the horizontal + # gradient + gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + resized = cv2.resize(gray, (hashSize + 1, hashSize)) + # compute the (relative) horizontal gradient between adjacent + # column pixels + diff = resized[:, 1:] > resized[:, :-1] + # convert the difference image to a hash and return it + return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v]) + +def img_duplicatefinder(dataset_directory): + # grab the paths to all images in our input dataset directory and + # then initialize our hashes dictionary + print(""[INFO] computing image hashes..."") + imagePaths = list(paths.list_images(dataset_directory)) + hashes = {} + duplimg_list = [] + remove_file = 0 + # loop over our image paths + for imagePath in imagePaths: + # load the input image and compute the hash + image = cv2.imread(imagePath) + h = dhash(image) + # grab all image paths with that hash, add the 
current image + # path to it, and store the list back in the hashes dictionary + p = hashes.get(h, []) + p.append(imagePath) + hashes[h] = p + + # loop over the image hashes + for (h, hashedPaths) in hashes.items(): + # check to see if there is more than one image with the same hash + if len(hashedPaths) > 1: + #print(hashedPaths) + duplimg_list.append(hashedPaths) + + + return duplimg_list + +def img_plot_colour_hist(dataset_directory): + import io, base64, urllib + red_values = []; green_values = []; blue_values = []; all_channels = [] + imagePaths = list(paths.list_images(dataset_directory)) + for imagePath in imagePaths: + img = np.array(Image.open(imagePath)) + red_values.append(np.mean(img[:, :, 0])) + green_values.append(np.mean(img[:, :, 1])) + blue_values.append(np.mean(img[:, :, 2])) + all_channels.append(np.mean(img)) + + _, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True) + for ax, column, vals, c in zip( + axes, + ['red', 'green', 'blue', 'all colours'], + [red_values, green_values, blue_values, all_channels], + 'rgbk' + ): + ax.hist(vals, bins=100, color=c) + ax.set_title(f'{column} hist') + + plt.suptitle(""Image Dataset Colour Distribution"") + buf = io.BytesIO() + plt.savefig(buf, format='png') + buf.seek(0) + string = base64.b64encode(buf.read()) + uri = 'data:image/png;base64,' + urllib.parse.quote(string) + + return uri ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
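+
+# --- Illustrative sketch (not part of the original source) ---
+# img_duplicatefinder() above groups images whose 64-bit difference hash (dhash) is
+# identical. The same hashes can also be compared by Hamming distance to catch
+# near-duplicates; that extension is sketched below (function name and threshold
+# are illustrative, dhash() is the helper defined above).
+import cv2
+
+def are_near_duplicates(path_a: str, path_b: str, max_bits_different: int = 5) -> bool:
+    h_a = dhash(cv2.imread(path_a))
+    h_b = dhash(cv2.imread(path_b))
+    hamming = bin(h_a ^ h_b).count('1')      # number of differing hash bits
+    return hamming <= max_bits_different
+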
+* +''' +import os +import keras +from keras.models import Sequential +from keras.layers import Dense, Dropout, Flatten +from keras.layers import Conv2D, MaxPooling2D +from keras.utils import to_categorical +from keras.preprocessing import image +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.model_selection import train_test_split +from keras.utils import to_categorical +from keras.layers import Input +from keras.models import Model +from keras.optimizers import Adam +from keras.applications import VGG16 +from tensorflow.keras.callbacks import EarlyStopping +from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve +import seaborn as sns +import cv2 + + +def PredictCAST(test_image, load_model_dir, model_name, hparams_config_file): + + hyperparam_config = hparams_config_file['img_classifier'] + + print(""[Info] loading imagenet weights..."") + #baseModel = keras.applications.ResNet101(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(128, 128, 3))) + if model_name == ""densenet"": + print('Loading Densenet model') + baseModel = keras.applications.DenseNet121(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98 + elif model_name == ""inception"": + print('Loading Inception model') + baseModel = keras.applications.InceptionV3(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97 + + headModel = baseModel.output + headModel = Flatten(name=""flatten"")(headModel) + headModel = Dense(1024, activation='relu')(headModel) + headModel = Dropout(0.5)(headModel) + headModel = Dense(2, activation='sigmoid')(headModel) + + model = Model(inputs=baseModel.input, outputs=headModel) + print(""[INFO] compiling model..."") + opt = Adam(lr=hyperparam_config['lr']) + model.compile(loss=""binary_crossentropy"", optimizer=opt, metrics=[""accuracy""]) + + model.load_weights(os.path.join(load_model_dir, model_name)) + + img = cv2.imread(test_image) + img = cv2.resize(img, (hyperparam_config['img_width'],hyperparam_config['img_height'])) + orig = img.copy() + img = image.img_to_array(img) + img = np.expand_dims(img, axis=0) + img = img/255 + + print(""[Info] predicting output"") + #prediction = model.predict_classes(img) + prediction = model.predict(img) + prediction = np.argmax(prediction,axis=1) + print(prediction) + if (prediction<0.5): + print(""def_front"") + cv2.putText(orig, ""def_front"", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) + else: + print(""ok_front"") + cv2.putText(orig, ""ok_front"", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) + + plt.imshow(orig) + plt.axis('off') + plt.show() + + print(""Prediction Completed..."") + + + + import numpy as np +# from learner.classificationModel import ClassifierModel +from learner.aion_matrix import aion_matrix +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error + + +class defaultParams(): + def __init__(self, modelName, paramDict, scoreParam, MakeFP0, MakeFN0,paramSize): + self.modelName = modelName + self.paramDict = paramDict + self.scoreParam = scoreParam + self.MakeFP0 = MakeFP0 + self.MakeFN0 = MakeFN0 + self.dictsize = paramSize + + def paramDictConvertion(self): + if self.dictsize != 0: + + for keys in 
self.paramDict.keys(): + self.paramDict[keys] = self.paramDict[keys][0] + + + def startTrainingClassification(self, trainX, trainY, testX, testY): + + threshold = -1 + + precisionscore = -1 + + recallscore = -1 + + objClf = aion_matrix() + self.paramDictConvertion() + + if self.modelName == 'LogisticRegression': + from sklearn import linear_model + estimator = linear_model.LogisticRegression() + if self.modelName == 'GaussianNB': + from sklearn.naive_bayes import GaussianNB + estimator = GaussianNB() + if self.modelName == 'SVC': + from sklearn import svm + estimator = svm.SVC() + if self.modelName == 'KNeighborsClassifier': + from sklearn.neighbors import KNeighborsClassifier + estimator = KNeighborsClassifier() + if self.modelName == 'DecisionTreeClassifier': + from sklearn.tree import DecisionTreeClassifier + estimator = DecisionTreeClassifier() + if self.modelName == 'RandomForestClassifier': + from sklearn.ensemble import RandomForestClassifier + estimator = RandomForestClassifier() + if self.modelName == 'GradientBoostingClassifier': + from sklearn.ensemble import GradientBoostingClassifier + estimator = GradientBoostingClassifier() + if self.modelName == 'XGBClassifier': + import xgboost as xgb + estimator = xgb.XGBClassifier() + if self.modelName == 'CatBoostClassifier': + from catboost import CatBoostClassifier + estimator = CatBoostClassifier() + if self.modelName == 'LGBMClassifier': + from lightgbm import LGBMClassifier + estimator = LGBMClassifier() + + if self.dictsize != 0: + estimator.set_params(**self.paramDict) + estimator.fit(trainX, trainY) + if not testX.empty: + predictedData = estimator.predict(testX) + score = objClf.get_score(self.scoreParam, testY, predictedData) + if self.MakeFP0: + self.log.info('-------- Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange, endRange, stepsize) + threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, + threshold_range, 'FP', self.modelName) + self.log.info('-------- Calculate Threshold for FP End-------') + if self.MakeFN0: + self.log.info('-------- Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange, endRange, stepsize) + threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, + threshold_range, 'FN', self.modelName) + self.log.info('-------- Calculate Threshold for FN End-------') + else: + predictedData = estimator.predict(trainX) + score = objClf.get_score(self.scoreParam, trainY, predictedData) + if self.MakeFP0: + self.log.info('-------- Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange, endRange, stepsize) + threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, + threshold_range, 'FP', self.modelName) + self.log.info('-------- Calculate Threshold for FP End-------') + if self.MakeFN0: + self.log.info('-------- Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange, endRange, stepsize) + threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, + threshold_range, 'FN', self.modelName) + self.log.info('-------- Calculate Threshold for FN End-------') + # status, bscore, bthres, brscore, bpscore = objClf.getBestModel(self.MakeFP0,self.MakeFN0, 
threshold, + # bestthreshold, recallscore, bestrecallscore, + # precisionscore, bestprecisionscore, score, + # bestScore) + return estimator, estimator.get_params(), self.modelName, score, threshold, precisionscore, recallscore + + def startTrainingRegression(self, trainX, trainY, testX," +"testY): + #objClf = aion_matrix() + try: + score = 0 + self.paramDictConvertion() + if self.modelName==""LinearRegression"": + from sklearn import linear_model + estimator = linear_model.LinearRegression() + if self.modelName==""Lasso"": + from sklearn import linear_model + estimator = linear_model.Lasso() + if self.modelName==""Ridge"": + from sklearn import linear_model + estimator = linear_model.Ridge() + if self.modelName==""DecisionTreeRegressor"": + from sklearn.tree import DecisionTreeRegressor + estimator = DecisionTreeRegressor() + if self.modelName==""RandomForestRegressor"": + from sklearn.ensemble import RandomForestRegressor + estimator = RandomForestRegressor() + if self.modelName== ""XGBRegressor"": + import xgboost as xgb + estimator = xgb.XGBRegressor() + if self.modelName == 'CatBoostRegressor': + from catboost import CatBoostRegressor + estimator = CatBoostRegressor() + if self.modelName == 'LGBMRegressor': + from lightgbm import LGBMRegressor + estimator = LGBMRegressor() + if self.dictsize != 0: + estimator.set_params(**self.paramDict) + estimator.fit(trainX, trainY) + + + except Exception as e: + print(e) + if not testX.empty: + predictedData = estimator.predict(testX) + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(testY, predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror = mean_squared_error(testY, predictedData, squared=False) + score = rootmeanssquatederror + elif 'mae' in self.scoreParam: + meanabsoluteerror = mean_absolute_error(testY, predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score = r2_score(testY, predictedData) + score = r2score + else: + predictedData = estimator.predict(trainX) + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(trainY, predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror = mean_squared_error(trainY, predictedData, squared=False) + score = rootmeanssquatederror + elif 'mae' in self.scoreParam: + meanabsoluteerror = mean_absolute_error(trainY, predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score = r2_score(trainY, predictedData) + score = r2score + return estimator, estimator.get_params(), self.modelName, score + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
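+
+# --- Illustrative sketch (not part of the original source) ---
+# startTrainingRegression() above maps the configured score name onto an sklearn
+# metric by hand. The same dispatch, collected into one helper (helper name is
+# illustrative; newer sklearn versions also provide root_mean_squared_error):
+from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
+
+def regression_score(score_param: str, y_true, y_pred) -> float:
+    if 'neg_root_mean_squared_error' in score_param:
+        return mean_squared_error(y_true, y_pred, squared=False)   # RMSE, as called above
+    if 'neg_mean_squared_error' in score_param:
+        return mean_squared_error(y_true, y_pred)                  # MSE
+    if 'mae' in score_param:
+        return mean_absolute_error(y_true, y_pred)
+    if 'r2' in score_param:
+        return r2_score(y_true, y_pred)
+    raise ValueError(f'unsupported score parameter: {score_param}')
+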
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import boto3 +import json +import time +import requests +import datetime +import uuid +import shutil +from websocket import create_connection +from botocore.exceptions import ClientError +import tarfile +from pathlib import Path, PurePosixPath +from stat import S_ISDIR +from fabric import Connection +import time +import logging + +class awsGPUTraining(): + + + def __init__(self, config): + local_config = {""location"":{""data"":""aion/data/od"", ""code"":"""", ""pretrainedModel"":""aion/pretrainedModels""}, + ""jupyter"":{""header"":{""Authorization"":""Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2""}, ""portNo"":""8888"", + ""notebook_path"":""aion/code/AWS_GPU_OD_Training.ipynb""}} + self.serverConfig = config[""server""] + self.sshConfig = config[""ssh""] + self.log = logging.getLogger('eion') + self.codeLocation = local_config[""location""][""code""] + self.dataLocation = local_config[""location""][""data""] + self.pretrainedModelLocation = local_config[""location""][""pretrainedModel""] + self.jupyterConfig = local_config[""jupyter""] + self.serverIP = """" + if self.serverConfig[""awsAccessKeyId""] == """" or self.serverConfig[""awsSecretAccessKey""] == """": + raise ValueError(""Cloud server configuration is not available."") + if len(self.serverConfig[""InstanceIds""]) == 0 and self.serverConfig[""amiId""] == """": + raise ValueError(""Please provide either InstanceIds or amiId in server config"") + + self.instanceId = [] + self.separate_instance = False + if self.serverConfig[""amiId""] != """": + self.separate_instance = True + else: + if len(self.serverConfig[""InstanceIds""]): + if isinstance(self.serverConfig[""InstanceIds""], list): + self.instanceId = self.serverConfig[""InstanceIds""] + elif isinstance(self.serverConfig[""InstanceIds""], str): + self.instanceId = [self.serverConfig[""InstanceIds""]] + + self.ec2_client = boto3.client(self.serverConfig[""serverName""], region_name=self.serverConfig[""regionName""], aws_access_key_id=self.serverConfig[""awsAccessKeyId""], aws_secret_access_key=self.serverConfig[""awsSecretAccessKey""]) + + def __sftp_exists(self, sftp, path): + try: + sftp.stat(path) + return True + except:# IOError, e: + #if e.errno == errno.ENOENT: + return False + + def __rmtree(self, sftp, remotepath, level=0): + for f in sftp.listdir_attr(remotepath): + rpath = str(PurePosixPath(remotepath)/f.filename) + if S_ISDIR(f.st_mode): + self.__rmtree(sftp, rpath, level=(level + 1)) + sftp.rmdir(rpath) + else: + rpath = str(PurePosixPath(remotepath)/f.filename) + sftp.remove(rpath) + + def copy_files_to_server(self, location): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + client.sudo('rm -rf {}/*'.format(self.dataLocation)) + tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix("".tar.gz"")) + client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord') + client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord') + client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config') + client.put(location+'/label_map.pbtxt', self.dataLocation+'/label_map.pbtxt') + 
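+
+# --- Illustrative sketch (not part of the original source) ---
+# The copy/run methods of awsGPUTraining above all follow the same fabric pattern:
+# open an SSH Connection with a key file, upload with put(), and execute with run()
+# or sudo(). Host, user and paths below are placeholders.
+from fabric import Connection
+
+def upload_and_run(host: str, user: str, key_file: str, local_path: str, remote_path: str) -> str:
+    conn = Connection(host=host, user=user,
+                      connect_kwargs={'key_filename': key_file})
+    conn.put(local_path, remote_path)                 # copy one file to the server
+    result = conn.run(f'ls -l {remote_path}', hide=True)
+    return result.stdout
+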
client.put(location+'/model.config', self.dataLocation+'/model.config') + if self.jupyterConfig != """": + client.run(""touch {}"".format(self.dataLocation+'/log.txt')) + except Exception as e: + raise ValueError(""Error in copying data to cloud server. "" + str(e)) + + def __myexec(self, ssh, cmd, timeout, want_exitcode=False): + # one channel per command + stdin, stdout, stderr = ssh.exec_command(cmd) + # get the shared channel for stdout/stderr/stdin + channel = stdout.channel + + # we do not need stdin. + stdin.close() + # indicate that we're not going to write to that channel anymore + channel.shutdown_write() + + # read stdout/stderr in order to prevent read block hangs + stdout_chunks = [] + stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer))) + # chunked read to prevent stalls + while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready(): + # stop if channel was closed prematurely, and there is no data in the buffers. + got_chunk = False + readq, _, _ = select.select([stdout.channel], [], [], timeout) + for c in readq: + + if c.recv_ready(): + stdout_chunks.append(stdout.channel.recv(len(c.in_buffer))) + got_chunk = True + if c.recv_stderr_ready(): + # make sure to read stderr to prevent stall + stderr.channel.recv_stderr(len(c.in_stderr_buffer)) + got_chunk = True + ''' + 1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file). + 2) if no data arrived in the last loop, check if we already received the exit code + 3) check if input buffers are empty + 4) exit the loop + ''' + if not got_chunk \\ + and stdout.channel.exit_status_ready() \\ + and not stderr.channel.recv_stderr_ready() \\ + and not stdout.channel.recv_ready(): + # indicate that we're not going to read from this channel anymore + stdout.channel.shutdown_read() + # close the channel + stdout.channel.close() + break # exit as remote side is finished and our bufferes are empty + + # close all the pseudofiles + stdout.close() + stderr.close() + + if want_exitcode: + # exit code is always ready at this point + return (''.join(stdout_chunks), stdout.channel.recv_exit_status()) + return ''.join(stdout_chunks) + + + def __myexec1(self, ssh, cmd, timeout, want_exitcode=False): + # one channel per command + stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True) + for line in iter(stderr.readline, """"): + print(line, end="""") + stdin.close() + stdout.close() + stderr.close() + + def executeCode(self): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation) + client.run( cmd) + except Exception as e: + raise ValueError(""Error in running code on cloud server. 
"" + str(e)) + + def start_executing_notebook(self): + try: + publicIp_Port = self.serverIP + "":"" + self.jupyterConfig[""portNo""] + conURL = ""ws://"" + publicIp_Port + base = 'http://' + publicIp_Port + '' + + headers = self.jupyterConfig[""header""] + url = base + '/api/kernels' + flag = True + while flag: # deadlock need to add timeout + response = requests.post(url, headers=headers) + flag = False + kernel = json.loads(response.text) + + # Load the notebook and get the code of each cell + url = base + '/api/contents/' + self.jupyterConfig[""notebook_path""] + response = requests.get(url, headers=headers) + file = json.loads(response.text) + code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ] + ws = create_connection(conURL + ""/api/kernels/"" + kernel[""id""] + ""/channels"", + header=headers) + + def send_execute_request(code): + msg_type = 'execute_request'; + content = {'code': code, 'silent': False} + hdr = {'msg_id': uuid.uuid1().hex, + 'username': 'test', + 'session': uuid.uuid1().hex, + 'data': datetime.datetime.now().isoformat(), + 'msg_type': msg_type, + 'version': '5.0'} + msg = {'header': hdr, 'parent_header': hdr, + 'metadata': {}, + 'content': content} + return msg + + for c in code: + ws.send(json.dumps(send_execute_request(c))) + + # We ignore all the other messages, we just get the code execution output + # (this needs to be improved for production to take into account errors, large cell output, images, etc.) + error_msg = '' + traceback_msg = '' + for i in range(0, len(code)): + msg_type = ''; + while msg_type != ""stream"": + rsp = json.loads(ws.recv()) + msg_type = rsp[""msg_type""] + if msg_type == 'error': + raise ValueError(""Error on Cloud machine: ""+rsp['content']['evalue']) + ws.close() + self.log.info('Status:- |...Execution Started`') + except ClientError as e: + raise ValueError(e) + + + + def __wait_for_completion(self, sftp, remoteLogFile, localLogFile): + + waiting = True + error_msg = """" + while waiting: + time.sleep(5 * 60) + try: + sftp.get(str(remoteLogFile), str(localLogFile)) + with open(localLogFile, ""r"") as f: + content = f.readlines() + for x in content: + if ""Error"" in x: + waiting = False + error_msg = x + if ""success"" in x: + waiting = False + except: + raise (str(e)) + return error_msg + + def copy_file_from_server(self, localPath): + try: + client = Connection( + host=self.serverIP, + user=self.sshConfig[""userName""], + connect_kwargs={ + ""key_filename"": self.sshConfig[""keyFilePath""], + }, + ) + + remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt' + localLogFile = Path(localPath)/'remote_log.txt' + client.get(str(remoteLogFile), str(localLogFile)) + tarFile" +"= (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix("".tar.gz"") + client.get(str(tarFile), str(Path(localPath)/tarFile.name)) + except: + raise + return str(Path(localPath)/tarFile.n" +"dims, n_timesteps, n_bottleneck,units,activation,df): + # inputs = Input(shape = (n_timesteps, n_dims)) + inputs = Input(shape = (df.shape[1], df.shape[2])) + e = keras.layers.LSTM(units, activation = activation, return_sequences = True)(inputs) + ## code layer or compressed form of data produced by the autoencoder, bottleneck layer + latent_space = keras.layers.LSTM(n_bottleneck, activation = activation, + return_sequences = False, + name = 'bottleneck_layer')(e) + e = keras.layers.RepeatVector(n_timesteps)(latent_space) + decoder = keras.layers.LSTM(n_bottleneck, activation = 
activation, + return_sequences = True)(e) + decoder = keras.layers.LSTM(units, activation = activation, return_sequences = True)(decoder) + outputs = keras.layers.TimeDistributed(Dense(n_dims))(decoder) + model = Model(inputs = inputs, outputs = outputs) + return model + + ## adding some model checkpoints to ensure the best values will be saved and early stopping to prevent the model from running unnecessary. + def callbacks(self, **kwargs): + self.mc = ModelCheckpoint(filepath = kwargs.get(""filename""), + save_best_only = True, verbose = 0) + + self.es = EarlyStopping(monitor = kwargs.get(""monitor""), + patience = kwargs.get(""patience"")) + + return self.es,self.mc + + ##This below function create get_datetime class python file in target->scripts folder + '''This aion_gettimegranularity class is used to retrive the time pattern (for getting time granularity) of given datetime feature.''' + def create_datetime_pyfile(self): + try: + datetimepattern_code=r""""""## +import pandas as pd +import numpy as np +class aion_gettimegranularity: + cls_name=""datetimeinformation"" + + def __init__(self,dataframe, datetimefeature): + self.df=dataframe + self.datetimefeature=datetimefeature + + def get_dfinfo(self,df): + from io import StringIO + buf = StringIO() + df.info(buf=buf) + #print(buf.getvalue()) + return buf.getvalue() + + def get_granularity(self): + try: + ##get local df + df_t=self.df + buf_info=self.get_dfinfo(df_t) + df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True) + try: + df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) + except Exception as e: + pass + # print(""Datetime feature to python datetime format convertion error.\\n"",e) + df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1) + datetime_mean=df_t['time_diff'].mean() + totsec = datetime_mean.total_seconds() + ## Dict variable to store datetime details.Initialized all date param as False. 
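+            ## Keys: h=hour, m=minute, s=second, us=microsecond, ns=nanosecond, Y=year, M=month, D=day; the branches below set exactly one of them to True.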
+ status_time={""h"":False,""m"":False,""s"":False,""us"":False,""ns"":False,""Y"":False,""M"":False,""D"":False} + if (datetime_mean.days == 0): + if (totsec/3600 > 1): + ## hour + status_time['h']=True + else: + if (totsec/60 >1): + ## minute + status_time['m']=True + else: + if (totsec <= 1e-06 and totsec > 1e-09): + ## microsecond + status_time['us']=True + elif (totsec<= 1e-09 and totsec >=1e-012): + ## nanosecond + status_time['ns']=True + else: + ## second + status_time['s']=True + else: + days=datetime_mean.days + if (days/365>1): + ## year + status_time['Y']=True + else: + if (days>30): + ## month + status_time['M']=True + else: + ## day + status_time['D']=True + + time_pattern=None + for k,v in status_time.items(): + if (v == True): + time_pattern=k + #print(""<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \\t"",(time_pattern)) + try: + try: + df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) + except Exception as e: + pass + df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs() + time_threshold=1 + df_t['anomalyType'] = np.where((df_t['Time_diff'] != 1),""Point"",""Sequence"") + df_t.drop(""Time_diff"",axis=1,inplace=True) + except Exception as e: + print(""time_diff err message: "",str(e)) + except Exception as e: + pass + # print(""get_granularity err msg: "",(e)) + return df_t +"""""" + cwd=self.deployLocation + file_name='aion_granularity'+'.py' + try: + data_file=os.path.normpath(os.path.join(cwd,'script',file_name)) + with open(data_file,'w') as file: + file.write(datetimepattern_code) + except Exception as error: + self.log.info(""<---- datetimepattern_code write Error.: ---->""+str(error)) + self.log.info(""datetimepattern source code created at target folder...\\n"") + except Exception as error: + self.log.info(""<---- datetimepattern_code function Error.: ---->""+str(error)) + + ## Simple mlp based autoencoder model, not used now. 
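+    ## (Kept for reference only; the live builder is the aetsmodel_lstm defined above, which exposes an explicit 'bottleneck_layer'.)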
+ # def aetsmodel_lstm(self,X_train): + # model = keras.Sequential() + # # autoencoder encoder + # model.add(keras.layers.LSTM( + # units=64, + # input_shape=(X_train.shape[1], X_train.shape[2]) + # )) + # model.add(keras.layers.Dropout(rate=0.2)) + # model.add(keras.layers.RepeatVector(n=X_train.shape[1])) + # # autoencoder decoder + # model.add(keras.layers.LSTM(units=64, return_sequences=True)) + # model.add(keras.layers.Dropout(rate=0.2)) + # model.add( + # keras.layers.TimeDistributed( + # keras.layers.Dense(units=X_train.shape[2]) + # ) + # ) + + # return model + + ## To find optimal anomaly threshold value + def find_threshold(self,model, x_train_scaled): + reconstructions = model.predict(x_train_scaled) + # provides losses of individual instances msle + reconstruction_errors = tf.keras.losses.mae(reconstructions, x_train_scaled) + # threshold for anomaly scores + threshold = np.mean(reconstruction_errors.numpy())+ 2*np.std(reconstruction_errors.numpy()) + return threshold + + ## compiling the model with adam optimizer and mean squared error loss + def model_compile(self, model,lr, loss, opt): + if opt == ""adam"": + opt = Adam(learning_rate = lr) + else: + opt = SGD(learning_rate = lr) + model.compile(loss = loss, optimizer = opt) + + + ## save anomaly points in aion target folder + def save_anomalyvalues(self,df,file_name): + # cwd = os.path.abspath(os.path.dirname(__file__)) + cwd=self.deployLocation + file_name=file_name+'.csv' + try: + out_path=os.path.normpath(os.path.join(cwd,'output')) + if not os.path.isdir(out_path): + os.makedirs(out_path) + data_file=os.path.normpath(os.path.join(cwd,'output',file_name)) + except Exception as error: + self.log.info(""<---- autoencoder artifact_dir path. Error Msg: ---->""+str(error)) + try: + df.to_csv(data_file,index=False) + except Exception as e: + self.log.info(""<---- Saving log data frame error. Error Msg: ---->""+str(e)) + + ## model summary + def summary(self,model): + return model.summary() + ##Method to find subsequence and point anomalies aion_gettimegranularity + def find_point_subsequence_anomalies(self,datetime_column,dataframe=None): + try: + dataframe.reset_index(level=0, inplace=True) + try: + dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) + except Exception as e: + self.log.info(""Dataframe contains no datetime feature.Err.Msg: \\n""+str(e)) + pass + try: + ##Below commented part using normalize with time delta, find point anomalies.But not used,just for reference. + ##get day to check difference + #date_f = dataframe[datetime_column].dt.normalize() + ##compare successive rows and identify group size + #dataframe['anomaly_value'] = np.where(dataframe[datetime_column].groupby(date_f.ne(date_f.shift()).cumsum()).transform('size').gt(1),'subsequence_anomaly', 'Point_anomaly') + ##Using get_timepattern method + aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) + anomaly_info_df=aion_gettimegranularity_obj.get_granularity() + except Exception as e: + self.log.info(""find_point_subsequence_anomalies,: aion_gettimegranularity err msg:: \\n""+str(e)) + self.log.info(""find_point_subsequence_anomalies,: anomaly_info_df: \\n""+str(anomaly_info_df)) + except Exception as e: + self.log.info(""find_point_subsequence_anomalies,: err msg:: \\n""+str(e)) + return anomaly_info_df + + + ## Auto encoder time series function call + + ## dataframe info() not working for py logging, so workaround we can get information in buffer and log it. 
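+    ## e.g. self.log.info(self.get_df_info(df)) records the same summary that df.info() would otherwise print to stdout.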
+ def get_df_info(self,df): + from io import StringIO + buf = StringIO() + df.info(buf=buf) + #self.log.info(buf.getvalue()) + return buf.getvalue() + + ## Method to detect time series based anomalies in user data. Using both lstm and dense based autoencoder approaches. + def aionAEAnomalyTS(self,df,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad): + ae_hyperparameter=self.paramSpace + anomaly_algorithm=self.anomalyMethod + + # test_size=float(self.testSize) + test_size=0.0 + # train_size=1-test_size + train_size=1-test_size + # train_size_perc=train_size*100 + train_size=int(len(df) * train_size) + try: + timeseries_layers=ae_hyperparameter['timeseries_layers'] + + ## Here we are checking whether to use only LSTM layers for dnn or dense layers. Dense layers better for predicting point as well sequence anomalies in time series. + if (timeseries_layers.lower() == 'lstm'): + try: + ## Need to get normalized data for threshold calculation. + data_mean=df.mean(axis=0) + data_std=df.std(axis=0) + data=(df-data_mean)/data_std + # train, test = df[:train_size], df[train_size:] + train, test = data[:train_size], data[train_size:] + test=train + test1=test ## Need to copy test data + train_index=train.index + test_index=test.index + cols = df.columns + # train, test = train_test_split(df, test_size=test_size,random_state=42) + X_train, y_train = self.create_dataset( + train, + train, + time_steps + ) + X_test, y_test = self.create_dataset( + test, + test, + time_steps ) + + n_dims=X_train.shape[2] + n_timesteps=X_train.shape[1] + opt=ae_hyperparameter['optimizer'] + loss_fn=ae_hyperparameter[""loss""] + epochs=int(ae_hyperparameter['epochs']) + batch_size=int(ae_hyperparameter['batch_size']) + learning_rate=float(ae_hyperparameter['learning_rate']) + n_bottleneck=int(ae_hyperparameter['latentspace_size']) + units=int(ae_hyperparameter['hidden_units']) + activation=ae_hyperparameter['activation'] + ##For task 20731 + minimum_threshold_user = str(ae_hyperparameter['min_threshold']) + maximum_threshold_user = str(ae_hyperparameter['max_threshold']) + + autoencoder=self.aetsmodel_lstm(n_dims, n_timesteps, n_bottleneck,units,activation,X_train) + ##To save file + # cwd = os.path.abspath(os.path.dirname(__file__)) + cwd=self.deployLocation + try: + artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) + if not os.path.isdir(artifact_dir): + os.makedirs(artifact_dir) + except Exception as e: + self.log.info(""<---- Autoencoder artifact_dir path error. 
Error Msg: ---->""+str(e)) + + #dl callback fn to get best loss fn, early stopping & model checkpoint call backs + es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = ""val_loss"") + self.model_compile(autoencoder,learning_rate, loss_fn, opt) + X_train = np.reshape(X_train,(X_train.shape[0],X_train.shape[1],X_train.shape[2])) + X_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_dim" +"s)) + # y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_dims)) + model_hist = autoencoder.fit( + X_train, X_train, + epochs=epochs, + batch_size=batch_size, + validation_split=0.1, + shuffle=False,callbacks = [mc, es] + ) + model_info=self.summary(autoencoder) + X_train_pred = autoencoder.predict(X_train) + train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1) + ## Task 20731 + if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = float(minimum_threshold_user) + elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = float(minimum_threshold_user) + elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + else: + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) + + self.log.info(""Anomaly threshold max value based on loss fn (MAE): ""+str(threshold)) + self.log.info(""Anomaly threshold min value based on loss fn (MAE): ""+str(min_threshold)) + + + X_test_pred = autoencoder.predict(X_test) + test_mae_loss = np.mean(np.abs(X_test_pred - X_test), axis=1) + test_score_df = pd.DataFrame(index=test_index[time_steps:]) + if (n_dims >1): + columns = [f'loss_{num}' for num in range(n_dims)] + # test_score_df = pd.DataFrame(test_mae_loss, columns=columns, index=test_index[time_steps:]) + test_score_df['loss'] = test_mae_loss.mean(axis=1) + else: + test_score_df['loss'] = test_mae_loss + test_score_df['max_threshold'] = threshold + test_score_df['min_threshold'] = min_threshold + test_score_df['anomaly_value'] = (test_score_df.loss > test_score_df.max_threshold) + test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold) + ## Newly added for lstm issue + ## if coming dataframe have datetime index , copy it before concat (different indexed dfs) + import pandas.api.types as ptypes + # if (isinstance(test_score_df, pd.DatetimeIndex) and isinstance(df, pd.DatetimeIndex)): + test_cp_index=None + if (ptypes.is_datetime64_dtype(test_score_df.index) and ptypes.is_datetime64_dtype(df.index)): + # self.log.info(""test_score_df and df have datetime index cols"") + test_cp_index=test_score_df.index + df_cp_index=df.index + test_score_df=test_score_df.reset_index() + df=df.reset_index() ##self.datetimeFeature + test_score_df.dropna() + try: + test_score_df[self.datetimeFeature]=pd.to_datetime(test_score_df[self.datetimeFeature]) + df[self.datetimeFeature]=pd.to_datetime(df[self.datetimeFeature]) + except: + pass + try: + final_df=pd.DataFrame() + cols_to_use = df.columns.difference(test_score_df.columns) + final_df = pd.merge(test_score_df, df[cols_to_use], left_index=True, right_index=True, how='inner') + except Exception as e: + 
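+                            # If this index-aligned merge fails, final_df stays as the empty frame created above and only the error below is logged.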
self.log.info(""final_df creation err msg: \\n: ""+str(e)) + else: + test_index=test_score_df.reset_index(drop=True) + test_cp_index=test_index.index + df_index=df.reset_index(drop=True) + final_df=pd.DataFrame() + final_df = test_score_df.join(df) + final_df.dropna() + ##Again set datetime index to dataframes,drop datetime feature column and set it as index. + try: + final_df.set_index(self.datetimeFeature,inplace=True) + df.set_index(self.datetimeFeature,inplace=True) + df.drop(self.datetimeFeature,axis=1,inplace=True) + final_df.drop(self.datetimeFeature,axis=1,inplace=True) + except: + pass + ## Below commented code used to print df.info() in log file (using get_df_info() methos). + # self.log.info(""anomaly final_df info: \\n"") + # buf_info=self.get_df_info(final_df) + # self.log.info(buf_info) + # final_df=pd.DataFrame() + ##Getback the datetime index back + final_df.index=test_cp_index + normal_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==False] + anomaly_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==True] + + ## Newly added for lstm issue + anomaly_prediction_df=pd.merge(anomaly_prediction_df, final_df, on=['loss', 'max_threshold','min_threshold', 'anomaly_value'], how=""left"") + # anomaly_prediction_df.fillna(anomaly_prediction_df.mean(), inplace=True) + anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) + # anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) + final_df['anomaly_value'] = final_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) + anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({True: 1, False: 0}) + final_df['anomaly_value'] = final_df['anomaly_value'].replace({True:1, False: 0}) + #make sure no nan values after dataframe operations + anomaly_prediction_df.dropna() + final_df.dropna() + # anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info. + self.log.info(""Anomaly data with loss and threshold informations: \\n""+str(anomaly_prediction_df)) + """""" Saving anomaly plots in target->output->anomaly_plot folder """""" + ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. + if (mv_unique_feature_ad.lower()=='false'): + for col in df.columns: + df_subset = anomaly_prediction_df[col] + fig, ax = plt.subplots() + df[col].plot(legend=False, ax=ax) + df_subset.plot(legend=False, ax=ax, color=""r"") + plot_name=col + ax.set_title(plot_name+""_Anomaly Data Plot"") + ax.set_xlabel(""DateTime"") + ax.set_ylabel(""Values"") + plot_name=plot_name+'_'+'anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. 
Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + else: + df_subset = anomaly_prediction_df + fig, ax = plt.subplots() + df.plot(legend=False, ax=ax) + ax.set_title(""Anomaly Data Plot"") + ax.set_xlabel(""X values"") + ax.set_ylabel(""Y Values"") + df_subset.plot(legend=False, ax=ax, color=""r"") + plot_name=df.columns[0] + ax.set_title(plot_name+""_Anomaly Data Plot"") + # ax.set_xlabel(""DateTime"") + # ax.set_ylabel(""Values"") + # plot_name=df.columns[0] + plot_name=plot_name+'_'+'anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + + #process dt feature and save anomalies. + datetime_column=str(self.datetimeFeature) + try: + anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) + # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) + except: + ##If any issue in time series point anomaly detection, skip it. + self.log.info(""Detecting point anomalies have some issue,check datetime feature."") + pass + combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) + combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') + ## If categorical features in original df, then inverse transform the values. + anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({1: ""Anomaly"", 0: ""Normal""}) + final_df['anomaly_value'] = final_df['anomaly_value'].replace({1: ""Anomaly"", 0: ""Normal""}) + ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. + if (mv_unique_feature_ad.lower()=='true'): + ## Multivariate and saving individual feature based anomalies + self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) + # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe')) + try: + final_df=self.merge_pre_post_dfs(final_df) + except Exception as e: + self.log.info(""Anomaly Detection Merge df exception:\\n""+str(e)) + #If merge fails, just out! + pass + self.save_anomalyvalues(final_df,(str(feature_name)+'_ts_overall_dataframe')) + ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line + # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) + ## Save actual test data test_score_df + #self.save_anomalyvalues(test_score_df,(str(feature_name)+'_testdata')) + else: + self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') + # self.save_anomalyvalues(combined_df,'ts_normal_anomaly_dataframe') + try: + final_df=self.merge_pre_post_dfs(final_df) + except Exception as e: + self.log.info(""Anomaly Detection Merge df exception:\\n""+str(e)) + #If merge fails, just out! 
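+                            # final_df is then saved below without the original (pre-encoding) feature values that merge_pre_post_dfs would have re-attached from data/preprocesseddata.csv.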
+ pass + self.save_anomalyvalues(final_df,'ts_overall_dataframe') + ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line + # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) + ## Save actual test data test_score_df + #self.save_anomalyvalues(test_score_df,'testdata') + anomaly_info_df=final_df + self.log.info(""<---- Autoencoder time series data anomalies: ---->""+str(anomaly_prediction_df)) + self.log.info(""<---- Autoencoder time series:Number of anomalies in data:: ---->""+str(len(anomaly_prediction_df))) + # return model + except Exception as e: + self.log.info(""AD lstm traceback error: \\n""+str(traceback.format_exc())) + ## Dense layer based time series AD, most real world usecases, it is working best compared to lstm based.. + elif (timeseries_layers.lower() == 'dense'): + try: + feature_" +"name=df.columns + feature_name = ' '.join(map(str, feature_name)) + try: + #Passing whole data,so test size set as zero. + test_size=0.0 + # train_size=1-test_size + train_size=1-test_size + # train_size_perc=train_size*100 + train_size=int(len(df) * train_size) + train_data,test_data = df[:train_size], df[train_size:] + test_data=train_data + except: + #If any error comes,us sklearn train test split + train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) + pass + test_index=test_data.index ## to get datetime index + units=int(ae_hyperparameter['hidden_units']) + latent_units=int(ae_hyperparameter['latentspace_size']) + activation=ae_hyperparameter['activation'] + ##For task 20731 + minimum_threshold_user = str(ae_hyperparameter['min_threshold']) + maximum_threshold_user = str(ae_hyperparameter['max_threshold']) + + train_data=train_data.values + test_data=test_data.values + ## tss is time series flag, true or false + autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) + opt=ae_hyperparameter['optimizer'] + loss_fn=ae_hyperparameter[""loss""] + epochs=int(ae_hyperparameter['epochs']) + batch_size=int(ae_hyperparameter['batch_size']) + learning_rate=float(ae_hyperparameter['learning_rate']) + cwd=self.deployLocation + try: + artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) + if not os.path.isdir(artifact_dir): + os.makedirs(artifact_dir) + except Exception as e: + self.log.info(""<---- artifact_dir path error. 
Error Msg: ---->""+str(e)) + es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = ""val_loss"") + self.model_compile(autoencoder,learning_rate, loss_fn, opt) + # autoencoder.compile(optimizer='adam', loss='mae') + autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) + # reconstructed = autoencoder(train_data) + reconstructed = autoencoder.predict(train_data) + train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) + ## Task 20731 + if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = float(minimum_threshold_user) + elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = float(minimum_threshold_user) + elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + else: + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) + self.log.info(""Anomaly threshold max value based on loss fn (MAE): ""+str(threshold)) + self.log.info(""Anomaly threshold min value based on loss fn (MAE): ""+str(min_threshold)) + test_labels=None + if (len(self.datetimeFeature) >= 1): + time_series_data=""True"" + else: + time_series_data=""False"" + pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data,min_threshold, threshold,test_labels,time_series_data,time_steps,test_index) + # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) + normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] + anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] + #Below ts_dataframe_anomaly not for production, just for testing purpose. If uncommented, comment it. + #self.save_anomalyvalues(anomaly_info_df,'ts_dataframe_normal') + + # anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info. + self.log.info(""Anomaly data with loss and threshold informations: \\n""+str(anomaly_prediction_df)) + # anomaly_prediction_df_plot=anomaly_prediction_df + """""" Saving anomaly plots in target->output->anomaly_plot folder """""" + ## Only for multivariate (all features) based anomaly data plot + ## Use of the below part if anomaly df columns came as numerical columns. + # if not (df.columns.equals(anomaly_prediction_df.columns)): + # num_cols = [] + # try: + # num_cols=[num_cols.append(float(col)) for col in anomaly_prediction_df.columns.values] + # except ValueError: + # pass + # #Dense layer scaler conversion makes column names as int values, so here find the int cols and rename to original names. + # if (num_cols): + # anomaly_prediction_df=anomaly_prediction_df[num_cols] + # anomaly_prediction_df.columns=df.columns + # normal_prediction_df=normal_prediction_df[num_cols] + # normal_prediction_df.columns=df.columns + ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. 
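+                    ## 'false': one anomaly plot is produced per column of actual_data (whole-dataset AD); otherwise a single plot named after df.columns[0] is produced for the feature under analysis.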
+ + if (mv_unique_feature_ad.lower()=='false'): + # for col in df.columns: + for col in actual_data.columns: + df_subset = anomaly_prediction_df[col] + fig, ax = plt.subplots() + df[col].plot(legend=False, ax=ax) + df_subset.plot(legend=False, ax=ax, color=""r"") + plot_name=col + ax.set_title(plot_name+""_Anomaly Data Plot"") + ax.set_xlabel(""DateTime"") + ax.set_ylabel(""Values"") + plot_name=plot_name+'_'+'anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + else: + df_subset = anomaly_prediction_df + fig, ax = plt.subplots() + df.plot(legend=False, ax=ax) + ax.set_title(""Anomaly Data Plot"") + ax.set_xlabel(""DateTime"") + ax.set_ylabel(""Values"") + df_subset.plot(legend=False, ax=ax, color=""r"") + plot_name=df.columns[0] + ax.set_title(plot_name+""_Anomaly Data Plot"") + # ax.set_xlabel(""DateTime"") + # ax.set_ylabel(""Values"") + # plot_name=df.columns[0] + plot_name=plot_name+'_'+'anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + datetime_column=str(self.datetimeFeature) + # anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) + # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) + try: + anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) + # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) + except: + self.log.info(""Detecting point anomalies have some issue,check datetime feature."") + ##Just pass if datetime column provides issue, use without datetime column info + pass + combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) + combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') + ## If categorical features in original df, then inverse transform the values. + try: + # anomaly_info_df['anomaly_value']=anomaly_info_df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True) + self.naming_anomalyvalues(anomaly_info_df) + except Exception as e: + self.log.info(""anomaly_info_df exception err msg: \\n""+str(e)) + + ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. + if (mv_unique_feature_ad.lower()=='true'): + ## Multivariate and saving individual feature based anomalies + self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. 
+ pass + self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_ts_overall_dataframe')) + '''For overall ordered output,uncomment the below.''' + # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe_ordered')) + ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line + # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt') + ## Save actual test data actual_data + #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) + else: + self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. + pass + self.save_anomalyvalues(anomaly_info_df,'ts_overall_dataframe') + #Ordered data + # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') + ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line + # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), + ## Save actual test data test_score_df + #self.save_anomalyvalues(actual_data,'testdata') + self.log.info(""<---- Autoencoder time series anomalies : ---->""+str(anomaly_prediction_df)) + self.log.info(""<---- Autoencoder time series, Number of anomalies in data: ---->""+str(len(anomaly_prediction_df))) + # self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') + except Exception as e: + self.log.info(""dense layer anomaly error: \\n""+str(traceback.format_exc())) + else: + self.log.info(""Only LSTM and Dense layers supported for time series."") + except Exception as e: + self.log.info(""<---- time series error msg: ---->""+str(e)) + self.log.info(""<---- time series error msg (detailed): ---->""+str(traceback.format_exc())) + return autoencoder,anomaly_prediction_df,anomaly_info_df + + + + ## To normalize data,use when necessary + def normalize_data(train_data,test_data): + min_val=tf.reduce_min(train_data) + max_val=tf.reduce_max(train_data) + + train_data = (train_data - min_val" +")/(max_val - min_val) + test_data = (test_data - min_val)/(max_val - min_val) + + #converte the data into float + train_data = tf.cast(train_data, dtype=tf.float32) + test_data = tf.cast(test_data, dtype=tf.float32) + return train_data,test_data + + ## Scaling data ,Not used because of our aion preprocessing data profiler option. use when necessary. 
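+    ## e.g. train_df, test_df, scaler = getScaledData('standard', train_df, test_df, 'value') fits the scaler on train_df['value'] only and adds a 'scaled_value' column to both frames.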
+ def getScaledData(method='standard', train_df=None, test_df=None, feature_col='feature'): + from sklearn.preprocessing import StandardScaler + if method == 'standard': + scaler = StandardScaler() + else: + scaler = MinMaxScaler() + scaler = scaler.fit(train_df[[feature_col]]) + train_df['scaled_'+feature_col] = scaler.transform(train_df[[feature_col]]) + test_df['scaled_'+feature_col] = scaler.transform(test_df[[feature_col]]) + return train_df, test_df, scaler + + ## prediction fn + def prediction(self,model, data,min_threshold, threshold,test_labels,time_series_status,time_steps,test_index): + + # data1=scaler.inverse_transform(data) + try: + df_new=self.df.drop(self.datetimeFeature,axis=1,inplace=False) + except: + df_new=self.df + try: + actual_data=pd.DataFrame(self.df,columns=df_new.columns) + except Exception as e: + actual_data=pd.DataFrame(self.df) + pass + n_features=data.shape[1] + self.log.info(""prediction: number of features: \\n""+str(n_features)) + predicted_data = model.predict(data) + loss = tf.keras.losses.mae(predicted_data, data) + if (time_series_status.lower() == 'true'): + test_score_df = pd.DataFrame(index=test_index) + actual_data = actual_data.set_index(test_index) + anomaly_info_df=pd.DataFrame() + test_score_df['loss'] = loss + test_score_df['max_threshold'] = threshold + test_score_df['min_threshold'] = min_threshold + ## Task 20731 + #test_score_df['anomaly_value'] = test_score_df.apply(lambda x: x.loss > x.max_threshold or x.loss <= x.min_threshold, axis=1) + test_score_df['anomaly_value'] = np.where((test_score_df[""loss""] > test_score_df[""max_threshold""]) | (test_score_df[""loss""] <= test_score_df[""min_threshold""]), True, False) + anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) + else: + test_score_df = pd.DataFrame() + anomaly_info_df=pd.DataFrame() + test_score_df['loss'] = loss + #test_score_df['threshold'] = threshold + test_score_df['max_threshold'] = threshold + test_score_df['min_threshold'] = min_threshold + ## Task 20731 + #test_score_df['anomaly_value'] = (test_score_df.loss >= test_score_df.max_threshold) + #test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold) + test_score_df['anomaly_value'] = np.where((test_score_df[""loss""] > test_score_df[""max_threshold""]) | (test_score_df[""loss""] <= test_score_df[""min_threshold""]), True, False) + anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) + return tf.math.less(loss, threshold),test_score_df,actual_data,anomaly_info_df + + ##Not used now, for data ploting purpose + # def plot(self,autoencoder,data, n): + # enc_img = autoencoder.encoder(data) + # dec_img = autoencoder.decoder(enc_img) + # plt.plot(data[n], 'b') + # plt.plot(dec_img[n], 'r') + # plt.fill_between(np.arange(data.shape[1]), data[n], dec_img[n], color = 'lightcoral') + # plt.legend(labels=['Input', 'Reconstruction', 'Error']) + # plt.show() + + ## autoencoder fn for non timeseries data + def ae_nontimeseriesmodelfn(self,df,target): + autoencoder=None + mv_unique_feature_ad=self.mv_featurebased_ad_status + #For supervised non time series problems, we need to remove datetime feature. This will help scaler algs process the numeric data only. 
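+        # target == '' means unsupervised: the whole frame is used for training; otherwise the labelled branch below treats rows where target is truthy as normal and fits the autoencoder on those rows only.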
+ + try: + + if (target == ''): + try: + test_size=0.0 + # train_size=1-test_size + train_size=1-test_size + # train_size_perc=train_size*100 + train_size=int(len(df) * train_size) + train_data,test_data = df[:train_size], df[train_size:] + test_data=train_data + except: + test_size=float(self.testSize) + train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) + pass + + ae_hyperparameter=self.paramSpace + units=int(ae_hyperparameter['hidden_units']) + latent_units=int(ae_hyperparameter['latentspace_size']) + activation=ae_hyperparameter['activation'] + ##For task 20731 + minimum_threshold_user = str(ae_hyperparameter['min_threshold']) + maximum_threshold_user = str(ae_hyperparameter['max_threshold']) + train_data=train_data.values + test_data=test_data.values + + autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) + opt=ae_hyperparameter['optimizer'] + loss_fn=ae_hyperparameter[""loss""] + # loss_fn='binary_crossentropy' + epochs=int(ae_hyperparameter['epochs']) + batch_size=int(ae_hyperparameter['batch_size']) + learning_rate=float(ae_hyperparameter['learning_rate']) + # autoencoder.save('../output/autoenc',save_format='tf') + # cwd = os.path.abspath(os.path.dirname(__file__)) + cwd=self.deployLocation + try: + artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) + if not os.path.isdir(artifact_dir): + os.makedirs(artifact_dir) + except Exception as e: + self.log.info(""<---- artifact_dir path error. Error Msg: ---->""+str(e)) + + es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = ""val_loss"") + # es,mc=self.callbacks(filename = ""../output/autoenc.sav"", patience = 3, monitor = ""val_loss"") + self.model_compile(autoencoder,learning_rate, loss_fn, opt) + # autoencoder.compile(optimizer='adam', loss='mae') + autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) + reconstructed = autoencoder(train_data) + train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) + #threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + #min_threshold = np.mean(train_mae_loss)- 2*np.std(train_mae_loss) + ## Task 20731 + if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = float(minimum_threshold_user) + elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = float(minimum_threshold_user) + elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + else: + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) + self.log.info(""Anomaly Upper Threshold value based on loss fn (MAE): ""+str(threshold)) + self.log.info(""Anomaly lower_threshold value based on loss fn (MAE): ""+str(min_threshold)) + test_labels=None ## No test labels passed + pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data, min_threshold,threshold,test_labels,'False',None,None) + + # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) + 
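+                # anomaly_value was set in prediction(): True where loss > max_threshold or loss <= min_threshold, so the two .loc filters below separate anomalies from normal rows.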
normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] + anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] + self.log.info(""<---- Autoencoder (non timeseries) based anomaly detection, anomalies in data: ---->""+str(anomaly_prediction_df)) + self.log.info(""<---- Number of anomalies in data: ---->""+str(len(anomaly_prediction_df))) + self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') + # combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) + self.log.info(""<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->""+str(anomaly_info_df)) + # self.save_anomalyvalues(combined_df,'overall_dataframe') + ## If categorical features in original df, then inverse transform the values. + try: + ##anomaly_info_df,total dataframe. + + self.naming_anomalyvalues(anomaly_info_df) + except Exception as e: + self.log.info(""anomaly_info_df exception err msg: \\n""+str(e)) + + ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. + if (mv_unique_feature_ad.lower()=='true'): + ## Multivariate and saving individual feature based anomalies + self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. + pass + self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) + ## Save actual test data actual_data + #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) + else: + self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. + pass + self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') + #Ordered data + # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') + ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line + # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), + ## Save actual test data test_score_df + #self.save_anomalyvalues(actual_data,'testdata') + self.log.info(""<---- Autoencoder non time series / supervised problem anomalies : ---->""+str(anomaly_prediction_df)) + #ploting + df_subset = anomaly_prediction_df + fig, ax = plt.subplots() + df.plot(legend=False, ax=ax) + df_subset.plot(legend=False, ax=ax, color=""r"") + ax.set_title(""Anomaly Data Plot"") + ax.set_xlabel(""DateTime"") + ax.set_ylabel(""Values"") + plot_name='anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. 
Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + else: + y=df[target] + X=df.drop(target, axis=1) + train_data,test_data,train_labels,test_labels=train_test_split(X,y,test_size=0.2,random_state=42) + count_classes = pd.value_counts(df[target], sort = True) + num_of_classes= len(count_classes) + self." +"log.info(""train_data info: \\n""+str(train_data.info())) + if (num_of_classes >= 2): + # scaler = StandardScaler() + + # train_data = scaler.fit_transform(train_data) + # test_data = scaler.fit_transform(test_data) + # self.saveScaler(scaler) + train_labels = train_labels.astype(bool) + test_labels = test_labels.astype(bool) + n_train_data = train_data[train_labels] + n_test_data = test_data[test_labels] + # data1=scaler.inverse_transform(n_test_data) + n_test_data_actual=pd.DataFrame(n_test_data) + + ##anomaly data + an_train_data = train_data[~train_labels] + an_test_data = test_data[~test_labels] + n_train_data = train_data[train_labels] + n_test_data = test_data[test_labels] + ae_hyperparameter=self.paramSpace + # autoencoder = AeDetector(n_train_data,n_test_data) + activation=ae_hyperparameter['activation'] + units=int(ae_hyperparameter['hidden_units']) + latent_units=int(ae_hyperparameter['latentspace_size']) + ##For task 20731 + minimum_threshold_user = str(ae_hyperparameter['min_threshold']) + maximum_threshold_user = str(ae_hyperparameter['max_threshold']) + autoencoder = AeDetector(n_train_data,n_test_data,units,latent_units,activation) + opt=ae_hyperparameter['optimizer'] + loss_fn=ae_hyperparameter[""loss""] + batch_size=int(ae_hyperparameter['batch_size']) + # loss_fn='binary_crossentropy' + epochs=int(ae_hyperparameter['epochs']) + learning_rate=float(ae_hyperparameter['learning_rate']) + + cwd=self.deployLocation + try: + artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) + if not os.path.isdir(artifact_dir): + os.makedirs(artifact_dir) + except Exception as e: + self.log.info(""<---- artifact_dir path error. 
Error Msg: ---->""+str(e)) + + es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = ""val_loss"") + self.model_compile(autoencoder,learning_rate, loss_fn, opt) + + # autoencoder.compile(optimizer='adam', loss='mae') + autoencoder.fit(n_train_data, n_train_data, epochs = epochs, batch_size=batch_size, validation_data=(n_test_data, n_test_data),callbacks = [mc, es]) + model_info=self.summary(autoencoder) + self.log.info(""<---- Auto encoder anomaly detection model information: ---->""+str(model_info)) + # reconstructed = autoencoder(n_train_data) + reconstructed = autoencoder.predict(n_train_data) + #threshold = self.find_threshold(autoencoder, n_train_data) + train_mae_loss = tf.keras.losses.mae(reconstructed, n_train_data) + pred=tf.math.less(train_mae_loss, threshold) + ## Task 20731 + if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = float(minimum_threshold_user) + elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = float(minimum_threshold_user) + elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): + threshold = float(maximum_threshold_user) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + else: + threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) + min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) + self.log.info(""Anomaly threshold max value based on loss fn (MAE): ""+str(threshold)) + self.log.info(""Anomaly threshold min value based on loss fn (MAE): ""+str(min_threshold)) + pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, n_test_data, min_threshold,threshold,test_labels,'False',None,None) + # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) + normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] + # normal_prediction_df.to_csv('normal_prediction_df.csv') + # anomaly_prediction_df=(anomaly_info_df[anomaly_info_df['anomaly_value']]) + anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] + self.log.info(""<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->""+str(anomaly_info_df)) + # self.save_anomalyvalues(combined_df,'overall_dataframe') + ## If categorical features in original df, then inverse transform the values. + try: + ##anomaly_info_df,total dataframe. + + self.naming_anomalyvalues(anomaly_info_df) + except Exception as e: + self.log.info(""anomaly_info_df exception err msg: \\n""+str(e)) + + ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. + if (mv_unique_feature_ad.lower()=='true'): + ## Multivariate and saving individual feature based anomalies + self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. 
+ pass + self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) + ## Save actual test data actual_data + #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) + else: + self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') + try: + anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) + except Exception as e: + #If merge fails, just out!. + self.log.info(""Anomaly Detection Merge df exception :\\n""+str(e)) + finally: + #check merging done or not, to be imp. + pass + self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') + + ## Save actual test data test_score_df + #self.save_anomalyvalues(actual_data,'testdata') + self.log.info(""<----Number of anomalies in data: ---->""+str(len(anomaly_prediction_df))) + """""" Plot to show case anomalies, now commented, for testing purpose uncomment and check visually anomalies. """""" + #ploting + df_subset = anomaly_prediction_df + fig, ax = plt.subplots() + df.plot(legend=False, ax=ax) + df_subset.plot(legend=False, ax=ax, color=""r"") + # plt.show() + ax.set_title(""Anomaly Data Plot"") + ax.set_xlabel(""DateTime"") + ax.set_ylabel(""Values"") + + plot_name='anomalyplot.png' + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + # plt.savefig(str(plot_dir)+'/'+plot_name) + plt.clf() + plt.cla() + plt.close() + + + else: + self.log.info(""<---- Check dataset and basic configurations. ---->"") + + except Exception as e: + self.log.info(""<---- Non time series anomaly detection error msg: ---->""+str(e)) + self.log.info(""<---- Non time series anomaly detection error msg (detailed): ---->""+str(traceback.format_exc())) + return autoencoder,anomaly_prediction_df,anomaly_info_df + + ## Hyperparameter tuning autoencoders, not implemented + def hyperparamtuning_AE(self): + try: + self.log.info (""autoencoder hyperparam tuning.not implemented."") + except Exception as e: + self.log.info(""autoencoder hyperparam tuning error: ""+str(e)) + pass + + ## randomsearch for dbscan + def hyperparamtuning_dbscan(self,model,tuner,Parameter_Trials,data): + + params=model.get_params().keys() + try: + labels = model.labels_ + #df = pd.DataFrame(labels) + try: + scorer = metrics.silhouette_score(data, labels) + except: + pass + if (tuner.lower() == 'randomsearch'): + # Parameters to try + cluster_labels = model.labels_ + Random_Search = RandomizedSearchCV(model, Parameter_Trials, n_iter=50,cv=5, scoring='adjusted_rand_score', refit=True, n_jobs=1, verbose=5) + RandomSearchResults=Random_Search.fit(data) + # Fetching the best hyperparameters + best_params=RandomSearchResults.best_params_ + # All the parameter combinations tried by RandomizedSearchCV + RandomSearchResults.cv_results_['params'] + except Exception as e: + self.log.info(""<---- dbscan hpt error msg: ---->""+str(e)) + self.log.info(""<---- dbscan hpt error msg (detailed): ---->""+str(traceback.format_exc())) + + return best_params + + ## Reading aion postprocess data from target->AION_usecaseNo->data->postprocess data + def read_inputdata(self): + cwd=self.deployLocation + try: + in_path=os.path.normpath(os.path.join(cwd,'data')) + if not os.path.isdir(in_path): + self.log.info(""<---- Anomaly detection target data folder not available.--->\\n"") + 
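+            # Note: the isdir check above only logs a message; if the folder really is missing, pd.read_csv below raises and the except clause logs that error instead.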
postprocesseddata=os.path.normpath(os.path.join(cwd,'data','postprocesseddata.csv')) + postprocessed_df=pd.read_csv(postprocesseddata) + except Exception as e: + self.log.info(""<---- Anomaly detection target data folder not available, Reading postprocess csv file issue. Error Msg: ---->""+str(e)) + return postprocessed_df + + ## Get original dataframe values using preprocess pipe after output data created. + ##get_label_dict fn not used now. Use if preprocess_pipe based transform needed. + def get_label_dict(self, pipe): + label_dict = {} + dict_pipe={} + for (comp_name, component) in pipe.transformer_list: + if 'labelencoding' in comp_name: + i=1 + for step in component.steps: + key='val'+'_'+str(i) + ordinalencoder=step[1] + dict_pipe[f'val_{i}']=ordinalencoder + # dict_pipe[key].append(ordinalencoder) + label_dict.update(dict_pipe) + i=i+1 + + return label_dict + + else: + continue + return label_dict + + ## Decode label features using aion preprocessed_pipe model,not used now. If we need to use preprocess pipe for inverse transform,use below block. + def decoder_labeled_features(self,df): + import joblib + try: + cwd=self.deployLocation + # in_path=os.path.normpath(os.path.join(cwd,'data')) + if not os.path.isdir(in_path): + self.log.info(""<---- Anomaly detection target model folder not available.--->\\n"") + preprocessed_pipe=os.path.normpath(os.path.join(cwd,'model','preprocess_pipe.pkl')) + model = joblib.load(preprocessed_pipe) + label_dict = get_label_dict(model) + encoder=label_dict.get('val_4') + num_cols = orig_data.select_dtypes(include=np.number).columns.tolist() + cat_cols = orig_data.select_dtypes(exclude=np.number).columns.tolist() + cat_col_actual=[] + for col in cat_cols: + try: + df1=encoder.inverse_transform(df[col]) + cat_col_actual.append(col) + except: + pass + df1=pd.DataFrame(data=df1) + df1.columns=cat_cols + df2=df[num_cols] + df_anomalyinfo_col=df['anomaly_value'] + df_list = [df2, df1, df_anomalyinfo_col] # List of your dataframes + combined_df = pd.concat(df_list, join='outer', axis=1).fillna(0) + except: + combined_df=None + pass + return combined_df + + ## save predicted data and actual data columns. For get back user original data features + # + " +"def merge_pre_post_dfs(self,out_df=None): + cwd=self.deployLocation + anomaly_algorithm=str(self.anomalyMethod) + try: + in_path=os.path.normpath(os.path.join(cwd,'data')) + if not os.path.isdir(in_path): + self.log.info(""<---- Anomaly detection target data folder not available.--->\\n"") + preprocessed_file=os.path.normpath(os.path.join(cwd,'data','preprocesseddata.csv')) + preprocessed_df=pd.read_csv(preprocessed_file) + ## cat_cols will get categorical col from preprocessed, cat_diff_cols will get common cat col between output df and preprocessed. + cat_cols=preprocessed_df.select_dtypes(exclude=np.number).columns.tolist() + num_cols = preprocessed_df.select_dtypes(include=np.number).columns.tolist() + cat_diff_cols=list(set(cat_cols).intersection(out_df.columns.tolist())) + diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) + if (cat_diff_cols): + if (len(preprocessed_df) == len(out_df)): + #Drop each categorical col of original data from output df (which have numerical converted values). 
So, in merging can be done on perfect columns + try: + ## get common categorical col name between actual and output df + for col in cat_diff_cols : + out_df.drop(col,axis=1,inplace=True) + except: + self.log.info(""drop col not possible, pass the step."") + #Just continue + pass + diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) + try: + ## Check any datetime column in output df and preprocesseddata + import pandas.api.types as ptypes + outdf_dt_index_check=ptypes.is_datetime64_dtype(out_df.index) + #Is output df have datetime col + if (outdf_dt_index_check): + if ((self.datetimeFeature.lower() !='na' and self.datetimeFeature)): + try: + preprocessed_df[self.datetimeFeature] = pd.to_datetime(preprocessed_df[self.datetimeFeature]) + preprocessed_df.set_index(self.datetimeFeature, inplace=True) + except Exception as e: + self.log.info(""Given data not contain datetime specified.""+str(traceback.format_exc())) + ## Below step ,making datetime index to date time column. for merging and droping purpose. + preprocessed_df.reset_index(inplace=True) + preprocessed_df.rename(columns={""index"":self.datetimeFeature},inplace=True) + out_df.reset_index(inplace=True) + out_df.rename(columns={""index"":self.datetimeFeature},inplace=True) + else: + ## If no datetime column, we need to keep both dataframe index columns as unique. so making them as int index. + preprocessed_df.reset_index(inplace=True, drop=True) + out_df.reset_index(inplace=True, drop=True) + pass + ## below part is to get status of index columns type (datetime,int or str), commented now. If needed for debug,pls use. + # dt_index_check=ptypes.is_datetime64_dtype(out_df.index) + # int_index_check=ptypes.is_numeric_dtype(out_df.index) + # str_index_check=ptypes.is_string_dtype(out_df.index) + ## Get common column between preprocess and output df + try: + if (anomaly_algorithm.lower() == 'autoencoder'): + common_cols=out_df.drop(['loss','max_threshold','min_threshold','anomaly_value'],axis=1) + common_cols.drop(common_cols.filter(regex=""Unname""),axis=1, inplace=True) + merge_on_cols=common_cols.columns.tolist() + combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') + ## Drop duplicate based on columns except time + # drop_duplicate_on=merge_on_cols.extend(cat_diff_cols) + drop_duplicate_on=merge_on_cols+cat_diff_cols + combined_df = combined_df.drop_duplicates(drop_duplicate_on, keep=False) + else: + ## otherwise, it is dbscan algorithm + common_cols=out_df.drop(['cluster','anomaly_value'],axis=1) + common_cols.drop(common_cols.filter(regex=""Unname""),axis=1, inplace=True) + merge_on_cols=common_cols.columns.tolist() + combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') + ## Drop duplicate based on columns except time + # drop_duplicate_on=merge_on_cols+cat_diff_cols + combined_df = combined_df.drop_duplicates(merge_on_cols, keep='last') + except: + combined_df=out_df + pass + ## Just for reference, in future if you want different merge/join option + # combined_df = pd.merge(preprocessed_df[diff_cols],out_df, left_index=True, right_index=True, how='inner') + + except Exception as e: + self.log.info(""<---- merge error msg : ---->""+str(e)) + self.log.info(""<---- merge error msg (detailed): ---->""+str(traceback.format_exc())) + pass + ## if both data frame have different columns (preprocessed and outdf) + else: + self.log.info(""User data is preprocessed and data cleaning happened.So, actual data and processed data length mismatch. 
So,data records range may vary."") + + try: + # combined_df=self.decoder_labeled_features(out_df) + combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') + # combined_df = combined_df.drop_duplicates(cat_cols, keep='last') + combined_df = combined_df.drop_duplicates(num_cols, keep='last') + except: + ## If nothing in merge works,then make outdf as final dataframe. + try: + ## If above merge fails, change drop_duplicate hyperparam keep='last' last appearance of key occurance. + combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') + combined_df = combined_df.drop_duplicates(cat_cols, keep=False) + except: + #If nothing is works, just keep out_df as combined df + combined_df=out_df + ## if no common categorical col found between preprocessed and outdf. + else: + ## If merge not works,then make outdf as final dataframe. + if (len(cat_cols) > 0): + try: + combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') + combined_df = combined_df.drop_duplicates(cat_cols, keep='last') + except: + #make safe for return + combined_df=out_df + else: + ##If no categorical features available + combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') + combined_df = combined_df.drop_duplicates(num_cols, keep=False) + except Exception as e: + self.log.info(""<---- Anomaly detection target data folder not available, dataframe merging issue. Error Msg: ---->""+str(e)) + self.log.info(""making output df as final merged data, no categorical column found in output anomaly data. It is user responsibility to check the anomaly data."") + #make safe for return + combined_df=out_df + return combined_df + + ## for module reusability, this below naming fn created. + def naming_anomalyvalues(self,df): + replace_values_T='|'.join(['TRUE','True','true']) + replace_values_F='|'.join(['FALSE','False','false']) + try: + df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_T,'AnomalyDataPoint', regex=True) + except: + df['anomaly_value']=df['anomaly_value'].replace(replace_values_T,'AnomalyDataPoint', regex=True) + + df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True) + return df + + ## DBScan based anomaly detection + def dbscan_ad(self,data,eps,min_samples,cols): + try: + tuner='randomsearch' + Parameter_Trials={'eps':eps, + 'min_samples':min_samples} + model = DBSCAN(algorithm='auto') + hist = model.fit(data) + pred = model.fit_predict(data) + best_params = self.hyperparamtuning_dbscan(model,tuner,Parameter_Trials,data) + self.log.info(""<---- Best hyper parameters for dbscan: ---->""+str(best_params)) + best_eps=best_params['eps'] + best_min_samples=best_params['min_samples'] + if (best_min_samples < len(cols)): + min_samples=len(cols)+1 + if (best_eps < 0.2): + best_eps=0.2 + self.log.info(""best_eps: \\n""+str(best_eps)) + self.log.info(""best_min_samples: \\n""+str(best_min_samples)) + best_model=DBSCAN(algorithm='auto',eps = best_eps, min_samples = best_min_samples) + hist = best_model.fit(data) + pred = best_model.fit_predict(data) + + best_labels=best_model.labels_ + cluster_name = [""Cluster""+str(i) for i in set(best_labels)] + # outliers = data[best_model.labels_ == -1] + outlier_df = data.copy() + outlier_df.loc[:,'cluster'] = best_model.labels_ + outliers_final=outlier_df[outlier_df['cluster']==-1] + outliers_final['anomaly_value']=outliers_final['cluster']==-1 + normaldata= outlier_df[outlier_df['cluster']!=-1] + self.log.info(""<---- DBScan: Anomalies in data: 
---->""+str(outliers_final)) + self.log.info(""<---- DBScan: Number of anomalies in data: ---->""+str(len(outliers_final))) + # num_cat_features=len(self.cat_cols) + try: + self.save_anomalyvalues(outliers_final,'dbscan_anomaly_dataframe') + self.save_anomalyvalues(normaldata,'dbscan_normaldata_dataframe') + outlier_df['anomaly_value']=outlier_df['cluster']==-1 + + outlier_df=self.naming_anomalyvalues(outlier_df) + ##Convert results to original input data form for end user ease of understanding + try: + outlier_df=self.merge_pre_post_dfs(outlier_df) + except Exception as e: + self.log.info(""Anomaly Detection Merge df exception:\\n""+str(e)) + #If merge fails, just out! + pass + self.save_anomalyvalues(outlier_df,'dbscan_overall_dataframe') + except Exception as e: + self.log.info(""DBScan inversetransform err. msg: \\n""+str(e)) + no_clusters = len(set(best_labels)) - (1 if -1 in best_labels else 0) + self.log.info(""<---- DBScan: No of clusters: ---->""+str(no_clusters)) + n_noise_ = list(best_labels).count(-1) + ## Ploting the dbscan clusters + plot_name='dbscan_anomalyplot.png' + fig, ax = plt.subplots() + ax.set_title(""DBScan Clusters"") + ax.scatter(data.iloc[:, 0], data.iloc[:, 1], c=best_labels) + outliers_plot = data[best_model.labels_ == -1] + ax.scatter(outliers_plot.iloc[:, 0], outliers_plot.iloc[:, 1], c='red') + cwd=self.deployLocation + try: + plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) + if not os.path.isdir(plot_dir): + os.makedirs(plot_dir) + plotpath=str(plot_dir)+'/'+plot_name + except Exception as e: + self.log.info(""<---- plot_dir path error. Error Msg: ---->""+str(e)) + if os.path.exists(plotpath): + os.remove(plotpath) + plt.savefig(plotpath) + plt.clf() + plt.cla() + plt.close() + except Exception as e: + self.log.info(""<---- dbscan error msg: ---->""+str(e)) + self.log.info(""<---- dbscan error msg (detailed): ---->""+str(traceback.format_exc())) + return best_model,outliers_final + + ## Inverse transform fn for categorical data + def inverse_transform(self,df,cat_cols,le_model): + df_new=pd.DataFrame() + df_new.index=df.index + df_reset_index=df.reset_index(drop=True) + for col in cat_cols: + df_reset_index[col] = le_model.inverse_transform(df_reset_index[col].astype(int)) + df_reset_index.index=df_new.index + df=df_reset_index + return df + + ##If data comes without going via aion data profiler, we can use this below preprcessing fn () + ##Preprocess fn for categorical data , not used now. + def preprocessfn_categorical(self,df): + try: + cat_cols=self.cat_cols + preprocessed_df=None + le=preprocessing.LabelEncoder() + self.le_model=le + label_encoded_df = df.copy() + for col in cat_cols: + label_encoded_df[col]=le.fit_transform(label_encoded_df[col]) + except Exception as e: + self.log.info(""preprocessfn_categorical error traceback.""+str(traceback.format_exc())) + + return label_encoded_df,cat_cols + + ## Design pattern: Factory,Adapter. Detect antoencoder object or dbscan object based on input params. The interface can be used for anyother extention. Not created any abstract class. + ##Main autoencoder based anomaly detection function, from here, sub modules will be called. 
+ def mainAnomalyDetectionfn(self): + df=self.df + ## reading post processed data from target" +"->usecase->data directory + # df=self.read_inputdata() + ## Below line overwrite incoming df with postprocesseddata + self.log.info(""<----------- In autoencoder based anomaly detection algorithm main process module, the incoming datafra" +"join(home,'HCLT','AION','PreTrainedModels','ObjectDetection') + pipeline_config = str(modelPath/self.modelDirName/""pipeline.config"") + checkPoint = ""ckpt-0"" + with open(str(modelPath/self.modelDirName/""checkpoint/checkpoint"")) as f: + line = f.readline() + checkPoint = line.split(':')[1].strip()[1:-1] #(model_checkpoint_path: ""ckpt-301"") to ckpt-301 + checkPoint = ""checkpoint/""+checkPoint + from object_detection.utils import config_util + configs = config_util.get_configs_from_pipeline_file(pipeline_config) + model_config = configs['model'] + if detectionModel.lower() == 'ssd': + model_config.ssd.num_classes = num_classes + configs['train_config'].fine_tune_checkpoint_type = ""detection"" + elif detectionModel.lower() == 'centernet': + model_config.center_net.num_classes = num_classes + configs['train_config'].fine_tune_checkpoint_type = ""fine_tune"" + elif detectionModel.lower() == 'fasterrcnn': + model_config.faster_rcnn.num_classes = num_classes + configs['train_config'].fine_tune_checkpoint_type = ""detection"" + else: + raise ValueError(""{} Model is not supported for object detection.\\n"".format(detectionModel)) + if self.gpu: + checkpointPath = str(self.gpuPretrainedModelPath / checkPoint) + else: + checkpointPath = str(modelPath/self.modelDirName/checkPoint) + configs['train_config'].fine_tune_checkpoint = checkpointPath + configs['train_config'].num_steps = n_epoch + configs['train_config'].batch_size = batch_size + configs['train_input_config'].tf_record_input_reader.input_path[:] = [str(self.tfRecordLoc/""train.tfrecord"")] + configs['train_input_config'].label_map_path = str(self.labelMapLoc/""label_map.pbtxt"") + configs['eval_input_config'].tf_record_input_reader.input_path[:] = [self.dataLocation + ""/test.tfrecord""] + configs['eval_input_config'].label_map_path = self.dataLocation + ""/label_map.pbtxt"" + # Save new pipeline config + new_pipeline_proto = config_util.create_pipeline_proto_from_configs(configs) + config_util.save_pipeline_config(new_pipeline_proto, self.dataLocation) + + + def __exportModel(self): + self.log.info('-------> exporting trained Model') + from object_detection.protos import pipeline_pb2 + from object_detection import exporter_lib_v2 + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.io.gfile.GFile(str(self.pipelineLoc/""pipeline.config""), 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge('', pipeline_config) + exporter_lib_v2.export_inference_graph( + 'image_tensor', pipeline_config, self.dataLocation, + str(self.modelOutput)) + + def startObjectDetector(self): + if self.gpu: + self.log.info('-------> Training on the cloud machine') + self.log.info('Status:- |...Remote Machine Training') + with open(self.dataLocation+'\\model.config', 'w')as f: + json.dump( self.modelURLDict, f) + awsGpu = awsGPUTraining(self.serverConfig) + try: + awsGpu.start_instance() + awsGpu.copy_files_to_server(self.dataLocation) + awsGpu.start_executing_notebook() + self.log.info('-------> Downloading trained model file') + tarFile = awsGpu.copy_file_from_server(self.dataLocation) + with tarfile.open(tarFile) as tar: + tar.extractall(self.dataLocation) + 
awsGpu.stop_server_instance() + except: + awsGpu.stop_server_instance() + raise + extractedPath = Path(self.dataLocation)/Path(tarFile).name.split('.')[0] + filesList = extractedPath.glob('**/*') + for file in filesList: + if file.parent == extractedPath: + if file.name == ""export"": + shutil.copytree(file, self.modelOutput) + elif file.is_dir(): + shutil.copytree(file, Path(self.dataLocation)/file.name) + else: + shutil.copy2(file, self.dataLocation) + shutil.rmtree(extractedPath) + Path(tarFile).unlink() + shutil.copy2(self.dataLocation + ""/label_map.pbtxt"", str(self.modelOutput)) + else: + self.log.info('-------> Training on the local machine') + self.log.info('Status:- |...Local Machine Training') + tf.config.set_soft_device_placement(True) + strategy = tf.compat.v2.distribute.MirroredStrategy() + with strategy.scope(): + try: + from object_detection import model_lib_v2 + model_lib_v2.train_loop( + pipeline_config_path=str(self.pipelineLoc/""pipeline.config""), + model_dir=str(self.dataLocation)) + except Exception: + raise + self.__exportModel() + shutil.copy2(str(self.labelMapLoc/""label_map.pbtxt""), str(self.modelOutput)) + + def evaluateObjectDetector(self, model_dir, pipeline_config_dir=None, checkpoint_dir=None): + if checkpoint_dir == None: + checkpoint_dir = model_dir + if pipeline_config_dir == None: + pipeline_config_dir = model_dir + self.log.info('-------> Evaluation started') + from object_detection import model_main_tf2 + cmd = '""{}"" ""{}"" --model_dir=""{}"" --pipeline_config_path=""{}/pipeline.config"" --checkpoint_dir=""{}"" --eval_timeout=6'.format(sys.executable, model_main_tf2.__file__, model_dir, model_dir, checkpoint_dir) + result = subprocess.run(cmd , capture_output=True, text=True,shell=True) + precisionParam = ['Average Precision', 'Average Recall'] + text = result.stdout.split('\\n') + stats = {} + keys = [] + try: + for x in text: + for y in precisionParam: + indx = x.find(y) + if indx != -1: + keyValue = x[indx:].split(' = ') + stats[keyValue[0]] = keyValue[1] + keys.append(keyValue[0]) + except Exception as e: + raise ValueError(""Error in evaluation: "" + str(e)) + self.log.info('-------> Evaluation statistics:') + self.log.info(stats) + return stats, keys + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from learner.optimizetechnique import OptimizationTq +import warnings +from learner.parameters import parametersDefine +from learner.defaultAlgos import defaultParams +from hyperopt import fmin, tpe, hp, STATUS_OK, Trials +import time +import logging +import os +import sys +import json +from sklearn.svm import SVR +from sklearn.linear_model import LinearRegression +from sklearn.linear_model import Lasso +from sklearn.linear_model import Ridge +from sklearn.tree import DecisionTreeRegressor +from sklearn.ensemble import RandomForestRegressor +from xgboost import XGBRegressor +from lightgbm import LGBMRegressor +from catboost import CatBoostRegressor +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +from learner.aion_matrix import aion_matrix +from uncertainties.aionUQ import aionUQ +import mlflow + +class RegressionModel(): + def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): + self.modelList =modelList + self.params =params + self.trainX =trainX + self.trainY =trainY + self.testX = testX + self.testY = testY + self.method =method + self.scoreParam=scoreParam + self.cvSplit=cvSplit + self.numIter=numIter + self.geneticParam=geneticParam + self.log = logging.getLogger('eion') + self.deployLocation = deployLocation + self.uq_x_train = trainX + self.uq_x_test = testX + self.uq_y_train = trainY + self.uq_y_test = testY + self.AlgorithmNames={'Linear Regression':'LinearRegression','Lasso':'Lasso','Ridge':'Ridge','Decision Tree':'DecisionTreeRegressor','Random Forest':'RandomForestRegressor','Extreme Gradient Boosting (XGBoost)':'XGBRegressor','Light Gradient Boosting (LightGBM)': 'LGBMRegressor', + 'Categorical Boosting (CatBoost)': 'CatBoostRegressor','Bagging (Ensemble)':'BaggingRegressor','Stacking (Ensemble)':'StackingRegressor','Voting (Ensemble)':'VotingRegressor','Neural Architecture Search':'NAS'} + self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} + + def logMlflow(self, runName, params, metrices, estimator, algoName=None): + with mlflow.start_run(run_name = runName): + for k,v in params.items(): + mlflow.log_param(k, v) + for k,v in metrices.items(): + mlflow.log_metric(k, v) + if algoName == 'CatBoostRegressor': + mlflow.catboost.log_model(estimator, ""model"") + else: + mlflow.sklearn.log_model(estimator, ""model"") + model_uri = mlflow.get_artifact_uri(""model"") + """""" for some dataset evaluate takes more than 90 min, so commenting till some solution is not found + evaluate_data = self.testX.copy() + evaluate_data['target'] = self.testY.copy() + mlflow.evaluate(model_uri, data=evaluate_data, targets='target', model_type=""regressor"") + del evaluate_data + """""" + + def regressionModelling(self,modelOrFeatureBased, code_configure): + paramObj=parametersDefine() + bestModel='' + bestParams={} + import sys + bestScore=-sys.float_info.max #bugfix 11656 + scoredetails = '' + self.log.info('\\n---------- Regression Model has started ----------') + try: + self.log.info('Status:- |... 
Search Optimization Method applied: '+self.method) + for modelName in self.modelList: + objClf = aion_matrix() + if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Neural Architecture Search']: + if modelName == 'Bagging (Ensemble)': + from ensemble.ensemble_bagging import ensemble_bagging + ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,0,0) + estimator,modelParams,score,model = ensemble_bagging_obj.ensemble_bagging__regressor(self.trainX,self.trainY,self.testX,self.testY) + if modelName == 'Stacking (Ensemble)': + from ensemble.ensemble_stacking import ensemble_stacking + ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) + estimator,modelParams,score,model = ensemble_stacking_obj.ensemble_stacking__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) + if modelName == 'Voting (Ensemble)': + from ensemble.ensemble_voting import ensemble_voting + ensemble_voting_obj = ensemble_voting(self.params[modelName],self.scoreParam) + estimator,modelParams,score,model = ensemble_voting_obj.ensemble_voting__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) + ''' + if modelName == 'Neural Architecture Search': + from nas.aionNAS import aionNAS + objNAS = aionNAS('Regression',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) + estimator,modelParams,score,model=objNAS.nasMain(self.scoreParam) + ''' + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(score)+',""ModelUncertainty"":""NA""}' + if self.scoreParam == ""r2"": + if score > bestScore" +": + bestScore =score + bestModel =model + bestParams=modelParams + bestEstimator=estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore =abs(score) + bestModel =model + bestParams=modelParams + bestEstimator=estimator + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n') + continue + if modelName not in self.params: + continue + paramSpace=self.params[modelName].copy() + algoName = self.AlgorithmNames[modelName] + paramDict =paramObj.paramDefine(paramSpace,self.method) + if self.method == 'bayesopt': + code_configure.add_model(algoName,paramSpace) + else: + paramDictCopy = paramDict + # numpy array is not json serializable + #numpy is already imported but still np.ndarray raise error + import numpy as np + for key,value in paramDictCopy.items(): + if isinstance(value, np.ndarray): + paramDictCopy[key] = paramDictCopy[key].tolist() + code_configure.add_model(algoName,paramDictCopy) + + if not self.method == 'bayesopt': + paramSize = paramObj.getParamSpaceSize(paramDict) + else: + paramSize = 0 + if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=1): + try: + start = time.time() + #function call + defObj = defaultParams(algoName,paramDict,self.scoreParam,0,0,paramSize) + estimator, modelParams, model,score =defObj.startTrainingRegression(self.trainX,self.trainY,self.testX,self.testY) + executionTime = time.time() - start + self.log.info('---------> Total Execution: ' + str(executionTime) + '\\n') + if (scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""' + self.modelToAlgoNames[model] + '"",""FeatureEngineering"":""' + str( + modelOrFeatureBased) + '"",""Score"":' + str(score) + ',""ModelUncertainty"":""NA""}' + if self.scoreParam == ""r2"": + if score > bestScore: + bestScore = score + bestModel = model + bestParams = modelParams + bestEstimator = estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore = abs(score) + bestModel = model + bestParams = modelParams + bestEstimator = estimator + self.log.info('Status:- |... ML Algorithm applied: ' + modelName) + self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( + round(score, 2)) + '\\n') + + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------' + modelName + ' Model Execution failed!!!.' 
+ str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + continue + trainingStatus = 'Success' + if self.method =='grid': + try: + self.log.info(""-------> Optimization Method :Grid Search"") + self.log.info(""-------> Model Name: ""+str(modelName)) + opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + start = time.time() + model,modelParams,score,estimator=opTq.gridSearchOpt() + executionTime=time.time() - start + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(self.testY,predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) + score = rootmeanssquatederror + elif 'mae' in self.scoreParam: + meanabsoluteerror=mean_absolute_error(self.testY,predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score=r2_score(self.testY,predictedData) + score = r2score + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator,algoName) + except Exception as e: + self.log.info('\\n-----> ML flow error!!!.' + str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + # raise + pass + uq_jsonobject = '' + try: + if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: + self.log.info('-----> Model Uncertainty Not Supported') + else: + problemName = estimator.__class__.__name__ + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(total_picp_percentage)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(total_Uncertainty_percentage)+str('%')) + except: + pass + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(abs(score))+',""ModelUncertainty"":'+str(json.dumps(uq_jsonobject))+'}' + self.log.info('---------> Total Execution: '+str(executionTime)+'\\n') + if self.scoreParam == ""r2"": + if score > bestScore: + bestScore =score + bestModel =model + bestParams=modelParams + bestEstimator=estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore =abs(score) + bestModel =model + bestParams=modelParams + bestEstimator=estimator + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + + elif self.method == 'random': + try: + self.log.info(""-------> Optimization Method :Random Search"") + 
self.log.info(""-------> Model Name: ""+str(modelName)) + opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + start = time.time() + model,modelParams,score,estimator=opTq.randomSearchOpt() + executionTime=time.time() - start + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(self.testY,predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) + score = rootmeanssquatederror + elif 'mae' in self.scoreParam: + meanabsoluteerror=mean_absolute_error(self.testY,predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score=r2_score(self.testY,predictedData) + score = r2score + + if self.scoreParam == ""r2"": + if score>bestScore: + bestScore =score + bestModel =model + bestParams=modelParams + bestEstimator=estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore =abs(score) + bestModel =model + bestParams=modelParams + bestEstimator=estimator + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator,algoName) + except Exception as e: + self.log.info('\\n-----> ML flow error!!!.' + str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + # raise + pass + + uq_jsonobject = '' + try: + if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: + self.log.info('-----> Model Uncertainty Not Supported') + else: + problemName = estimator.__class__.__name__ + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_json" +"object=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(total_picp_percentage)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(total_Uncertainty_percentage)+str('%')) + except Exception as e: + print(e) + pass + if(scoredetails != ''): + scoredetails += ',' + + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(abs(score))+',""ModelUncertainty"":'+str(json.dumps(uq_jsonobject))+'}' + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + elif self.method == 'bayesopt': + try: + self.log.info(""-------> Optimization Method :BayesOpt Search"") + self.log.info(""-------> Model Name: ""+str(modelName)) + opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + fun=opTq.f + trials = Trials() + start = time.time() + best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=100, trials=trials) + executionTime=time.time() - start + results = sorted(trials.results, key = lambda x: x['loss']) + bestresult=results[0] + model=bestresult['model'] + 
score=bestresult['score'] + modelParams=bestresult['params'] + res = ', '.join(""{!s}={!r}"".format(key,val) for (key,val) in modelParams.items()) + modelObj=eval(model+'('+res+')') + estimator = modelObj.fit(self.trainX,self.trainY) + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + + if 'neg_mean_squared_error' in self.scoreParam: + meanssquatederror = mean_squared_error(self.testY,predictedData) + score = meanssquatederror + elif 'neg_root_mean_squared_error' in self.scoreParam: + rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) + score = rootmeanssquatederror + elif 'mae' in self.scoreParam: + meanabsoluteerror=mean_absolute_error(self.testY,predictedData) + score = meanabsoluteerror + elif 'r2' in self.scoreParam: + r2score=r2_score(self.testY,predictedData) + score = r2score + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator,algoName) + except Exception as e: + self.log.info('\\n-----> ML flow error!!!.' + str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + # raise + pass + if self.scoreParam == ""r2"": + if score>bestScore: + bestScore =score + bestModel =model + bestParams=modelParams + bestEstimator=estimator + else: + if abs(score) < bestScore or bestScore == -sys.float_info.max: + bestScore =abs(score) + bestModel =model + bestParams=modelParams + bestEstimator=estimator + + uq_jsonobject = '' + try: + if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: + self.log.info('-----> Model Uncertainty Not Supported') + else: + problemName = estimator.__class__.__name__ + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(total_picp_percentage)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(total_Uncertainty_percentage)+str('%')) + except: + pass + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(abs(score))+',""ModelUncertainty"":'+str(json.dumps(uq_jsonobject))+'}' + self.log.info('---------> Total Execution: '+str(executionTime)+'\\n') + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + else: + trainingStatus = 'Error (HyperTunning Algo Not Supported)' + pass + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + if trainingStatus.lower() == 'success': + self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n') + else: + self.log.info('Status:- |... 
Training Error : '+trainingStatus+'\\n') + if bestModel != 'None': + self.log.info('---------- Regression Model End ---------- \\n') + self.log.info('\\n------- Best Model and its parameters -------------') + self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) + self.log.info(""-------> Best Name: ""+str(bestModel)) + self.log.info(""-------> Best Score: ""+str(bestScore)) + else: + raise Exception(""Sorry, no model is trained"") + return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails + except Exception as inst: + self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +warnings.filterwarnings('ignore') +import pandas as pd +from sklearn.metrics import confusion_matrix +from sklearn.metrics import classification_report +from sklearn.metrics import roc_curve, auc +from sklearn.metrics import roc_auc_score +from sklearn.metrics import accuracy_score +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +from sklearn.metrics import recall_score +from sklearn.metrics import precision_score +from sklearn.metrics import f1_score +import logging +import numpy as np +from sklearn.preprocessing import binarize +from sklearn.preprocessing import LabelBinarizer +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +class aion_matrix: + def __init__(self): + self.log = logging.getLogger('eion') + + def get_print_score(self,matrix): + if 'accuracy' in str(matrix).lower(): + return 'Accuracy' + elif 'recall' in str(matrix).lower(): + return 'Recall' + elif 'precision' in str(matrix).lower(): + return 'Precision' + elif 'f1_score' in str(matrix).lower(): + return 'F1_Score' + elif 'roc_auc' in str(matrix).lower(): + return 'ROC_AUC' + elif 'mse' in str(matrix).lower() or 'neg_mean_squared_error' in str(matrix).lower(): + return 'Mean Squared Error(MSE)' + elif 'rmse' in str(matrix).lower() or 'neg_root_mean_squared_error' in str(matrix).lower(): + return 'Root Mean Suared Error(RMSE)' + elif 'mae' in str(matrix).lower() or 'neg_mean_absolute_error' in str(matrix).lower(): + return 'Mean Absolute Error (MAE)' + elif 'r2' in str(matrix).lower(): + return 'R-Squared(R2)' + else: + return 'Unknown' + + def get_score(self,matrix,actual,predict): + if 'accuracy' in str(matrix).lower(): + ensemble_score = accuracy_score(actual,predict) + ensemble_score = ensemble_score*100 + elif 'recall' in str(matrix).lower(): + ensemble_score = recall_score(actual,predict,average='macro') + ensemble_score = ensemble_score*100 + elif 'precision' in str(matrix).lower(): + ensemble_score = 
precision_score(actual,predict,average='macro') + ensemble_score = ensemble_score*100 + elif 'f1_score' in str(matrix).lower(): + ensemble_score = f1_score(actual,predict, average='macro') + ensemble_score = ensemble_score*100 + elif 'roc_auc' in str(matrix).lower(): + try: + ensemble_score = roc_auc_score(actual,predict,average=""macro"") + except: + try: + actual = pd.get_dummies(actual) + predict = pd.get_dummies(predict) + ensemble_score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr') + except: + ensemble_score = 0 + ensemble_score = ensemble_score*100 + elif ('mse' in str(matrix).lower()) or ('neg_mean_squared_error' in str(matrix).lower()): + ensemble_score = mean_squared_error(actual,predict) + elif ('rmse' in str(matrix).lower()) or ('neg_root_mean_squared_error' in str(matrix).lower()): + ensemble_score=mean_squared_error(actual,predict,squared=False) + elif ('mae' in str(matrix).lower()) or ('neg_mean_absolute_error' in str(matrix).lower()): + ensemble_score=mean_absolute_error(actual,predict) + elif 'r2' in str(matrix).lower(): + ensemble_score=r2_score(actual,predict) + return round(ensemble_score,2) + def getClassificationPerformaceMatrix(self,le_trainY,predictedData,labelMaps): + + setOfyTrue = set(le_trainY) +" +" unqClassLst = list(setOfyTrue) + if(str(labelMaps) != '{}'): + inv_mapping_dict = {v: k for k, v in labelMaps.items()} + unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) + unqClassLst2 = list(unqClassLst2) + else: + unqClassLst2 = unqClassLst + indexName = [] + columnName = [] + targetnames=[] + for item in unqClassLst2: + indexName.append(""act:""+str(item)) + columnName.append(""pre:""+str(item)) + targetnames.append(str(item)) + + matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) + #pd.set_option('expand_frame_repr', False) + pd.set_option('display.max_columns',len(targetnames)+2) + self.log.info('-------> Confusion Matrix: ') + self.log.info(matrixconfusion) + pd.reset_option('display.max_columns') + #pd.reset_option('expand_frame_repr') + #self.log.info('-------> Confusion Matrix With Labels: ') + #self.log.info(confusion_matrix(le_trainY,predictedData, labels = unqClassLst)) + #print(unqClassLst2) + classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() + self.log.info('-------> Classification Report: ') + self.log.info(classificationreport) + lb = LabelBinarizer() + lb.fit(le_trainY) + transformTarget= lb.transform(le_trainY) + transformPredict = lb.transform(predictedData) + rocaucscore = roc_auc_score(transformTarget,transformPredict,average=""macro"") + self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) + matrixconfusion = matrixconfusion.to_json(orient='index') + classificationreport = classificationreport.to_json(orient='index') + matrix = '""ConfusionMatrix"":'+matrixconfusion+',""ClassificationReport"":'+classificationreport+',""ROC_AUC_SCORE"":'+str(rocaucscore) + return(matrix) + + def get_regression_matrix(self,targetData,predictedData): + r2score=r2_score(targetData, predictedData) + self.log.info('-------> R2_score :'+str(r2score)) + meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) + self.log.info('-------> MAE :'+str(meanabsoluteerror)) + meanssquatederror=mean_squared_error(targetData, predictedData) + self.log.info('-------> MSE :'+str(meanssquatederror)) + 
rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) + self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) + targetArray, predictedArray = np.array(targetData), np.array(predictedData) + try: + EPSILON = 1e-10 + meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 + except ZeroDivisionError: + meanpercentageerror = 0 + self.log.info('-------> MAPE :'+str(meanpercentageerror)) + try: + normalised_rmse_percentage = round(((rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100), 4) + except Exception as e: + normalised_rmse_percentage = -1 + self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) + matrix = '""MAE"":'+str(meanabsoluteerror)+',""R2Score"":'+str(r2score)+',""MSE"":'+str(meanssquatederror)+',""MAPE"":'+str(meanpercentageerror)+',""RMSE"":'+str(rootmeanssquatederror)+',""Normalised RMSE(%)"":'+str(normalised_rmse_percentage) + return matrix + + def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2): + best_feature_model = 'Model1' + self.log.info('\\n ---------- Summary Start ------------') + if modelType.lower() == ""classification"": + if(threshold1 == -1 and threshold2 == -1): + if score1> score2: + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + elif(threshold1 == -1): + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + elif(threshold1 == -2): + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + if pscore1 == pscore2: + if rscore1 > rscore2: + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + elif rscore1 == rscore2: + if pscore1 > pscore2: + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + + elif modelType.lower() == ""regression"": + 
if scoreParam == ""r2"" or scoreParam == ""explained_variance"": + if score1> score2 : + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + else: + if score1< score2 : + self.log.info('-------> Best Features: Model1') + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model1' + else: + self.log.info('-------> Best Features: Model2') + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = 'Model2' + self.log.info('---------- Summary End ------------\\n') + return(best_feature_model) + + def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): + thresholdx = -1 + for threshold in threshold_range: + predictedData = estimator.predict_proba(testX) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 + p_score = precision_score(testY, predictedData) + #self.log.info('-------------> Precision:'+str(p_score)) + r_score = recall_score(testY, predictedData) + #self.log.info('-------------> Rscore:'+str(r_score)) + #self.log.info(confusion_matrix(testY, predictedData)) + tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() + if(checkParameter.lower() == 'fp'): + if fp == 0: + if(p_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + + if(checkParameter.lower() == 'fn'): + if fn == 0: + if(r_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + return(thresholdx,p_score,r_score) + + def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): + cmodel = False + if(threshold != -1): + if(bestthreshold == -1): + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fp0: + if rscore > brscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif rscore == brscore: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fn0: + if pscore > bpscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore 
+ btscore = tscore + elif pscore == bpscore: + if tscore > btscore or btscore == -0" +"xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + else: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + else: + if(bestthreshold == -1): + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + + return cmodel,btscore,bestthreshold,brscore,bpscore ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import time +import os +import sys +import logging +from sklearn.metrics import accuracy_score, make_scorer +from sklearn.model_selection import train_test_split +from sklearn.svm import OneClassSVM +from sklearn.ensemble import IsolationForest +import pickle +from sklearn import metrics +import numpy as np +import pandas as pd +from learner.aion_matrix import aion_matrix +from learner.parameters import parametersDefine +from sklearn.metrics import f1_score +from sklearn import model_selection +from learner.anomalyDetectionAE import anomalyDetectionAE +class anomalyDetector(object): + def __init__(self): + self.log = logging.getLogger('eion') + + def startanomalydetector(self,df,target,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status): + try: + self.log.info(""startanomalydetector.... \\n"") + from io import StringIO + buf = StringIO() + df.info(buf=buf) + #self.log.info(buf.getvalue()) + self.log.info(""User data info : \\n""+str(buf.getvalue())) + try: + df[datetimeFeature] = pd.to_datetime(df[datetimeFeature]) + df.set_index(datetimeFeature, inplace=True) + #If still the datetime column exist in feature list, drop it. Because we already made datetime feature as index (datetimeindex) + df.drop(datetimeFeature,axis=1,inplace=True) + except Exception as e: + pass + ae_df=df + paramObj=parametersDefine() + anomalyMethod=anomalyMethod + inlierLabels=inlierLabels + anomalyDetectionType="""" + inlierLabelList=inlierLabels.split("","") + self.log.info(""<---- inlierLabels ---->""+inlierLabels) + self.log.info(""<---- anomalyMethod ---->""+str(anomalyMethod)) + if target != """": + self.log.info('Status:- |... AnomalyDetection: Supervised') + self.log.info(""One class based anomaly Detection by relabeling data to fit one class models"") + combinedString="""" + dfStr="""" + anomalyDetectionType=""supervised"" + if not anomalyMethod.lower() == ""autoencoder"": ##Added for auto encoder + self.log.info(""startanomalydetector: df: \\n""+str(df)) #task 12627 + if labelMaps == {}: + for inlierVal in inlierLabelList: + inlier=inlierVal + dfStr = ""x =="" + inlier + "" or "" + combinedString+= dfStr + func= combinedString.strip("" or "") + else: + for inlierVal in inlierLabelList: + try: + if inlierVal.isnumeric(): + inlierVal = int(inlierVal) + # inlier=str(labelMaps[inlierVal]) ##Wrongly assigned inlier values to labelMaps dict key. 
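+ # Use the raw inlier id itself (see the note above): looking the id up in labelMaps picked the
+ # wrong value. The collected ids are stitched below into an expression of the form
+ # x ==<id> or x ==<id> which is eval'd row-wise to tag inliers as 1 and everything else as -1.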
+ inlier = str(inlierVal) + dfStr = ""x =="" + str(inlier) + "" or "" + combinedString+= dfStr + except Exception as e: + raise Exception(e) + + func= combinedString.strip("" or "") + labelMaps={'InlierLabel':1,'NonInlierLabel':-1} + targetData=df[target] + df['anomaly'] = df[target].apply(lambda x: 1 if eval(func) else -1 ) + anomtargetData=df['anomaly'] + self.log.info(""dataframe after relabeling the data"") + self.log.info(df.head()) + self.log.info(""target column value counts with inliers and outliers"") + self.log.info(df['anomaly'].value_counts()) + df.drop([target, ""anomaly""], axis=1, inplace=True) + outliers = anomtargetData[anomtargetData == -1] + self.log.info(""outliers in data"") + self.log.info(outliers.shape[0]) + self.log.info(""outlier fraction"") + self.log.info(outliers.shape[0]/targetData.shape[0]) + if int(testPercentage) != 0: + testSize= testPercentage/100 + xtrain, xtest, ytrain, ytest = train_test_split(df, anomtargetData, test_size = testSize) + else: + xtrain =df + xtest =df + ytrain=anomtargetData + ytest =anomtargetData + + if anomalyMethod.lower() == ""isolationforest"": + modelName=""isolationforest"" + paramSpace=anomalyconfig['modelParams']['IsolationForest'] + paramDict =paramObj.paramDefine(paramSpace,'random') + ftwo_scorer = make_scorer(accuracy_score) + isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=ftwo_scorer, n_iter=10) + mod = isolation_forest.fit(xtrain,ytrain) + model = mod.best_estimator_ + elif anomalyMethod.lower() == ""oneclasssvm"": + modelName=""oneClassSVM"" + fthree_scorer = make_scorer(accuracy_score) + paramSpace=anomalyconfig['modelParams']['oneclassSVM'] + paramDict =paramObj.paramDefine(paramSpace,'random') + one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=fthree_scorer, n_iter=10) + mod = one_class.fit(xtrain,ytrain) + model = mod.best_estimator_ + elif anomalyMethod.lower() == ""autoencoder"": + modelName='autoencoder' + testSize=testPercentage/100 + self.log.info(""Aion Autoencoder anomaly detection started.."") + paramSpace=anomalyconfig['modelParams']['AutoEncoder'] + adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) + model=adae_obj.mainAnomalyDetectionfn() + self.log.info(""Aion Autoencoder anomaly detection completed.."") + + else: + self.log.info(""IsolationForest, OneClassSVM and autoencoder are supported models"") + modelName = """" + model = """" + + + else: + self.log.info('Status:- |... 
AnomalyDetection: Unsupervised') + self.log.info(""unsupervised anomaly detection"") + anomalyDetectionType=""unsupervised"" + model=None + xtrain =df + xtest = df + ytrain = pd.DataFrame() + if anomalyMethod.lower() == ""isolationforest"": + paramSpace=anomalyconfig['modelParams']['IsolationForest'] + paramDict =paramObj.paramDefine(paramSpace,'random') + modelName=""isolationforest"" + def scorer_f(estimator, X): + return np.mean(estimator.score_samples(X)) + isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=scorer_f, n_iter=10) + + mod = isolation_forest.fit(xtrain) + self.log.info('---------> Best Param: '+str(mod.best_params_)) + model = mod.best_estimator_ + elif anomalyMethod.lower() == ""oneclasssvm"": + paramSpace=anomalyconfig['modelParams']['oneclassSVM'] + paramDict =paramObj.paramDefine(paramSpace,'random') + modelName=""oneClassSVM"" + def scorer_f1(estimator, X): + return np.mean(estimator.score_samples(X)) + one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=scorer_f1, n_iter=10) + model = one_class.fit(xtrain) + self.log.info('---------> Best Param: '+str(model.best_params_)) + model = model.best_estimator_ + elif anomalyMethod.lower() == ""autoencoder"": + ae_df.drop(ae_df.filter(regex=""Unname""),axis=1, inplace=True) + modelName='autoencoder' + testSize= testPercentage/100 + self.log.info(""Aion Autoencoder anomaly detection started.."") + paramSpace=anomalyconfig['modelParams']['AutoEncoder'] + adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) + model=adae_obj.mainAnomalyDetectionfn() + self.log.info(""Aion Autoencoder anomaly detection completed.."") + elif anomalyMethod.lower() == ""dbscan"": + testSize=testPercentage/100 + ae_df.drop(ae_df.filter(regex=""Unname""),axis=1, inplace=True) + modelName='dbscan' + self.log.info(""Aion DBScan anomaly detection started.."") + paramSpace=anomalyconfig['modelParams']['DBScan'] + adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) + model=adae_obj.mainAnomalyDetectionfn() + self.log.info(""Aion DBScan anomaly detection completed.."") + else: + self.log.info(""IsolationForest,OneClassSVM,autoencoder and DBScan are supported models"") + modelName = """" + model = """" + self.log.info('Status:- |... AnomalyDetection Algorithm applied: '+modelName) + + if (anomalyMethod.lower() == ""autoencoder"" or anomalyMethod.lower() == ""dbscan""): + if (anomalyMethod.lower() == ""autoencoder""): + ## Since autoencoder is implemented using tf.keras, saving the model in h5 format. If we save it in .sav format will give 'TensorSliceReader constructor' error. 
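+# --- Hedged sketch (not part of the original source): the comment above and the branch below
+# persist tf.keras models with model.save() (SavedModel/.h5) and scikit-learn style estimators
+# with pickle, because pickling a TensorFlow graph fails at load time. A minimal, generic
+# version of that dispatch (function and argument names are illustrative):
+import os
+import pickle
+
+def persist_model(model, deploy_dir: str, name: str) -> str:
+    path = os.path.join(deploy_dir, 'model', name)
+    os.makedirs(os.path.dirname(path), exist_ok=True)
+    if hasattr(model, 'save'):        # tf.keras models expose save(); avoids pickling TF objects
+        model.save(path)              # SavedModel directory (or .h5 if the name ends with .h5)
+    else:                             # plain scikit-learn estimators pickle safely
+        with open(path, 'wb') as f:
+            pickle.dump(model, f)
+    return path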
+ saved_model=saved_model.replace('.sav','') + filename = os.path.join(deployLocation,'model',saved_model) + model.save(filename,save_format=""tf"") + elif (anomalyMethod.lower() == ""dbscan""): + filename = os.path.join(deployLocation,'model',saved_model) + pickle.dump(model, open(filename, 'wb')) + + matrix='' + trainmatrix='' + accuracy = 0 + else: + filename = os.path.join(deployLocation,'model',saved_model) + pickle.dump(model, open(filename, 'wb')) + loaded_model=pickle.load(open(filename, 'rb')) + pred_testData=loaded_model.predict(xtest) + pred_trainData = loaded_model.predict(xtrain) + pred_trainScore = loaded_model.decision_function(xtrain) + self.log.info(""<--- predicted values of testdata --->"") + self.log.info(pred_testData) + if anomalyDetectionType == ""supervised"" : + df_predicted = pd.DataFrame() + df_predicted['actual'] = ytest + df_predicted['predict'] = pred_testData + df_predicted.to_csv(predicted_data_file) + preds = pred_testData + targs = ytest + unique_elements_ytest, counts_elements_ytest = np.unique(targs, return_counts=True) + unique_elements_pred, counts_elements_pred = np.unique(preds, return_counts=True) + ''' + for i in range(0,len(unique_elements_ytest)): + self.log.info(""unique value :"" +str(unique_elements_ytest[i]) + "" count in input testdata: "" + str(counts_elements_ytest[i]) +"" count in predicted testdata: "" + str(counts_elements_pred[i])) + self.log.info(""\\n"") + ''' + self.log.info(""\\n======= Performance matrix on Test Data ======"") + aion_matrixobj = aion_matrix() + self.log.info(""-------> Test Matrix: "") + matrix = aion_matrixobj.getClassificationPerformaceMatrix(targs,preds,labelMaps) + self.log.info(""-------> Train Matrix: "") + trainmatrix = aion_matrixobj.getClassificationPerformaceMatrix(ytrain,pred_trainData,labelMaps) + + #self.log.info(""-------> Confusion Matrix: "") + + self.log.info(metrics.confusion_matrix(targs,preds)) + self.log.info(""-------> accuracy for inliers: "") + accuracy = metrics.accuracy_score(targs, preds) + self.log.info(metrics.accuracy_score(targs, preds)) + self.log.info(""-------> precision for inliers --->"") + self.log.info(metrics.precision_score(targs, preds)) + self.log.info(""-------> recall for inliers ---> "") + self.log.info(metrics.recall_score(targs, preds)) + self.log.info(""-------> f1 for inliers--->"") + self.log.info(metrics.f1_score(targs, preds)) + self.log.info(""-------> area under curve (auc) for inliers --->"") + self.log.info(metrics.roc_auc_score(targs, preds)) + self.log.info(""-------> precision for outliers --->" +""") + self.log.info(1-metrics.precision_score(targs, preds)) + self.log.info(""-------> recall for outliers ---> "") + self.log.info(1-metrics.recall_score(targs, preds)) + self.log.info(""-------> f1 for outliers--->"") + self.log.info(1-metrics.f1_score(targs, preds)) + self.log.info(""======= Performance matrix on Test Data End ======\\n"") + else: + df_predicted = xtrain + df_predicted['predict'] = pred_trainData + df_predicted['score'] = pred_trainScore + df_predicted.to_csv(predicted_data_file, index=False) + matrix = '' + trainmatrix = '' + accuracy = 'NA' + return modelName,model,matrix,trainmatrix,accuracy,labelMaps + + except Exception as inst: + self.log.info(""Error: anomalyDetector failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) ''' +* +* 
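+# --- Hedged sketch (not part of the original source): the supervised evaluation above scores
+# predictions in the +1 (inlier) / -1 (outlier) convention and derives the outlier figures from
+# the inlier ones. Per-class precision/recall/F1 can also be read directly from scikit-learn by
+# switching pos_label (dictionary layout here is illustrative only):
+from sklearn.metrics import precision_score, recall_score, f1_score
+
+def one_class_report(y_true, y_pred):
+    return {
+        'inlier': {
+            'precision': precision_score(y_true, y_pred, pos_label=1),
+            'recall': recall_score(y_true, y_pred, pos_label=1),
+            'f1': f1_score(y_true, y_pred, pos_label=1),
+        },
+        'outlier': {
+            'precision': precision_score(y_true, y_pred, pos_label=-1),
+            'recall': recall_score(y_true, y_pred, pos_label=-1),
+            'f1': f1_score(y_true, y_pred, pos_label=-1),
+        },
+    }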
============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +from sklearn.model_selection import GridSearchCV +from sklearn.model_selection import RandomizedSearchCV +from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier +from sklearn.linear_model import SGDRegressor, PassiveAggressiveRegressor +from sklearn.linear_model import SGDClassifier +from sklearn.tree import DecisionTreeClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.model_selection import cross_val_score +from sklearn.svm import SVC +from hyperopt import fmin, tpe, hp, STATUS_OK, Trials +from sklearn.svm import SVR +import xgboost as xgb +from xgboost import XGBClassifier +from lightgbm import LGBMClassifier +from catboost import CatBoostClassifier +from xgboost import XGBRegressor +from lightgbm import LGBMRegressor +from catboost import CatBoostRegressor +from sklearn.linear_model import LinearRegression +from sklearn.linear_model import Lasso +from sklearn.linear_model import Ridge +from sklearn.tree import DecisionTreeRegressor +from sklearn.ensemble import RandomForestRegressor +import warnings +warnings.filterwarnings('ignore') +import time +import logging +import sys,os +class StreamToLogger(object): + def __init__(self, logger, log_level=logging.INFO): + self.logger = logger + self.log_level = log_level + self.linebuf = '' + + def write(self, buf): + for line in buf.rstrip().splitlines(): + self.logger.log(self.log_level, 'Model:- Iteration:: '+line.rstrip()) + +class OptimizationTq(): + def __init__(self,modelName,tuneParams,cvSplit,scoreParam,nIter,trainX,trainY,geneticParam=None): + self.data = None + self.model=modelName + self.params =tuneParams + self.cvSplit=cvSplit + self.scoreParam=scoreParam + self.trainX =trainX + self.trainY = trainY + self.geneticParam=geneticParam if geneticParam else {} + self.nIter =nIter + self.count =0 + self.best =0 + self.log = logging.getLogger('eion') + def gridSearchOpt(self): + try: + sl = StreamToLogger(self.log, logging.INFO) + oldStdout = sys.stdout + sys.stdout = sl + self.log.info('Model:-Model Name:: '+str(self.model)) + modelObj=eval(self.model+'()') + gridOp = GridSearchCV(modelObj, param_grid=self.params,scoring=self.scoreParam, cv=self.cvSplit,verbose=10) + gridFit=gridOp.fit(self.trainX,self.trainY) + self.log.info('Model:-Model Name:: '+str(self.model)) + self.log.info('Model:-ScoringType:: '+str(gridFit.scorer_)) + self.log.info('Model:-Best Param:: '+str(gridFit.best_params_)) + self.log.info('Model:-Validation Score:: '+str(gridFit.best_score_)) + self.log.info('Model:-CV Result:: '+str(gridFit.cv_results_)) + self.log.info('Model:-Best Estimator:: '+str(gridFit.best_estimator_)) + sys.stdout = oldStdout + return self.model,gridFit.best_params_,gridFit.best_score_,gridFit.best_estimator_ + except Exception as inst: + self.log.info(""gridSearchOpt 
failed ==>""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + def randomSearchOpt(self): + try: + sl = StreamToLogger(self.log, logging.INFO) + oldStdout = sys.stdout + sys.stdout = sl + self.log.info('Model:-Model Name:: '+str(self.model)) + modelObj=eval(self.model+'()') + randomOp = RandomizedSearchCV(modelObj, param_distributions=self.params,scoring=self.scoreParam,n_iter=self.nIter,cv=self.cvSplit,verbose=10) + randomFit=randomOp.fit(self.trainX,self.trainY) + self.log.info('Model:-Model Name:: '+str(self.model)) + self.log.info('Model:-ScoringType:: '+str(randomFit.scorer_)) + self.log.info('Model:-Best Param:: '+str(randomFit.best_params_)) + self.log.info('Model:-Validation Score:: '+str(randomFit.best_score_)) + self.log.info('Model:-CV Result:: '+str(randomFit.cv_results_)) + self.log.info('Model:-Best Estimator:: '+str(randomFit.best_estimator_)) + sys.stdout = oldStdout + return self.model,randomFit.best_params_,randomFit.best_score_,randomFit.best_estimator_ + except Exception as inst: + self.log.info(""RandomsearchOptimization failed ==>""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + def bayesianOpt(self,params): + modelObj=eval(self.model+'(**'+str(params)+')') + score=cross_val_score(modelObj, self.trainX, self.trainY,scoring=self.scoreParam,cv=self.cvSplit) + return score.mean() + + def f(self,params): + best=self.best + count=self.count + parameters=params + count += 1 + classObj=OptimizationTq(self.model,self.params,self.cvSplit,self.scoreParam,self.nIter,self.trainX,self.trainY) + acc = classObj.bayesianOpt(parameters.copy()) + return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.model,'params': params} + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
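+# --- Hedged sketch (not part of the original source): bayesianOpt()/f() above score a candidate
+# parameter set with cross_val_score and hand hyperopt a loss of -score. A self-contained
+# example of that TPE pattern (the model, search space and evaluation budget are illustrative):
+from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
+from sklearn.model_selection import cross_val_score
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.datasets import make_classification
+
+X, y = make_classification(n_samples=300, n_features=10, random_state=0)
+
+def objective(params):
+    model = RandomForestClassifier(n_estimators=int(params['n_estimators']),
+                                   max_depth=int(params['max_depth']),
+                                   random_state=0)
+    score = cross_val_score(model, X, y, scoring='accuracy', cv=3).mean()
+    return {'loss': -score, 'score': score, 'status': STATUS_OK}
+
+space = {'n_estimators': hp.quniform('n_estimators', 50, 300, 50),
+         'max_depth': hp.quniform('max_depth', 2, 10, 1)}
+best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10, trials=Trials())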
+* +''' +import warnings +warnings.filterwarnings('ignore') +import logging +import sklearn +from sklearn.neighbors import NearestNeighbors +from sklearn.cluster import KMeans +from sklearn.cluster import DBSCAN +from random import sample +from numpy.random import uniform +import numpy as np +import math +import pickle +import os +from math import isnan +from sklearn.preprocessing import binarize +from sklearn.preprocessing import LabelEncoder +from sklearn.metrics import davies_bouldin_score +from utils.file_ops import save_csv_compressed +from sklearn.metrics import silhouette_score +try: + from sklearn.metrics import calinski_harabasz_score as calinski_harabaz_score +except: + from sklearn.metrics import calinski_harabaz_score +import pandas as pd +from sklearn.metrics import confusion_matrix +from sklearn.metrics import classification_report +from sklearn.metrics import roc_curve, auc +from sklearn.metrics import roc_auc_score +from sklearn.metrics import matthews_corrcoef +from sklearn.metrics import brier_score_loss +from sklearn.preprocessing import LabelBinarizer +from sklearn.model_selection import train_test_split +from sklearn.decomposition import LatentDirichletAllocation +from learner.classificationModel import ClassifierModel +from learner.regressionModel import RegressionModel +from sklearn.metrics import r2_score +from sklearn.metrics import mean_absolute_error,make_scorer +from sklearn.metrics import mean_squared_error +from sklearn.metrics import RocCurveDisplay, auc, roc_curve +import matplotlib.pyplot as plt + +#print(""1"") +#from alibi.explainers import ALE,plot_ale +#pd.set_option('display.max_columns', 10) +#pd.set_option('display.width', None) +def get_prediction( model, loaded_model, xtrain, xtest=None): + train_prob = None + test_prob = None + predictedData = [] + if xtest.empty: + is_xtest = False + else: + is_xtest = True + if model.lower() == 'lda': + if is_xtest: + predictedData = loaded_model.transform(xtest).argmax(axis=1) + trainPredictedData = loaded_model.transform(xtrain) + elif model.lower() == 'dbscan': + if is_xtest: + predictedData = loaded_model.fit_predict(xtest) + predictedData = loaded_model.labels_ + trainPredictedData = loaded_model.fit_predict(xtrain) + trainPredictedData = loaded_model.labels_ + elif model == 'Neural Architecture Search': + train_prob = estimator.predict(xtrain) + if train_prob.shape[1] == 1: + train_prob = np.hstack(( 1-train_prob, train_prob)) + trainPredictedData = np.argmax(train_prob, axis=1) + if is_xtest: + test_prob = estimator.predict(xtest) + if test_prob.shape[1] == 1: + test_prob = np.hstack(( 1-test_prob, test_prob)) + predictedData = np.argmax(test_prob, axis=1) + elif model in ['Deep Q Network','Dueling Deep Q Network']: + from tf_agents.trajectories import time_step + from tensorflow import constant + q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) + train_prob = q.numpy() + if train_prob.shape[1] == 1: + train_prob = np.hstack(( 1-train_prob, train_prob)) + trainPredictedData = np.argmax(train_prob, axis=1) + predictedData = np.argmax(test_prob, axis=1) + if is_xtest: + q,_ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) + test_prob = q.numpy() + if test_prob.shape[1] == 1: + test_prob = np.hstack(( 1-test_prob, test_prob)) + predictedData = np.argmax(test_prob, axis=1) + else: + if is_xtest: + predictedData = loaded_model.predict(xtest) + 
trainPredictedData = loaded_model.predict(xtrain) + if hasattr(loaded_model, 'predict_proba'): + train_prob = loaded_model.predict_proba(xtrain) + if is_xtest: + test_prob = loaded_model.predict_proba(xtest) + return trainPredictedData, predictedData, train_prob, test_prob + +class machinelearning(object): + def __init__(self): + self.features=[] + self.log = logging.getLogger('eion') + self.plots = [] + def cluster_tendency(self,featureData): + self.log.info(""\\n------------- Cluster Tendency Check -------------"") + d = featureData.shape[1] + n = len(featureData) + m = int(0.1 * n) + nbrs = NearestNeighbors(n_neighbors=1).fit(featureData.values) + rand_X = sample(range(0, n, 1), m) + ujd = [] + wjd = [] + for j in range(0, m): + u_dist, _ = nbrs.kneighbors(uniform(np.amin(featureData,axis=0),np.amax" +"(featureData,axis=0),d).reshape(1, -1), 2, return_distance=True) + ujd.append(u_dist[0][1]) + if isinstance(featureData.iloc[rand_X[j]].values, pd.core.arrays.sparse.array.SparseArray): + featureData_reshaped = np.asarray(featureData.iloc[rand_X[j]].values).reshape(1, -1) + else: + featureData_reshaped = featureData.iloc[rand_X[j]].values.reshape(1, -1) + w_dist, _ = nbrs.kneighbors(featureData_reshaped, 2, return_distance=True) + wjd.append(w_dist[0][1]) + try: + clusetTendency = sum(ujd) / (sum(ujd) + sum(wjd)) + except: + clusetTendency = 0 + if isnan(clusetTendency): + clusetTendency = 0 + self.log.info(""-------> Cluster Tendency value using Hopkins Statistic: ""+str(clusetTendency)) + self.log.info(""------------- Cluster Tendency Check End-------------\\n"") + return (clusetTendency) + + def calculateNumberofCluster(self,featureData): + self.log.info(""\\n------------- Calculate Number of Cluster -------------"") + Sum_of_squared_distances = [] + K = range(1,15) + for k in K: + km = KMeans(n_clusters=k) + km = km.fit(featureData) + Sum_of_squared_distances.append(km.inertia_) + x1, y1 = 1, Sum_of_squared_distances[0] + x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances)-1] + + distances = [] + for inertia in range(len(Sum_of_squared_distances)): + x0 = inertia+2 + y0 = Sum_of_squared_distances[inertia] + numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1) + denominator = math.sqrt((y2 - y1)**2 + (x2 - x1)**2) + distances.append(numerator/denominator) + n_clusters=distances.index(max(distances)) + 2 + self.log.info(""-------> n_clusters: ""+str(n_clusters-1)) + self.log.info(""------------- Calculate Number of Cluster End-------------\\n"") + return(n_clusters-1) + + def getclusterMatrix(self,featureData,targetData): + silhouetteAvg = silhouette_score(featureData,targetData) + self.log.info(""-------> SilHouette_Avg: ""+str(silhouetteAvg)) + daviesBouldinScore=davies_bouldin_score(featureData, targetData) + self.log.info(""-------> DaviesBouldinScore: ""+str(daviesBouldinScore)) + calinskiHarabazScore=calinski_harabaz_score(featureData,targetData) + self.log.info(""-------> CalinskiHarabazScore: ""+str(calinskiHarabazScore)) + matrix = '""SilHouette_Avg"":'+str(silhouetteAvg)+',""DaviesBouldinScore"":'+str(daviesBouldinScore)+',""CalinskiHarabazScore"":'+str(calinskiHarabazScore) + return(matrix) + + def get_regression_matrix(self,targetData,predictedData): + try: + r2score=r2_score(targetData, predictedData) + self.log.info('-------> R2_score :'+str(r2score)) + except Exception as e: + self.log.info('\\n--------- r2_score ',str(e)) + r2score = 0 + try: + meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) + self.log.info('-------> MAE 
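+# --- Hedged sketch (not part of the original source): calculateNumberofCluster() above picks k
+# with the 'elbow' rule - compute the KMeans inertia over a range of k and keep the k whose
+# point lies farthest from the straight line joining the first and last inertia points. A
+# compact version of that knee detection (the data and k range are illustrative):
+import numpy as np
+from sklearn.cluster import KMeans
+from sklearn.datasets import make_blobs
+
+X, _ = make_blobs(n_samples=500, centers=4, random_state=0)
+ks = range(1, 15)
+inertia = np.array([KMeans(n_clusters=k, n_init=10, random_state=0).fit(X).inertia_ for k in ks])
+
+# perpendicular distance of every (k, inertia) point from the line through the two end points
+p1 = np.array([ks[0], inertia[0]], dtype=float)
+p2 = np.array([ks[-1], inertia[-1]], dtype=float)
+points = np.column_stack([ks, inertia]).astype(float)
+d = p2 - p1
+num = np.abs(d[1] * (p1[0] - points[:, 0]) - d[0] * (p1[1] - points[:, 1]))
+dist = num / np.linalg.norm(d)
+best_k = ks[int(np.argmax(dist))]      # the knee, i.e. the suggested number of clusters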
:'+str(meanabsoluteerror)) + except Exception as e: + self.log.info('\\n---------Error: meanabsoluteerror ',str(e)) + meanabsoluteerror = 0 + try: + meanssquatederror=mean_squared_error(targetData, predictedData) + self.log.info('-------> MSE :'+str(meanssquatederror)) + except Exception as e: + self.log.info('\\n---------Error: meanssquatederror ',str(e)) + meanssquatederror = 0 + try: + rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) + self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) + except Exception as e: + self.log.info('\\n---------Error: rootmeanssquatederror ',str(e)) + rootmeanssquatederror = 0 + try: + normalised_rmse_percentage = (rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100 + self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) + except Exception as e: + self.log.info('\\n---------Error: Normalised RMSE percentage ',str(e)) + normalised_rmse_percentage = -1 + try: + targetArray, predictedArray = np.array(targetData), np.array(predictedData) + try: + EPSILON = 1e-10 + meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 + except ZeroDivisionError: + meanpercentageerror = 0 + self.log.info('-------> MAPE :'+str(meanpercentageerror)) + except Exception as e: + self.log.info('\\n---------Error: meanpercentageerror ',str(e)) + meanpercentageerror = 0 + matrix = '""MAE"":'+str(round(meanabsoluteerror,2))+',""R2Score"":'+str(round(r2score,2))+',""MSE"":'+str(round(meanssquatederror,2))+',""MAPE"":'+str(round(meanpercentageerror,2))+',""RMSE"":'+str(round(rootmeanssquatederror,2))+',""Normalised RMSE(%)"":'+str(round(normalised_rmse_percentage,2)) + return matrix + + def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps): + setOfyTrue = set(le_trainY) + unqClassLst = list(setOfyTrue) + if len(unqClassLst) <= 20: + if str(labelMaps) != '{}': + inv_mapping_dict = {v: k for k, v in labelMaps.items()} + unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) + unqClassLst2 = list(unqClassLst2) + else: + unqClassLst2 = unqClassLst + indexName = [] + columnName = [] + targetnames=[] + for item in unqClassLst2: + indexName.append(""act:""+str(item)) + columnName.append(""pre:""+str(item)) + targetnames.append(str(item)) + + matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) + pd.set_option('display.max_columns',len(targetnames)+2) + self.log.info('-------> Confusion Matrix: ') + self.log.info(matrixconfusion) + pd.reset_option('display.max_columns') + classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() + self.log.info('-------> Classification Report: ') + self.log.info(classificationreport) + matrixconfusion = matrixconfusion.to_json(orient='index') + classificationreport = classificationreport.to_json(orient='index') + else: #bugid: 14540 + self.log.info('-------> As the number of class is more than 20, skipping the creation of confusion_matrix and classification Report') + return """" + lb = LabelBinarizer() + lb.fit(le_trainY) + transformTarget= lb.transform(le_trainY) + if transformTarget.shape[-1] == 1: + transformTarget = le_trainY + prob = np.delete( prob, 0, 1) + rocaucscore = roc_auc_score(transformTarget,prob,average=""macro"") + brier_score = None + mcc_score = matthews_corrcoef(le_trainY,predictedData) + if 
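+# --- Hedged sketch (not part of the original source): get_regression_matrix() above reports
+# MAE, MSE, RMSE, RMSE normalised by the target range, and an epsilon-guarded MAPE. The same
+# quantities in a few lines (array names are illustrative; assumes a non-constant target):
+import numpy as np
+from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
+
+def regression_metrics(y_true, y_pred, eps=1e-10):
+    y_true, y_pred = np.asarray(y_true, dtype=float), np.asarray(y_pred, dtype=float)
+    mse = mean_squared_error(y_true, y_pred)
+    rmse = float(np.sqrt(mse))
+    return {
+        'MAE': mean_absolute_error(y_true, y_pred),
+        'R2Score': r2_score(y_true, y_pred),
+        'MSE': mse,
+        'RMSE': rmse,
+        'Normalised RMSE(%)': rmse / (y_true.max() - y_true.min()) * 100,
+        'MAPE': float(np.mean(np.abs((y_true - y_pred) / (y_true + eps))) * 100),
+    }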
len(unqClassLst) > 2: + brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1)) + else: + brier_score = brier_score_loss(transformTarget,prob) + self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) + self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}') + self.log.info(f'-------> BRIER SCORE : {brier_score}') + + matrix = f'""ConfusionMatrix"": {matrixconfusion},""ClassificationReport"": {classificationreport},""ROC_AUC_SCORE"": {rocaucscore},""MCC_SCORE"": {mcc_score},""BRIER_SCORE"": {brier_score}' + return(matrix) + + def split_into_train_test_data(self,featureData,targetData,testPercentage,modelType='classification'): + ''' + if cvSplit == None: + ''' + self.log.info('\\n-------------- Test Train Split ----------------') + + if testPercentage == 0: + xtrain=featureData + ytrain=targetData + xtest=featureData + ytest=targetData + else: + + testSize=testPercentage/100 + if modelType == 'regression': + self.log.info('-------> Split Type: Random Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) + else: + try: + self.log.info('-------> Split Type: Stratify Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True) + except: + self.log.info('-------> Split Type: Random Split') + xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) + + self.log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') + self.log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->') + self.log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->') + self.log.info('-------------- Test Train Split End ----------------\\n') + ''' + else: + xtrain=featureData + ytrain=targetData + xtest=featureData + ytest=targetData + ''' + return(xtrain,ytrain,xtest,ytest) + + def checkForClassBalancing(self,targetData): + imbalancedCount=0 + valueCount=targetData.value_counts() + self.log.info(""---------- Checking for Class Imbalance on Train Data---------"") + self.log.info(""-------> Categories and Count:"") + self.log.info(valueCount) + categoryList=valueCount.keys().tolist() + categoryCountList=valueCount.tolist() + for i in range(0,len(categoryCountList)): + if float(categoryCountList[i])<=float(0.5*max(categoryCountList)): + self.log.info(""-------> Found Imbalanced class: '""+str(categoryList[i])+""' Count: ""+str(categoryCountList[i])) + imbalancedCount=imbalancedCount+1 + if imbalancedCount == 0: + self.log.info(""-------> Status: Balanced"") + self.log.info('Status:- |... Check for Data balancing done: Balanced') + else: + self.log.info(""-------> Status: Unbalanced"") + self.log.info('Status:- |... 
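+# --- Hedged sketch (not part of the original source): split_into_train_test_data() above tries
+# a stratified split for classification and silently falls back to a plain random split when
+# stratification is not possible (e.g. a class with a single sample). Minimal form of that
+# behaviour (names are illustrative):
+from sklearn.model_selection import train_test_split
+
+def split_data(X, y, test_percentage, task='classification'):
+    if test_percentage == 0:
+        return X, y, X, y                      # train == test, mirroring the original behaviour
+    test_size = test_percentage / 100
+    try:
+        stratify = y if task == 'classification' else None
+        xtr, xte, ytr, yte = train_test_split(X, y, test_size=test_size,
+                                              shuffle=True, stratify=stratify)
+    except ValueError:                          # stratification can fail on tiny classes
+        xtr, xte, ytr, yte = train_test_split(X, y, test_size=test_size, shuffle=True)
+    return xtr, ytr, xte, yte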
Check for Data balancing done: Unbalanced') + self.log.info(""---------- Checking for Class Imbalance on Train Data End---------"") + return(imbalancedCount) + + def ExecuteClassBalancing(self,featureData,targetData,balancingMethod): + from imblearn.over_sampling import SMOTE + from imblearn.under_sampling import TomekLinks + from collections import Counter + self.log.info('\\n------------ Balancing Start --------------') + if balancingMethod.lower() == ""oversample"": + self.log.info(""-------> Method: SMOTE OverSampling Technique"") + k=1 + seed=100 + try: + oversample = SMOTE(sampling_strategy='auto', k_neighbors=k, random_state=seed) + balfeatureData, baltargetData = oversample.fit_resample(featureData, targetData) + self.log.info(baltargetData.value_counts()) + except Exception as inst: + self.log.info(""\\n!!!!!!!!! OverSampling Fails ""+str(inst)+"" !!!!!!!!!!!!!!\\n"") + balfeatureData = featureData + baltargetData = targetData + elif balancingMethod.lower() == ""undersample"": + self.log.info(""-------> Method: Tomelinks UnderSampling Technique"") + tLinks = TomekLinks() + " +" balfeatureData, baltargetData= tLinks.fit_resample(featureData, targetData) + #Added for checking balancing act by the algorithm. + counter = Counter(baltargetData) + self.log.info(""Class counter:\\t""+str(baltargetData.value_counts())) + max_class = max(counter,key=counter.get) + max_value = max(counter.values()) + self.log.info(""Max samples: ""+str(max_value)+ "" in the class: ""+str(max_class)) + for k,v in counter.items(): + if v < (max_value*98/100): + self.log.info(""Undersampling is not able to do perfect data balancing."") + self.log.info(""The method is used to identify the desired samples of data from the majority class that is having the lowest Euclidean distance with the minority class data. Downsampling may not balance the class after applying this method.\\n"") + self.log.info(baltargetData.value_counts()) + else: + balfeatureData = featureData + baltargetData = targetData + self.log.info(""-------> Method: Balancing Not Applied"") + self.log.info('-------> Memory Usage by Training DataFrame After Class Balancing '+str(featureData.memory_usage(deep=True).sum())) + self.log.info('Status:- |... 
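+# --- Hedged sketch (not part of the original source): ExecuteClassBalancing() above wraps
+# imbalanced-learn - SMOTE to oversample the minority class, TomekLinks to undersample the
+# majority class. The core calls look like this (dataset and parameters are illustrative):
+from collections import Counter
+from imblearn.over_sampling import SMOTE
+from imblearn.under_sampling import TomekLinks
+from sklearn.datasets import make_classification
+
+X, y = make_classification(n_samples=400, weights=[0.9, 0.1], random_state=0)
+print('before:', Counter(y))
+
+X_over, y_over = SMOTE(sampling_strategy='auto', k_neighbors=1, random_state=100).fit_resample(X, y)
+print('after SMOTE:', Counter(y_over))
+
+X_under, y_under = TomekLinks().fit_resample(X, y)
+print('after TomekLinks:', Counter(y_under))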
Data balancing done: '+str(balancingMethod)) + self.log.info('------------ Balancing End --------------\\n') + return(balfeatureData,baltargetData) + + def combine_text_features(self,dataFrame,dataColumns): + column_merge_flag = False + merge_columns = [] + if(len(dataColumns) > 1): + dataFrame['combined'] = dataFrame[dataColumns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + merge_columns = dataColumns + features = ['combined'] + column_merge_flag = True + self.log.info(""After Text Concatenation"") + self.log.info(dataFrame['combined'].head(10)) + self.log.info(""List of Combined Columns ---> ""+ str(dataColumns) +""\\n"") + else: + features = dataColumns + return(dataFrame,features,column_merge_flag,merge_columns) + ''' + def create_preprocessing_pipeline(self,X): + textDataProfilerObj=textDataProfiler() + tfidfVector = TfidfVectorizer(tokenizer = textDataProfilerObj.textTokenizer) + pipe = Pipeline([(""cleaner"", TextCleaner()),('vectorizer', tfidfVector)]) + vectors=pipe.fit(X) + transformedVector=pipe.transform(X) + return(pipe,transformedVector) + ''' + def get_topics(self, model, feature_names, no_top_words): + topicDict = {} + for topic_idx, topic in enumerate(model.components_): + wordDict = {} + topicProb = [(feature_names[i],topic[i]/topic.sum()) for i in topic.argsort()[:-no_top_words - 1:-1]] + for word, prob in topicProb: + if word.endswith('_vect'): + word = word[:-len('_vect')] + wordDict[word] = prob + topicDict[ topic_idx] = wordDict + return topicDict + + def transform_target_feature(self,dataFrame,targetColumn): + targetDataType=dataFrame[targetColumn].dtypes + pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] + labelMapping= {} + if targetDataType not in pandasNumericDtypes: + le = LabelEncoder() + le.fit(dataFrame[targetColumn]) + le_trainY = le.transform(dataFrame[targetColumn]) + labelMapping = dict(zip(le.classes_, le.transform(le.classes_))) + self.log.info("" \\n encoded Values of predicator column ===>""+str(labelMapping)) + else: + le_trainY = dataFrame[targetColumn] + return le_trainY,labelMapping + def setScoreParams(self,scoreParam,modelType,categoryCountList): + if modelType == 'classification' or modelType == 'TextClassification': + allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'accuracy' + elif scoreParam.lower() == 'none': + scoreParam = 'accuracy' + elif scoreParam.lower() == ""recall"": + if len(categoryCountList) > 2: + scoreParam = make_scorer(sklearn.metrics.recall_score, average = 'weighted') + else: + scoreParam = make_scorer(sklearn.metrics.recall_score) + elif scoreParam.lower() == ""precision"" : + if len(categoryCountList) > 2: + scoreParam = make_scorer(sklearn.metrics.precision_score, average = 'weighted') + else: + scoreParam = make_scorer(sklearn.metrics.precision_score) + elif scoreParam.lower() == ""f1_score"" : + if len(categoryCountList) > 2: + scoreParam = make_scorer(sklearn.metrics.f1_score, average = 'weighted') + else: + scoreParam = make_scorer(sklearn.metrics.f1_score) + elif scoreParam.lower() == ""roc_auc"" : + if len(categoryCountList) > 2: + scoreParam = make_scorer(sklearn.metrics.roc_auc_score,needs_proba=True,multi_class='ovr',average='weighted') + else: + scoreParam = make_scorer(sklearn.metrics.roc_auc_score) + else: + scoreParam = scoreParam + else: + allowedmatrix = ['mse','r2','rmse','mae'] + if(scoreParam.lower() not in allowedmatrix): + scoreParam = 'neg_mean_squared_error' + elif 
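+# --- Hedged sketch (not part of the original source): setScoreParams() above turns a metric
+# name into a scikit-learn scorer, switching to a weighted average for multi-class targets.
+# The key pattern is make_scorer (function name and fallback are illustrative):
+from sklearn.metrics import make_scorer, f1_score, recall_score
+
+def build_scorer(name, n_classes):
+    if name == 'f1_score':
+        return make_scorer(f1_score, average='weighted') if n_classes > 2 else make_scorer(f1_score)
+    if name == 'recall':
+        return make_scorer(recall_score, average='weighted') if n_classes > 2 else make_scorer(recall_score)
+    return 'accuracy'   # fall back to a built-in scorer name, as the original does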
scoreParam.lower() == 'none': + scoreParam = 'neg_mean_squared_error' + elif scoreParam.lower() == 'mse': + scoreParam = 'neg_mean_squared_error' + elif scoreParam.lower() == 'rmse': + #scoreParam = make_scorer(sklearn.metrics.mean_squared_error, squared = False) + scoreParam='neg_root_mean_squared_error' + elif scoreParam.lower() == 'mae': + scoreParam = 'neg_mean_absolute_error' + elif scoreParam.lower() == 'r2': + scoreParam = 'r2' + else: + scoreParam = scoreParam + #self.log.info('Status:- !... Scoring parameters selected') + self.log.info(""-------> Scoring parameter: ""+str(scoreParam)) + return(scoreParam) + def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,featuresset1,featureset2): + best_feature_model = featuresset1 + self.log.info('\\n ---------- ML Summary ------------') + if modelType.lower() == ""classification"": + if(threshold1 == -1 and threshold2 == -1): + if score1> score2: + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featuresset1 + else: + self.log.info('-------> Best Features:'+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + elif(threshold1 == -1): + self.log.info('-------> Best Features: '+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + elif(threshold1 == -2): + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model =featuresset1 + else: + if pscore1 == pscore2: + if rscore1 > rscore2: + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featuresset1 + else: + self.log.info('-------> Best Features: '+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + elif rscore1 == rscore2: + if pscore1 > pscore2: + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featuresset1 + else: + self.log.info('-------> Best Features: '+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + + elif modelType.lower() == ""regression"": + if scoreParam == ""r2"" or scoreParam == ""explained_variance"": + if score1> score2 : + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> 
Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featuresset1 + else: + self.log.info('-------> Best Features: '+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + else: + if score1< score2 : + self.log.info('-------> Best Features: '+str(featuresset1)) + self.log.info('-------> Best Model: '+str(model1)) + self.log.info('-------> Best Score: '+str(score1)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featuresset1 + else: + self.log.info('-------> Best Features: '+str(featureset2)) + self.log.info('-------> Best Model: '+str(model2)) + self.log.info('-------> Best Score: '+str(score2)) + self.log.info('-------> Scoring Param: '+str(scoreParam)) + best_feature_model = featureset2 + self.log.info('---------- ML Summary End ------------\\n') + return(best_feature_model) + + + def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,modelFeatures,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,featuresBasedOn,code_configure,featureEngineeringSelector,modelEvaluationConfig,imageFolderLocation): + model = 'None' + params = 'None' + score = 0xFFFF + estimator = None + model_tried = '' + threshold = -1 + pscore = -1 + rscore = -1 + topics = {} + if(targetColumn != ''): + targetData = dataFrame[targetColumn] + datacolumns=list(dataFrame.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + + if(modelType != 'clustering') and (modelType != 'TopicMod" +"elling'): + scoreParam = self.setScoreParams(scoreParam,modelType,categoryCountList) + + if len(topFeatures) > 0: + self.log.info('\\n-------------- Training ML: Top/StatisticalBased Features Start --------------') + modelbasedon = 'StatisticalBased' + if featureEngineeringSelector.lower() == 'true': + self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection started') + modelbasedon = 'DimensionalityReduction' + else: + self.log.info('Status:- |... Algorithm analysis based on statistical based feature selection started') + model_type1,model1,params1, score1, estimator1,model_tried1,xtrain1,ytrain1,xtest1,ytest1,threshold1,pscore1,rscore1,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelbasedon,code_configure,modelEvaluationConfig) + + if model_tried != '': + model_tried += ',' + model_tried += model_tried1 + topFeaturesStatus = True + if featureEngineeringSelector.lower() == 'true': + self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection completed') + else: + self.log.info('Status:- |... Algorithm analysis for statistical based feature completed') + self.log.info('-------------- Training ML: Top/StatisticalBased Features End --------------\\n') + else: + topFeaturesStatus = False + + if len(modelFeatures) > 0: + self.log.info('\\n-------------- Training ML: Models Based Selected Features Start --------------') + self.log.info('Status:- |... 
Algorithm analysis based on model based feature selection started') + model_type2,model2,params2, score2, estimator2,model_tried2,xtrain2,ytrain2,xtest2,ytest2,threshold2,pscore2,rscore2,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,modelFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, ""ModelBased"",code_configure,modelEvaluationConfig) + #model_tried2['Features'] = 'ModelBased' + if model_tried != '': + model_tried += ',' + model_tried += model_tried2 + modelFeaturesStatus = True + self.log.info('Status:- |... Algorithm analysis for model based selected features completed') + self.log.info('-------------- Training ML: Models Based Selected Features End --------------\\n') + else: + modelFeaturesStatus = False + + if len(allFeatures) > 0: + self.log.info('Status:- |... Algorithm analysis based on all features Start') + model_type3,model3,params3, score3, estimator3,model_tried3,xtrain3,ytrain3,xtest3,ytest3,threshold3,pscore3,rscore3,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, ""AllFeatures"",code_configure,modelEvaluationConfig) + #model_tried3['Features'] = 'AllFeatures' + allFeaturesStatus = True + if model_tried != '': + model_tried += ',' + model_tried += model_tried3 + self.log.info('Status:- |... Algorithm analysis based all features completed') + else: + allFeaturesStatus = False + #print(topFeaturesStatus,modelFeaturesStatus,allFeaturesStatus) + if topFeaturesStatus: + if modelFeaturesStatus: + best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,'StatisticalBased','ModelBased') + if best_feature_model == 'StatisticalBased' and allFeaturesStatus: + best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') + if best_feature_model == 'ModelBased' and allFeaturesStatus: + best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') + elif allFeaturesStatus: + best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') + else: + best_feature_model = 'StatisticalBased' + if featureEngineeringSelector.lower() == 'true': + best_feature_model = 'DimensionalityReduction' + else: + if modelFeaturesStatus and allFeaturesStatus: + best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') + elif modelFeaturesStatus: + best_feature_model = 'ModelBased' + elif allFeaturesStatus: + best_feature_model = 'AllFeatures' + + if (best_feature_model == 'StatisticalBased' or best_feature_model == 'DimensionalityReduction'): + model_type = model_type1 + model = model1 + params = params1 + score = score1 + estimator = estimator1 + #model_tried = model_tried1 + xtrain = xtrain1 + ytrain = ytrain1 + xtest = xtest1 + ytest = ytest1 + features = topFeatures + threshold 
= threshold1 + pscore = pscore1 + rscore = rscore1 + elif (best_feature_model == 'AllFeatures'): + model_type = model_type3 + model = model3 + params = params3 + score = score3 + estimator = estimator3 + #model_tried = model_tried3 + xtrain = xtrain3 + ytrain = ytrain3 + xtest = xtest3 + ytest = ytest3 + features = allFeatures + threshold = threshold3 + pscore = pscore3 + rscore = rscore3 + else: + model_type = model_type2 + model = model2 + params = params2 + score = score2 + estimator = estimator2 + #model_tried = model_tried2 + xtrain = xtrain2 + ytrain = ytrain2 + xtest = xtest2 + ytest = ytest2 + threshold = threshold2 + pscore = pscore2 + rscore = rscore2 + features = modelFeatures + + if score != 'NA': + self.log.info('Status:- |... Final Best Algorithm selected: '+model+' having score='+str(round(score,2))+' based on '+best_feature_model+' feature selection') + filename = os.path.join(deployLocation,'model',iterName+'_'+iterVersion+'.sav') + saved_model = iterName+'_'+iterVersion+'.sav' + if model == 'Neural Architecture Search': + loaded_model = estimator + try: + estimator.save(filename, save_format=""tf"") + except Exception: + filename = os.path.join(deployLocation,'model','autoKerasModel.h5') + estimator.save(filename) + saved_model = 'autoKerasModel.h5' + else: + pickle.dump(estimator, open(filename, 'wb')) + loaded_model = pickle.load(open(filename, 'rb')) + if not xtest.empty: + df_test = xtest.copy() + else: + df_test = xtrain.copy() + if threshold == -1: + if model.lower() == 'lda': + predictedData = loaded_model.transform(xtest).argmax(axis=1) + trainPredictedData = loaded_model.transform(xtrain) + elif model.lower() == 'dbscan': + predictedData = loaded_model.fit_predict(xtest) + predictedData = loaded_model.labels_ + trainPredictedData = loaded_model.fit_predict(xtrain) + trainPredictedData = loaded_model.labels_ + elif model == 'Neural Architecture Search': + test_prob = estimator.predict(xtest) + train_prob = estimator.predict(xtrain) + if train_prob.shape[1] == 1: + train_prob = np.hstack(( 1-train_prob, train_prob)) + test_prob = np.hstack(( 1-test_prob, test_prob)) + predictedData = np.argmax(test_prob, axis=1) + trainPredictedData = np.argmax(train_prob, axis=1) + elif model in ['Deep Q Network','Dueling Deep Q Network']: + from tf_agents.trajectories import time_step + from tensorflow import constant + from sklearn.preprocessing import MinMaxScaler + q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) + test_prob = MinMaxScaler().fit_transform( q.numpy()) + q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) + train_prob = MinMaxScaler().fit_transform( q.numpy()) + predictedData = np.argmax(test_prob, axis=1) + trainPredictedData = np.argmax(train_prob, axis=1) + elif modelType == 'clustering': + if not xtest.empty: + predictedData = loaded_model.predict(xtest) + trainPredictedData = loaded_model.predict(xtrain) + else: + if not xtest.empty: + predictedData = loaded_model.predict(xtest) + trainPredictedData = loaded_model.predict(xtrain) + if hasattr(loaded_model, 'predict_proba'): + train_prob = loaded_model.predict_proba(xtrain) + if not xtest.empty: + test_prob = loaded_model.predict_proba(xtest) + else: + self.log.info(""-------> Threshold :""+str(threshold)) + + if not xtest.empty: + #bug 12437 + if 'predict_proba' in dir(loaded_model): + test_prob = loaded_model.predict_proba(xtest) + predictedData = 
binarize(test_prob[:,1].reshape(-1, 1),threshold=threshold) + else: + raise Exception('--------- Loaded model does not support predict_proba ---------\\n') + + train_prob = loaded_model.predict_proba(xtrain) + trainPredictedData = binarize(train_prob[:,1].reshape(-1, 1),threshold=threshold) + + matrix = '' + try: + if(model_type == 'Classification'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = self.getClassificationPerformaceMatrix(ytrain,trainPredictedData,train_prob,labelMaps) + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + if not xtest.empty: + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performancematrix = self.getClassificationPerformaceMatrix(ytest,predictedData,test_prob,labelMaps) + df_test['actual'] = ytest + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + matrix = performancematrix + if hasattr( loaded_model, 'predict_proba'): + predictedData_fit = loaded_model.predict_proba(xtest) + elif model == 'Neural Architecture Search': + predictedData_fit = estimator.predict(xtest) + elif model in ['Deep Q Network','Dueling Deep Q Network']: + from tf_agents.trajectories import time_step + from tensorflow import constant + q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) + predictedData_fit = q.numpy() + else: + predictedData_fit = loaded_model.predict(xtest) + if predictedData_fit.shape[1] == 1: + predictedData_fit = np.hstack((1 - predictedData_fit, predictedData_fit)) + self.auc" +"_roccurve(ytest,predictedData_fit,labelMaps,imageFolderLocation) + else: + + df_test['actual'] = ytrain + df_test['predict'] = trainPredictedData + + + + elif(model_type == 'Regression'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = self.get_regression_matrix(ytrain, trainPredictedData) + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + if not xtest.empty: + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + matrix = self.get_regression_matrix(ytest, predictedData) + df_test['actual'] = ytest + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + else: + df_test['actual'] = ytrain + df_test['predict'] = trainPredictedData + elif(model_type == 'Clustering'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = self.getclusterMatrix(xtrain,trainPredictedData) + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performacematrix = self.getclusterMatrix(xtest,predictedData) + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + matrix = performacematrix + elif(model_type.lower() == 'topicmodelling'): + self.log.info('\\n--------- Performance Matrix with Train Data ---------') + train_matrix = """" + self.log.info('--------- Performance Matrix with Train Data End ---------\\n') + self.log.info('\\n--------- Performance Matrix with Test Data ---------') + performacematrix = """" + df_test['predict'] = predictedData + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + matrix = performacematrix + except Exception as Inst: + self.log.info('--------- Error 
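+# --- Hedged sketch (not part of the original source): when a tuned threshold is available, the
+# code above converts positive-class probabilities into labels with sklearn's binarize instead
+# of calling predict(). An equivalent with plain numpy (names are illustrative):
+import numpy as np
+
+def predict_with_threshold(model, X, threshold):
+    proba = model.predict_proba(X)[:, 1]        # probability of the positive class
+    return (proba > threshold).astype(int)      # 1 strictly above the threshold, 0 otherwise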
Performance Matrix ---------\\n') + self.log.info(str(Inst)) + df_test['predict'] = predictedData + matrix = """" + train_matrix = """" + self.log.info('--------- Performance Matrix with Test Data End ---------\\n') + + save_csv_compressed(df_test, predicted_data_file, encoding='utf-8') + return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params + + def auc_roccurve(self,y_true,y_score,classee,imageFolderLocation): + from keras.utils import to_categorical + from sklearn.preprocessing import label_binarize + import re + n_classes = len(classee) + y_true = to_categorical(y_true,num_classes = n_classes) + + fpr ={} + tpr={} + roc_auc={} + class_names = list(classee.keys()) + typeofclass = list(classee.values()) + n_class = len(typeofclass) + + for i in range(n_classes): + fpr[i],tpr[i],_ = roc_curve(y_true[:,i], y_score[:,i]) + roc_auc[i]= auc(fpr[i],tpr[i]) + plt.figure() + plt.plot(fpr[i],tpr[i],label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})') + plt.plot([0,1],[0,1], linestyle='--') + plt.xlabel('False positive rate') + plt.ylabel('True positive rate') + plt.title(f'{class_names[i]} ROC Curve') + plt.legend() + img_location = os.path.join(imageFolderLocation,str(i)+'_roc.png') #15092 + plt.savefig(img_location) + + + + + + def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelFeatureBased,code_configure,modelEvaluationConfig): + matrix = '' + threshold = -1 + pscore = -1 + rscore = -1 + method = mlconfig['optimizationMethod'] + method = method.lower() + geneticParam = '' + topics = {} + optimizationHyperParameter = mlconfig['optimizationHyperParameter'] + cvSplit = optimizationHyperParameter['trainTestCVSplit'] + nIter = int(optimizationHyperParameter['iterations']) + if(method.lower() == 'genetic'): + geneticParam = optimizationHyperParameter['geneticparams'] + scoreParam = scoreParam + if 'thresholdTunning' in mlconfig: + thresholdTunning = mlconfig['thresholdTunning'] + else: + thresholdTunning = 'NA' + + + if len(topFeatures) !=0: + self.features=topFeatures + else: + datacolumns=list(xtrain.columns) + if targetColumn in datacolumns: + datacolumns.remove(targetColumn) + self.features =datacolumns + self.log.info(f'-------> Number of Features Used For Training the Model: {len(self.features)}') + features_names = str(self.features) + if len(features_names) > 500: + features_names = ','.join(self.features[:2]) + ', ..... 
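+# --- Hedged sketch (not part of the original source): auc_roccurve() above draws one
+# one-vs-rest ROC curve per class and saves each as <i>_roc.png. A stripped-down version of
+# that routine (data shapes and output directory are illustrative):
+import os
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')                 # headless backend so savefig works without a display
+import matplotlib.pyplot as plt
+from sklearn.metrics import roc_curve, auc
+from sklearn.preprocessing import label_binarize
+
+def save_ovr_roc_curves(y_true, y_score, class_names, out_dir):
+    classes = list(range(len(class_names)))
+    y_bin = label_binarize(y_true, classes=classes)
+    if y_bin.shape[1] == 1:                     # binary targets come back as a single column
+        y_bin = np.hstack([1 - y_bin, y_bin])
+    for i, name in enumerate(class_names):
+        fpr, tpr, _ = roc_curve(y_bin[:, i], y_score[:, i])
+        plt.figure()
+        plt.plot(fpr, tpr, label=f'{name} (AUC = {auc(fpr, tpr):.2f})')
+        plt.plot([0, 1], [0, 1], linestyle='--')
+        plt.xlabel('False positive rate')
+        plt.ylabel('True positive rate')
+        plt.legend()
+        plt.savefig(os.path.join(out_dir, f'{i}_roc.png'))
+        plt.close()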
,' + ','.join(self.features[-2:]) + self.log.info(f'-------> Features Used For Training the Model: {features_names}') + xtrain = xtrain[self.features] + if not xtest.empty: + xtest = xtest[self.features] + if cvSplit == """": + cvSplit =None + else: + cvSplit =int(cvSplit) + + if modelType == 'classification': + model_type = ""Classification"" + MakeFP0 = False + MakeFN0 = False + if(len(categoryCountList) == 2): + self.log.info(""\\n -------------- Check for FP or FN -------------- "") + self.log.info(""-------> Binary Classification"") + if(thresholdTunning.lower() == 'fp0'): + self.log.info(""-------> Threshold Tuning: False Positive"") + MakeFP0 = True + elif(thresholdTunning.lower() == 'fn0'): + self.log.info(""-------> Threshold Tuning: False Negative"") + MakeFN0 = True + if MakeFP0 == False and MakeFN0 == False: + self.log.info(""-------> Threshold Tuning: Not Any"") + self.log.info(""-------------- Check for FP or FN End-------------- \\n"") + elif(len(categoryCountList) > 2): #bug 12438 + self.log.info(""\\n -------------- Check for FP or FN -------------- "") + self.log.info(""-------> Multiclass Classification"") + if(thresholdTunning.lower() == 'fp0' or thresholdTunning.lower() == 'fn0'): + self.log.info(""-------> Threshold Tuning: Not supported"") + else: + self.log.info(""-------> Threshold Tuning: Not Any"") + self.log.info(""-------------- Check for FP or FN End-------------- \\n"") + objClf = ClassifierModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) + model, params, score, estimator,model_tried,threshold,pscore,rscore = objClf.classModelling( modelFeatureBased,code_configure) + + elif modelType == 'regression': + model_type = ""Regression"" + objClf = RegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) + model,params,score,estimator,model_tried = objClf.regressionModelling(modelFeatureBased,code_configure) + + elif modelType =='clustering': + model_type = 'Clustering' + print(modelList) + if 'KMeans' in modelList: + clustendency = self.cluster_tendency(xtrain) + model='KMeans' + model_tried = '{""Model"":""KMeans"",""Score"":""NA""}' + kmeanmodelparams=modelParams['KMeans'] + n_clusters = kmeanmodelparams['n_clusters'] + if n_clusters == None or n_clusters == 0 or n_clusters == '': + n_clusters = self.calculateNumberofCluster(xtrain) + kmeanmodelparams['n_clusters'] = n_clusters + kmeans=KMeans(n_clusters=n_clusters) + targetData=kmeans.fit_predict(xtrain) + self.log.info('Status:- |... 
ML Algorithm applied: KMeans') + self.log.info('\\n------------ Centers Points Start------------') + values = kmeans.cluster_centers_.squeeze() + #print(values) + centers = pd.DataFrame(kmeans.cluster_centers_,columns= xtrain.columns) + filename = os.path.join(deployLocation,'centers.csv') + centers.to_csv(filename) + labels = kmeans.labels_ + i=0 + for value_row in values: + j=0 + self.log.info('------->Label: '+str(i)) + for value in value_row: + self.log.info('---------->Feature: ""'+str(self.features[j])+'"" Center Point: '+str(value)) + j = j+1 + i = i+1 + self.log.info('------------ Centers Points Start------------\\n') + score='NA' + scoreParam=None + params=kmeanmodelparams + estimator=kmeans + if 'DBSCAN' in modelList: + DBSCAN_ModelParams=modelParams['DBSCAN'] + db = DBSCAN(eps=DBSCAN_ModelParams['eps'],min_samples = DBSCAN_ModelParams['min_samples']).fit(xtrain) + #targetData=db.fit_predict(xtrain) + self.log.info('Status:- |... ML Algorithm applied: DBSCAN') + labels = db.labels_ + n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) + n_noise_ = list(labels).count(-1) + self.log.info('------->Labels: '+str(labels)) + self.log.info('------->No Of Cluster: '+str(n_clusters_)) + self.log.info('------->No Of Noise Point: '+str(n_noise_)) + score='NA' + scoreParam=None + params='' + estimator=db + model='DBSCAN' + model_tried = '{""Model"":""DBSCAN"",""Score"":""NA""}' + + elif modelType == 'topicmodelling': + model_type = 'TopicModelling' + model='LDA' + model_tried = '{""Model"":""LDA"",""Score"":""NA""}' + LDAmodelparams=modelParams['LDA'] + n_topics = LDAmodelparams['n_topics'] + n_words_per_topic = LDAmodelparams['n_words_per_topic'] + if n_topics == None or n_topics == 0 or n_topics == '': + n_topics = 10 + LDAmodelparams['n_topics'] = n_topics + if n_words_per_topic == None or n_words_per_topic == 0 or n_words_per_topic == '': + n_words_per_topic = 10 + LDAmodelparams['n_words_per_topic'] = n_words_per_topic + + lda = LatentDirichletAllocation(n_components=n_topics,random_state=0) + self.log.info('Status:- |... ML Algorithm applied: LDA') + targetData=lda.fit_transform(xtrain) + topics = self.get_topics(lda, topFeatures, n_words_per_topic) + self.log.info(topics) + score='NA' + scoreParam=None + params=LDAmodelparams + estimator=lda + + return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, topics ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL" +"Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import time +import os +import sys +import numpy as np +from numpy import arange +from numpy import argmax +import json +from sklearn.metrics import accuracy_score +from sklearn.metrics import confusion_matrix +from sklearn.metrics import recall_score +from sklearn.metrics import precision_score +from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.tree import DecisionTreeClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.linear_model import SGDClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.metrics import roc_curve +from sklearn.metrics import roc_auc_score +from sklearn.metrics import f1_score +from sklearn.svm import SVC +from xgboost import XGBClassifier +from lightgbm import LGBMClassifier +from catboost import CatBoostClassifier +from sklearn.preprocessing import binarize +from learner.optimizetechnique import OptimizationTq +from learner.defaultAlgos import defaultParams +from learner.parameters import parametersDefine +from hyperopt import fmin, tpe, hp, STATUS_OK, Trials +import logging +from learner.aion_matrix import aion_matrix +import mlflow +from pathlib import Path +from uncertainties.aionUQ import aionUQ +# apply threshold to positive probabilities to create labels +def to_labels(pos_probs, threshold): + return (pos_probs >= threshold).astype('int') +class ClassifierModel(): + def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): + self.modelList =modelList + self.params =params + self.trainX =trainX + self.X =trainX + self.trainY =trainY + self.testX = testX + self.testY = testY + self.method =method + self.scoreParam=scoreParam + self.cvSplit=cvSplit + self.numIter=numIter + self.geneticParam=geneticParam + self.MakeFP0= MakeFP0 + self.MakeFN0=MakeFN0 + self.log = logging.getLogger('eion') + self.modelType = modelType + self.uq_x_train = trainX + self.uq_x_test = testX + self.uq_y_train = trainY + self.uq_y_test = testY + self.deployLocation = deployLocation + self.AlgorithmNames={'Logistic Regression':'LogisticRegression','Stochastic Gradient Descent':'SGDClassifier','Naive Bayes':'GaussianNB','Support Vector Machine':'SVC','K Nearest Neighbors':'KNeighborsClassifier','Decision Tree':'DecisionTreeClassifier','Random Forest':'RandomForestClassifier','Gradient Boosting':'GradientBoostingClassifier','Extreme Gradient Boosting (XGBoost)':'XGBClassifier','Categorical Boosting (CatBoost)': 'CatBoostClassifier','Light Gradient Boosting (LightGBM)': 'LGBMClassifier','Bagging (Ensemble)':'BaggingClassifier','Stacking (Ensemble)':'StackingClassifier','Voting (Ensemble)':'VotingClassifier','Deep Q Network':'DQN','Dueling Deep Q Network':'DDQN','Neural Architecture Search':'NAS'} + self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} + + def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): + thresholdx = -1 + for threshold in threshold_range: + predictedData = estimator.predict_proba(testX) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) + p_score = precision_score(testY, predictedData) + #self.log.info('-------------> Precision:'+str(p_score)) + r_score = recall_score(testY, predictedData) + #self.log.info('-------------> Rscore:'+str(r_score)) + #self.log.info(confusion_matrix(testY, predictedData)) + tn, fp, 
fn, tp = confusion_matrix(testY, predictedData).ravel() + if(checkParameter.lower() == 'fp'): + if fp == 0: + if(p_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + + if(checkParameter.lower() == 'fn'): + if fn == 0: + if(r_score == 1): + thresholdx = threshold + self.log.info('---------------> Best Threshold:'+str(threshold)) + self.log.info('---------------> Best Precision:'+str(p_score)) + self.log.info('---------------> Best Recall:'+str(r_score)) + self.log.info('---------------> TN:'+str(tn)) + self.log.info('---------------> FP:'+str(fp)) + self.log.info('---------------> FN:'+str(fn)) + self.log.info('---------------> TP:'+str(tp)) + break + return(thresholdx,p_score,r_score) + def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): + cmodel = False + if(threshold != -1): + if(bestthreshold == -1): + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fp0: + if rscore > brscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif rscore == brscore: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif fn0: + if pscore > bpscore: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + elif pscore == bpscore: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + bestthreshold = threshold + brscore = rscore + bpscore = pscore + btscore = tscore + else: + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + else: + if(bestthreshold == -1): + if tscore > btscore or btscore == -0xFFFF: + cmodel = True + btscore = tscore + + return cmodel,btscore,bestthreshold,brscore,bpscore + + + def logMlflow(self, runName, params, metrices, estimator, algoName=None): + with mlflow.start_run(run_name = runName): + for k,v in params.items(): + mlflow.log_param(k, v) + for k,v in metrices.items(): + mlflow.log_metric(k, v) + if algoName == 'CatBoostClassifier': + mlflow.catboost.log_model(estimator, ""model"") + else: + mlflow.sklearn.log_model(estimator, ""model"") + model_uri = mlflow.get_artifact_uri(""model"") + """""" for some dataset evaluate takes more than 90 min, so commenting till some solution is not found + evaluate_data = self.testX.copy() + evaluate_data['label'] = self.testY.copy() + mlflow.evaluate(model_uri, data=evaluate_data, targets='label', model_type=""classifier"") + del evaluate_data + """""" + + def classModelling(self, modelOrFeatureBased,code_configure): + paramObj=parametersDefine() + bestModel='None' + bestParams={} + bestScore=-0xFFFF + bestEstimator = 'None' + bestpipelineModel='None' + scoredetails = '' + threshold = -1 + bestthreshold = -1 + precisionscore =-1 + bestprecisionscore=-1 + recallscore = -1 + bestrecallscore=-1 + self.log.info('\\n---------- ClassifierModel has started ----------') + objClf = aion_matrix() + try: + self.log.info('Status:- |... 
Search Optimization Method applied: '+self.method) + for modelName in self.modelList: + if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Dueling Deep Q Network','Deep Q Network','Neural Architecture Search']: + if modelName == 'Bagging (Ensemble)': + from ensemble.ensemble_bagging import ensemble_bagging + ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,self.MakeFP0,self.MakeFN0) + estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_bagging_obj.ensemble_bagging_classifier(self.trainX,self.trainY,self.testX,self.testY) + if modelName == 'Stacking (Ensemble)': + from ensemble.ensemble_stacking import ensemble_stacking + ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) + estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_stacking_obj.ensemble_stacking_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) + if modelName == 'Voting (Ensemble)': + from ensemble.ensemble_voting import ensemble_voting + ensemble_voting_obj = ensemble_voting("""",self.scoreParam) + #bug 12437 + status,estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_voting_obj.ensemble_voting_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) + if status != ""SUCCESS"": #bug 12437 + continue + if modelName == 'Deep Q Network': + from reinforcement.DRL_train import ReinformentLearning + rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') + estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DQN',self.deployLocation) + if modelName == 'Dueling Deep Q Network': + from reinforcement.DRL_train import ReinformentLearning + rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') + estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DDQN',self.deployLocation) + ''' + if modelName == 'Neural Architecture Search': + from nas.aionNAS import aionNAS + objNAS = aionNAS('Classification',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) + estimator,modelParams,score,model,threshold,precisionscore,recallscore=objNAS.nasMain(self.scoreParam) + ''' + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(score)+',""ModelUncertainty"":""NA""}' + status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) + if status: + bestScore =bscore + bestModel =model + bestParams=modelParams + bestEstimator=estimator + bestthreshold = bthres + " +" bestrecallscore = brscore + bestprecisionscore = bpscore + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n') + continue + paramSpace=self.params[modelName].copy() + algoName = self.AlgorithmNames[modelName] + paramDict =paramObj.paramDefine(paramSpace,self.method) + if not self.method == 'bayesopt': + paramSize = paramObj.getParamSpaceSize(paramDict) + else: + paramSize = 0 + if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=0): + try: + start = time.time() + #function call + defObj = defaultParams(algoName,paramDict,self.scoreParam,self.MakeFP0, self.MakeFN0,paramSize) + estimator, modelParams, model,score, threshold, precisionscore, recallscore =defObj.startTrainingClassification(self.trainX,self.trainY,self.testX,self.testY) + executionTime = time.time() - start + if (scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""' + self.modelToAlgoNames[model] + '"",""FeatureEngineering"":""' + str( + modelOrFeatureBased) + '"",""Score"":' + str(score) + ',""ModelUncertainty"":""NA""}' + status, bscore, bthres, brscore, bpscore = self.getBestModel(self.MakeFP0, self.MakeFN0,threshold, bestthreshold,recallscore, bestrecallscore,precisionscore, bestprecisionscore,score, bestScore) + self.log.info('---------> Total Execution: ' + str(executionTime) + '\\n') + if status: + bestScore = bscore + bestModel = model + bestParams = modelParams + bestEstimator = estimator + bestthreshold = bthres + bestrecallscore = brscore + bestprecisionscore = bpscore + self.log.info('Status:- |... ML Algorithm applied: ' + modelName) + self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( + round(score, 2)) + '\\n') + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------' + modelName + ' Model Execution failed!!!.' + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + + continue + # call algorithms with default valuepass + + if self.method == 'bayesopt': + code_configure.add_model(algoName,paramSpace) + else: + paramDictCopy = paramDict + # numpy array is not json serializable + #numpy is already imported but still np.ndarray raise error + import numpy as np + for key,value in paramDictCopy.items(): + if isinstance(value, np.ndarray): + paramDictCopy[key] = paramDictCopy[key].tolist() + code_configure.add_model(algoName,paramDictCopy) + trainingStatus = 'Success' + if self.method =='grid': + try: + self.log.info(""-------> Optimization Method :Grid Search"") + self.log.info(""-------> Model Name: ""+str(modelName)) + + opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + start = time.time() + model,modelParams,score,estimator=opTq.gridSearchOpt() + executionTime=time.time() - start + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + score = objClf.get_score(self.scoreParam,self.testY,predictedData) + else: + score = score*100 + + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator, algoName) + except Exception as e: + self.log.info('----------> ML Flow error!!!. 
' + str(e)) # usnish + pass + output_jsonobject = """" + problemName = estimator.__class__.__name__ + self.log.info('----------> Testing Score: '+str(score)) + try: + if ((estimator.__class__.__name__ == ""ABCMeta"") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): + self.log.info('-----> Model Uncertainty Not Supported') + else: + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(model_confidence_per)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(model_uncertainty_per)+str('%')) + except: + pass + if(scoredetails != ''): + scoredetails += ',' + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(score)+',""ModelUncertainty"":'+str(json.dumps(output_jsonobject))+'}' + self.log.info('----------> Testing Score: '+str(score)) + + import numpy as np + if self.MakeFP0: + self.log.info('-------- Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) + self.log.info('-------- Calculate Threshold for FP End-------') + if self.MakeFN0: + self.log.info('-------- Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) + self.log.info('-------- Calculate Threshold for FN End-------') + self.log.info('----------> Total Execution: '+str(executionTime)+'\\n') + + status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) + if status: + bestScore =bscore + bestModel =model + bestParams=modelParams + bestEstimator=estimator + bestthreshold = bthres + bestrecallscore = brscore + bestprecisionscore = bpscore + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + elif self.method == 'random': + try: + self.log.info(""-------> Optimization Method :Random Search"") + self.log.info(""-------> Model Name: ""+str(modelName)) + start = time.time() + opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + model,modelParams,score,estimator=opTq.randomSearchOpt() + executionTime=time.time() - start + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + score = objClf.get_score(self.scoreParam,self.testY,predictedData) + else: + score = score*100 + + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator, algoName) + except Exception as e: + self.log.info('----------> ML 
Flow error!!!. ' + str(e)) # usnish + pass + import numpy as np + if self.MakeFP0: + self.log.info('-------- Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) + self.log.info('-------- Calculate Threshold for FP End-------') + + if self.MakeFN0: + self.log.info('-------- Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) + self.log.info('-------- Calculate Threshold for FN End-------') + + if threshold != -1: + if not self.testX.empty: + predictedData = estimator.predict_proba(self.testX) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) + score = objClf.get_score(self.scoreParam,self.testY,predictedData) + else: + predictedData = estimator.predict_proba(self.trainX) + predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) + score = objClf.get_score(self.scoreParam,self.trainY,predictedData) + + self.log.info('---------> Total Execution: '+str(executionTime)+'\\n') + output_jsonobject = """" + problemName = estimator.__class__.__name__" +" + self.log.info('----------> Testing Score: '+str(score)) + try: + if ((estimator.__class__.__name__ == ""ABCMeta"") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): + self.log.info('-----> Model Uncertainty Not Supported') + else: + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(model_confidence_per)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(model_uncertainty_per)+str('%')) + except Exception as e: + pass + + if(scoredetails != ''): + scoredetails += ',' + + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(score)+',""ModelUncertainty"":'+str(json.dumps(output_jsonobject))+'}' + + + + status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) + if status: + bestScore =bscore + bestModel =model + bestParams=modelParams + bestEstimator=estimator + bestthreshold = threshold + bestrecallscore = recallscore + bestprecisionscore = precisionscore + + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + elif self.method == 'bayesopt': + try: + self.log.info(""-------> Optimization Method :BayesOpt"") + self.log.info(""-------> Model Name: ""+str(modelName)) + + opTq 
=OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) + fun=opTq.f + trials = Trials() + start = time.time() + + best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=self.numIter, trials=trials) + executionTime=time.time() - start + results = sorted(trials.results, key = lambda x: x['loss']) + + bestresult=results[0] + model=bestresult['model'] + score=bestresult['score'] + modelParams=bestresult['params'] + + executionTime=time.time() - start + res = ', '.join(""{!s}={!r}"".format(key,val) for (key,val) in modelParams.items()) + + modelObj=eval(model+'('+res+')') + + estimator = modelObj.fit(self.trainX,self.trainY) + if not self.testX.empty: + predictedData = estimator.predict(self.testX) + score = objClf.get_score(self.scoreParam,self.testY,predictedData) + + problemName = estimator.__class__.__name__ + runName = algoName + '_' + modelOrFeatureBased + metrices = {} + metrices[""score""] = score + try: + self.logMlflow(runName, modelParams, metrices, estimator, algoName) + except Exception as e: + self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish + pass + output_jsonobject = """" + problemName = estimator.__class__.__name__ + self.log.info('----------> Testing Score: '+str(score)) + try: + if ((estimator.__class__.__name__ == ""ABCMeta"") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): + self.log.info('-----> Model Uncertainty Not Supported') + else: + uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) + accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,""aionuq"") + self.log.info(""-------> model_confidence: ""+str(model_confidence_per)+str('%')) + self.log.info(""-------> model_uncertainty: ""+str(model_uncertainty_per)+str('%')) + except: + pass + + if(scoredetails != ''): + scoredetails += ',' + + scoredetails += '{""Model"":""'+self.modelToAlgoNames[model]+'"",""FeatureEngineering"":""'+str(modelOrFeatureBased)+'"",""Score"":'+str(score)+',""ModelUncertainty"":'+str(json.dumps(output_jsonobject))+'}' + + ''' + test_accuracy = accuracy_score(self.testY,predictedData) + test_precision = precision_score(self.testY,predictedData,average='macro') + self.log.info('---------> Test Accuracy: '+str(test_accuracy)) + self.log.info('---------> Test Precision: '+str(test_precision)) + ''' + import numpy as np + if self.MakeFP0: + self.log.info('-------- Calculate Threshold for FP Start-------') + startRange = 0.0 + endRange = 1.0 + stepsize = 0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FP',algoName) + self.log.info('-------- Calculate Threshold for FP End-------') + if self.MakeFN0: + self.log.info('-------- Calculate Threshold for FN Start-------') + startRange = 1.0 + endRange = 0.0 + stepsize = -0.01 + threshold_range = np.arange(startRange,endRange,stepsize) + threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FN',algoName) + self.log.info('-------- Calculate Threshold for FN End-------') + self.log.info('---------> Total Execution: '+str(executionTime)+'\\n') + status,bscore,bthres,brscore,bpscore = 
self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) + if status: + bestScore =score + bestModel =model + bestParams=modelParams + res = ', '.join(""{!s}={!r}"".format(key,val) for (key,val) in bestParams.items()) + modelObj=eval(bestModel+'('+res+')') + bestEstimator=estimator + bestthreshold = threshold + bestrecallscore = recallscore + bestprecisionscore = precisionscore + except Exception as inst: + self.log.info('\\n < ---------- Model Execution Failed Start--------->') + self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + trainingStatus = 'Error (Exception)' + else: + trainingStatus = 'Error (HyperTunning Algo Not Supported)' + pass + self.log.info('Status:- |... ML Algorithm applied: '+modelName) + if trainingStatus.lower() == 'success': + self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n') + else: + self.log.info('Status:- |... Training Error : '+trainingStatus+'\\n') + self.log.info('---------- ClassifierModel End ---------- \\n') + if bestModel != 'None': + self.log.info('\\n------- Best Model and its parameters -------------') + self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) + self.log.info(""-------> Best Name: ""+str(bestModel)) + self.log.info(""-------> Best Score: ""+str(bestScore)) + return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore + else: + raise Exception(""Sorry, no model is trained"") + + except Exception as inst: + self.log.info( '\\n-----> ClassifierModel failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from hyperopt import fmin, tpe, hp, STATUS_OK, Trials +import numpy as np +import logging +import sys +import os + +class parametersDefine(): + def __init__(self): + self.paramDict = None + self.log = logging.getLogger('eion') + def getParamSpaceSize(self,paramDict): + size=1 + if(len(paramDict)==0): + return 0 + for keys in paramDict.keys(): + size=size*len(paramDict[keys]) + return size + def paramDefine(self, paramSpace, method): + paramDict = {} + for j in list(paramSpace.keys()): + inp = paramSpace[j] + try: + isLog = False + isLin = False + isRan = False + isList = False + isString = False + try: + # check if functions are given as input and reassign paramspace + v = paramSpace[j] + if 'logspace' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isLog = True + elif 'linspace' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isLin = True + elif 'range' in paramSpace[j]: + param" +"Space[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isRan = True + elif 'list' in paramSpace[j]: + paramSpace[j] = v[v.find(""("") + 1:v.find("")"")].replace("" "", """") + isList = True + elif '[' and ']' in paramSpace[j]: + paramSpace[j] = v.split('[')[1].split(']')[0].replace("" "", """") + isList = True + x = paramSpace[j].split(',') + except Exception as e: + if isinstance(paramSpace[j], (int, float)): + paramSpace[j] = str(paramSpace[j]) + x = [] + x.append(paramSpace[j]) + str_arg = paramSpace[j] + + # check if arguments are string + + try: + test = eval(x[0]) + except: + isString = True + + if isString: + paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x}) + else: + res = eval(str_arg) + if isLin: + y = eval('np.linspace' + str(res)) + paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) + elif isLog: + y = eval('np.logspace' + str(res)) + paramDict.update( + {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y}) + elif isRan: + y = eval('np.arange' + str(res)) + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + # check datatype of argument + elif isinstance(eval(x[0]), bool): + y = list(map(lambda i: eval(i), x)) + paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y}) + elif isinstance(eval(x[0]), float): + res = eval(str_arg) + if len(str_arg.split(',')) == 3 and not isList: + y = eval('np.linspace' + str(res)) + #print(y) + paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) + else: + y = list(res) if isinstance(res, tuple) else [res] + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + else: + res = eval(str_arg) + if len(str_arg.split(',')) == 3 and not isList: + y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res)) + else: + y = list(res) if isinstance(res, tuple) else [res] + paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) + except Exception as inst: + self.log.info('\\n-----> Parameter parsing failed!!!.' 
+ str(inst)) + self.log.info(""The entered parameter is invalid: {""+ j +':'+ inp+'}') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + raise + return paramDict + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import sys +import logging +import json +import joblib +from pathlib import Path +import platform +from datetime import datetime as dt +import time +from pathlib import Path +import argparse +from operator import itemgetter +import re +import fitz +from io import StringIO +from nltk.tokenize import sent_tokenize +import pandas as pd +from scipy import spatial + +import urllib.request +import zipfile +import shutil + +requirements = """""" +scipy +pandas +pathlib +"""""" + +def pdf2txtInternal(pdffile): + keyword = ['Discussion','4. Discussion','DISCUSSION','Results','RESULTS','Introduction','introduction','methods', + 'method','result','results','limitation','Conclusions','conclusion','Conclusions','Acknowledgements', + 'Acknowledgement','ACKNOWLEDGMENT','ACKNOWLEDGMENTS','References','REFERENCES'] + print(pdffile) + filename1 = Path(pdffile) + csvInpClassFileName = filename1.stem + csvOutpClassFileName = ""ClassResult"" + filename1.stem +"".csv"" + styles = {} + font_counts = {} + granularity=False + doc = fitz.open(pdffile) + for i in range(1,len(doc)+1): + page = doc[i-1] + blocks = page.get_text(""dict"")[""blocks""] + for b in blocks: # iterate through the text blocks + if b['type'] == 0: # block contains text + for l in b[""lines""]: # iterate through the text lines + for s in l[""spans""]: # iterate through the text spans + if granularity: + identifier = ""{0}_{1}_{2}_{3}"".format(s['size'], s['flags'], s['font'], s['color']) + styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'], + 'color': s['color']} + else: + identifier = ""{0}"".format(s['size']) + styles[identifier] = {'size': s['size'], 'font': s['font']} + + font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage + + font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True) + doc.close() + if len(font_counts) < 1: + raise ValueError(""Zero discriminating fonts found!"") + + p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph) + p_size = p_style['size'] + results = [] # list of tuples that store the information as (text, font size, font name) + total_data =[] + para_data =[] + search_data =[] + only_text =[] + v={} + pdf = fitz.open(pdffile) # filePath is a string that contains the path to the pdf + for page in pdf: + dict = page.get_text(""dict"") + blocks = dict[""blocks""] + for block in blocks: + if ""lines"" in block.keys(): + spans = block['lines'] + for span in spans: + data = span['spans'] + for lines in data: + if lines['size']>=p_size: + total_data.append([[lines['text']], [lines['size'], lines['font']]]) + search_data.append([[lines['text']], 
[str(int(lines['size']))]]) + para_data.append([lines['text']]) #, [lines['size']]]) + + for keywords in keyword: + if keywords == lines['text']: # only store font information of a specific keyword + results.append([[lines['text']], [lines['size'], lines['font']]]) + only_text.append([lines['text']]) + pdf.close() + headers=[''] + intros =['Abstract','abstract'] + header = [''] + headers_info =[] + for line in total_data: + if results[-1][1] == line[1]: + headers_info.append(line) + headers.extend(line[0]) + if str(results[-1][0]).isupper(): + headers =([item for item in headers if re.findall(r""(?=(p_size) or float(rec)>= header_size: + check.extend(al[0]) + str3 = str1.join(check) + str3 = str1.join(check) + repl = [['- ', '-'], [' +', ' '], [' \\.', '.']] + for i in repl: + str3 = re.sub(i[0], i[1], str3) + + dataTosend=[] + data = [] + for cols in range(2,len(headers)+1): + start = headers[cols-2] #.replace(' ','') #'SUBJECTS AND METHODS' + end = headers[cols-1] + if start in ['Acknowledgements', 'Acknowledgement', 'ACKNOWLEDGMENT','ACKNOWLEDGMENTS', 'References', 'REFERENCES']: + break + if start=='': #.replace(' ','') + res=(str2[str2.find(start)+len(start):str2.rfind(end)]) + data.append(['Abstract', res]) + tmp='Abstract' + ':'+ ' ' + res + dataTosend.append(tmp) + else: + res=(str2[str2.rfind(start)+len(start):str2.rfind(end)]) + data.append([start, res]) + tmp=start + ':'+ ' ' + res + dataTosend.append(tmp) + + tokens = [] # sent tokenization and csv file creation updated + for idx in range(len(data)): + head = data[idx][0] + para = data[idx][1] + exp = sent_tokenize(para) + for val in exp: + tokens.append([head, val]) + + sent_data = [] + for head, sent in tokens: + break_sent = r'\\. [A-Z]|\\.[A-Z]' # break senteance if 2 or more in a same column. 
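+# Illustrative note: break_sent matches a full stop followed by an optional space and a
+# capital letter, i.e. the point where two sentences were glued together during PDF
+# extraction. For example, with the standard re module:
+#     re.findall(break_sent, 'First part. Second part.Third')   # -> ['. S', '.T']
+# Each such match is treated below as a boundary: text up to and including the stop is
+# emitted as one sentence and the remainder is re-scanned for further boundaries.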
+ match = re.findall(break_sent, sent) + + if len(match) >= 1: + for i in range (len(match)): + idx, _ = re.search(break_sent, sent).span() + sent_data.append( sent[:int(idx)+1].strip()) + sent = sent[int(idx)+1:].strip() + + if (re.search('^[a-z]|^[,;]', sent)): # add incomplete sentence + if sent_data != []: + last_val = sent_data.pop() + new_val = last_val[1] +' '+ sent + sent_data.append( new_val) + else: + sent_data.append( sent) + return sent_data + +def get_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def extract_data(location): + files = [x for x in Path(location).iterdir() if x.suffix == '.pdf'] + if not files: + raise ValueError(f'no pdf file found in directory {location}') + sentences = [] + for file in files: + data=pdf2txtInternal(file) + sentences.append(data) + return [item for sublist in sentences for item in sublist] + +def keyWordGeneration( keywords,deploy_loc, pretrained_loc): + keywords = keywords.split() + noOfKeyword = len(keywords) + embeddings = {} + word = '' + print(pretrained_loc) + with open(pretrained_loc, 'r', encoding=""utf8"") as f: + header = f.readline() + header = header.split(' ') + vocab_size = int(header[0]) + embed_size = int(header[1]) + for i in range(vocab_size): + data = f.readline().strip().split(' ') + word = data[0] + embeddings[word] = [float(x) for x in data[1:]] + readData=pd.DataFrame([],columns=['Keyword']) + for i in range(noOfKeyword): + neighbours = (sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6] + readData = readData.append({'Keyword': keywords[i]}, ignore_index=True) + for j in range(len(neighbours)): + readData = readData.append({'Keyword': neighbours[j]}, ignore_index=True) + readData.to_csv( Path(deploy_loc)/""keywordDataBase.csv"",encoding='utf-8',index=False) + return set( readData['Key" +"word']) + +def dataClassifyWithKw(sentences, keywords): + df = pd.DataFrame(sentences, columns=['File']) + pattern = '|'.join(keywords) + df['Label'] = df.File.str.contains(pattern) + return df + +def to_dataframe(data_loc, keywords, pretrained_type, embedding_size=300, deploy_loc=None, train=True): + pretrained_loc = checkAndDownloadPretrainedModel(pretrained_type, embedding_size) + sentences = extract_data(data_loc) + if train: + keywords = keyWordGeneration( keywords,deploy_loc, pretrained_loc) + df = dataClassifyWithKw(sentences, keywords) + return df + +def get_pretrained_model_path(): + from AION.appfe.appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath + +def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300): + + models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}} + supported_models = [x for y in models.values() for x in y.values()] + embedding_sizes = {x:y.keys() for x,y in models.items()} + if embedding_size not in embedding_sizes[preTrainedModel]: + raise ValueError(f""Embedding size '{embedding_size}' not supported for {preTrainedModel}"") + selected_model = models[preTrainedModel.lower()][embedding_size] + modelsPath = get_pretrained_model_path() + p = Path(modelsPath).glob('**/*') + 
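+# The glob above lists everything under the local pretrained-models directory; the
+# filter below keeps only file names that belong to the supported GloVe/fastText
+# archives, so the selected embedding file is downloaded only when it is not already
+# present on disk.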
modelsDownloaded = [x.name for x in p if x.name in supported_models] + local_file_path = None + if selected_model not in modelsDownloaded: + if preTrainedModel.lower() == ""glove"": + try: + location = Path(modelsPath) + local_file_path = location/f""glove.6B.{embedding_size}d.w2vformat.txt"" + file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path) + except Exception as e: + raise ValueError(""Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. "".format(location)+str(e)) + + elif preTrainedModel.lower() == ""fasttext"": + try: + location = Path(modelsPath) + local_file_path = location/""wiki-news-300d-1M.vec.zip"" + url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip' + file_test, header_test = urllib.request.urlretrieve(url, local_file_path) + with zipfile.ZipFile(local_file_path) as zip_ref: + zip_ref.extractall(location) + Path(local_file_path).unlink() + except Exception as e: + raise ValueError(""Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. "".format(location)+str(e)) + return Path(modelsPath)/selected_model + +def get_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_params(profiler): + pretrained_model = get_true_option(profiler.get('textConversionMethod', {}), 'Glove') + embedding_size = get_true_option(profiler['embeddingSize'][pretrained_model], 50) + pretrained_model = pretrained_model.lower() + if pretrained_model == 'fasttext': + embedding_size = 300 + elif pretrained_model == 'glove': + sizes = {'default':300, '50d':50, '100d':100,'200d':200, '300d':300} + embedding_size = sizes[embedding_size] + keywords = profiler['KeyWords'] + return ""delhi dialysis"", pretrained_model, embedding_size + +def deploy(deploy_path, pretrained_model, embedding_size, output_columns,model_file, bert_length): + from AION.mlac.ml.core.imports import importModule + def create_predict(pretrained_model, embedding_size): + importer = importModule() + common_importes = [ + {'module': 'sys', 'mod_from': None, 'mod_as': None}, + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + local_importes = [ + {'module': 'selector', 'mod_from': 'script.selector', 'mod_as': None}, + {'module': 'inputprofiler', 'mod_from': 'script.inputprofiler', 'mod_as': None}, + {'module': 'trained_model', 'mod_from': 'script.trained_model', 'mod_as': None}, + {'module': 'summarize', 'mod_from': None, 'mod_as': None} + ] + for mod in local_importes: + importer.addLocalModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + + text = f"""""" +def predict(data): + try: + dataLocation = Path(data) + if not dataLocation.is_dir(): + raise ValueError('Input should be a valid directory') + keywords_file = Path(__file__).parent/'keywordDataBase.csv' + if not keywords_file.exists(): + raise ValueError('keywordDataBase.csv is missing in trained model output') + keywords_df = pd.read_csv(keywords_file) + if 'Keyword' not in 
keywords_df.columns: + raise ValueError('keywordDataBase.csv file in output folder is corrupt') + pretrained_type = '{pretrained_model.lower()}' + embedding_sz = {embedding_size} + keywords = keywords_df['Keyword'].tolist() + df = summarize.to_dataframe(dataLocation, keywords, pretrained_type, embedding_sz, train=False) + df0 = df.copy() + profilerobj = inputprofiler() + df = profilerobj.apply_profiler(df) + selectobj = selector() + df = selectobj.apply_selector(df) + modelobj = trained_model() + output = modelobj.predict(df,df0) + outputjson = {{""status"":""SUCCESS"",""data"":output}} + print(""predictions:"",outputjson) + except KeyError as e: + output = {{""status"":""FAIL"",""message"":str(e).strip('""')}} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + output = {{""status"":""FAIL"",""message"":str(e).strip('""')}} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = predict(sys.argv[1]) + """""" + code = importer.getCode() + code += text + return code + + def create_profiler(output_columns): + importer = importModule() + common_importes = [ + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'} + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + + text = f"""""" +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + if self.model: + df = self.model.transform(df) + if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns={output_columns}) + else: + df = pd.DataFrame(df, columns={output_columns}) + return(df) + """""" + code = importer.getCode() + code += text + return code + + def create_selector(output_columns): + importer = importModule() + common_importes = [ + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + + text = f"""""" +class selector(object): + def apply_selector(self,df): + df = df[{output_columns}] + return(df) + """""" + code = importer.getCode() + code += text + return code + + def create_train(model_file, bert_length): + importer = importModule() + common_importes = [ + {'module': 'os', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, + {'module': 'Summarizer', 'mod_from': 'summarizer', 'mod_as': None } + ] + for mod in common_importes: + importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) + + text = f"""""" +class trained_model(object): + def __init__(self): + self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{model_file}')) + + def predict(self, X, df_org): + X = X.astype(np.float32) + df_org['predicted'] = 
pd.DataFrame(self.model.predict(X)) + textToSum="""" + for i in df_org.index: + if (df_org['predicted'][i] or df_org['Label'][i]) : + textToSum=textToSum + "" "" + df_org[""File""][i] + bert_model = Summarizer() + bert_summary=bert_model(textToSum, min_length={bert_length}) + return bert_summary +"""""" + code = importer.getCode() + code += text + return code + deploy_path = Path(deploy_path) + aion_prediction = deploy_path/'aion_predict.py' + profiler_file = deploy_path/'script'/'inputprofiler.py' + selector_file = deploy_path/'script'/'selector.py' + trainer_file = deploy_path/'script'/'trained_model.py' + + with open(aion_prediction, 'w') as f: + f.write(create_predict(pretrained_model, embedding_size)) + + with open(profiler_file, 'w') as f: + f.write(create_profiler(output_columns)) + + with open(selector_file, 'w') as f: + f.write(create_selector(output_columns)) + + with open(trainer_file, 'w') as f: + f.write(create_train(model_file, bert_length)) + + cwf = Path(__file__) + shutil.copy(cwf, deploy_path/cwf.name) + +# require dataLocation for reading files +# require deployLocation for saving keywords +# require pretrained model location +# require pretrained model type +# require keywwords +if __name__ == '__main__': + dataLocation = r'C:\\Harish\\aion\\task\\task\\summarization\\reference\\pdfs' + deployLocation = r'C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\uses' + pretrained_loc = r""C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\PreTrainedModels\\TextProcessing"" + pretrained_type = 'glove' + keywords = 'delhi dialysis' + + data = to_dataframe(dataLocation, keywords, pretrained_type,300, deployLocation, train=True) + print(data) + data.to_csv(Path(deployLocation)/'output.csv', index=False) # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""""Code for generating the feature_statistics proto from generic data. + +The proto is used as input for the Overview visualization. +"""""" + +from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator +import facets_overview.feature_statistics_pb2 as fs + + +class GenericFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator): + """"""Generator of stats proto from generic data."""""" + + def __init__(self): + BaseGenericFeatureStatisticsGenerator.__init__( + self" +", fs.FeatureNameStatistics, fs.DatasetFeatureStatisticsList, + fs.Histogram) + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: feature_statistics.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='feature_statistics.proto', + package='featureStatistics', + syntax='proto3', + serialized_pb=_b('\\n\\x18\\x66\\x65\\x61ture_statistics.proto\\x12\\x11\\x66\\x65\\x61tureStatistics\\""]\\n\\x1c\\x44\\x61tasetFeatureStatisticsList\\x12=\\n\\x08\\x64\\x61tasets\\x18\\x01 \\x03(\\x0b\\x32+.featureStatistics.DatasetFeatureStatistics\\""\\x99\\x01\\n\\x18\\x44\\x61tasetFeatureStatistics\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x14\\n\\x0cnum_examples\\x18\\x02 \\x01(\\x04\\x12\\x1d\\n\\x15weighted_num_examples\\x18\\x04 \\x01(\\x01\\x12:\\n\\x08\\x66\\x65\\x61tures\\x18\\x03 \\x03(\\x0b\\x32(.featureStatistics.FeatureNameStatistics\\""\\x8b\\x03\\n\\x15\\x46\\x65\\x61tureNameStatistics\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12;\\n\\x04type\\x18\\x02 \\x01(\\x0e\\x32-.featureStatistics.FeatureNameStatistics.Type\\x12\\x39\\n\\tnum_stats\\x18\\x03 \\x01(\\x0b\\x32$.featureStatistics.NumericStatisticsH\\x00\\x12;\\n\\x0cstring_stats\\x18\\x04 \\x01(\\x0b\\x32#.featureStatistics.StringStatisticsH\\x00\\x12\\x39\\n\\x0b\\x62ytes_stats\\x18\\x05 \\x01(\\x0b\\x32\\"".featureStatistics.BytesStatisticsH\\x00\\x12\\x38\\n\\x0c\\x63ustom_stats\\x18\\x06 \\x03(\\x0b\\x32\\"".featureStatistics.CustomStatistic\\""1\\n\\x04Type\\x12\\x07\\n\\x03INT\\x10\\x00\\x12\\t\\n\\x05\\x46LOAT\\x10\\x01\\x12\\n\\n\\x06STRING\\x10\\x02\\x12\\t\\n\\x05\\x42YTES\\x10\\x03\\x42\\x07\\n\\x05stats\\""x\\n\\x18WeightedCommonStatistics\\x12\\x17\\n\\x0fnum_non_missing\\x18\\x01 \\x01(\\x01\\x12\\x13\\n\\x0bnum_missing\\x18\\x02 \\x01(\\x01\\x12\\x16\\n\\x0e\\x61vg_num_values\\x18\\x03 \\x01(\\x01\\x12\\x16\\n\\x0etot_num_values\\x18\\x04 \\x01(\\x01\\""w\\n\\x0f\\x43ustomStatistic\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x03num\\x18\\x02 \\x01(\\x01H\\x00\\x12\\r\\n\\x03str\\x18\\x03 \\x01(\\tH\\x00\\x12\\x31\\n\\thistogram\\x18\\x04 \\x01(\\x0b\\x32\\x1c.featureStatistics.HistogramH\\x00\\x42\\x05\\n\\x03val\\""\\xaa\\x02\\n\\x11NumericStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0c\\n\\x04mean\\x18\\x02 \\x01(\\x01\\x12\\x0f\\n\\x07std_dev\\x18\\x03 \\x01(\\x01\\x12\\x11\\n\\tnum_zeros\\x18\\x04 \\x01(\\x04\\x12\\x0b\\n\\x03min\\x18\\x05 \\x01(\\x01\\x12\\x0e\\n\\x06median\\x18\\x06 \\x01(\\x01\\x12\\x0b\\n\\x03max\\x18\\x07 \\x01(\\x01\\x12\\x30\\n\\nhistograms\\x18\\x08 \\x03(\\x0b\\x32\\x1c.featureStatistics.Histogram\\x12L\\n\\x16weighted_numeric_stats\\x18\\t \\x01(\\x0b\\x32,.featureStatistics.WeightedNumericStatistics\\""\\x8c\\x03\\n\\x10StringStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0e\\n\\x06unique\\x18\\x02 \\x01(\\x04\\x12\\x44\\n\\ntop_values\\x18\\x03 
\\x03(\\x0b\\x32\\x30.featureStatistics.StringStatistics.FreqAndValue\\x12\\x12\\n\\navg_length\\x18\\x04 \\x01(\\x02\\x12\\x38\\n\\x0erank_histogram\\x18\\x05 \\x01(\\x0b\\x32 .featureStatistics.RankHistogram\\x12J\\n\\x15weighted_string_stats\\x18\\x06 \\x01(\\x0b\\x32+.featureStatistics.WeightedStringStatistics\\x1aM\\n\\x0c\\x46reqAndValue\\x12\\x1b\\n\\x0f\\x64\\x65precated_freq\\x18\\x01 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t\\x12\\x11\\n\\tfrequency\\x18\\x03 \\x01(\\x01\\""|\\n\\x19WeightedNumericStatistics\\x12\\x0c\\n\\x04mean\\x18\\x01 \\x01(\\x01\\x12\\x0f\\n\\x07std_dev\\x18\\x02 \\x01(\\x01\\x12\\x0e\\n\\x06median\\x18\\x03 \\x01(\\x01\\x12\\x30\\n\\nhistograms\\x18\\x04 \\x03(\\x0b\\x32\\x1c.featureStatistics.Histogram\\""\\x9a\\x01\\n\\x18WeightedStringStatistics\\x12\\x44\\n\\ntop_values\\x18\\x01 \\x03(\\x0b\\x32\\x30.featureStatistics.StringStatistics.FreqAndValue\\x12\\x38\\n\\x0erank_histogram\\x18\\x02 \\x01(\\x0b\\x32 .featureStatistics.RankHistogram\\""\\xa1\\x01\\n\\x0f\\x42ytesStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0e\\n\\x06unique\\x18\\x02 \\x01(\\x04\\x12\\x15\\n\\ravg_num_bytes\\x18\\x03 \\x01(\\x02\\x12\\x15\\n\\rmin_num_bytes\\x18\\x04 \\x01(\\x02\\x12\\x15\\n\\rmax_num_bytes\\x18\\x05 \\x01(\\x02\\""\\xed\\x02\\n\\x10\\x43ommonStatistics\\x12\\x17\\n\\x0fnum_non_missing\\x18\\x01 \\x01(\\x04\\x12\\x13\\n\\x0bnum_missing\\x18\\x02 \\x01(\\x04\\x12\\x16\\n\\x0emin_num_values\\x18\\x03 \\x01(\\x04\\x12\\x16\\n\\x0emax_num_values\\x18\\x04 \\x01(\\x04\\x12\\x16\\n\\x0e\\x61vg_num_values\\x18\\x05 \\x01(\\x02\\x12\\x16\\n\\x0etot_num_values\\x18\\x08 \\x01(\\x04\\x12:\\n\\x14num_values_histogram\\x18\\x06 \\x01(\\x0b\\x32\\x1c.featureStatistics.Histogram\\x12J\\n\\x15weighted_common_stats\\x18\\x07 \\x01(\\x0b\\x32+.featureStatistics.WeightedCommonStatistics\\x12\\x43\\n\\x1d\\x66\\x65\\x61ture_list_length_histogram\\x18\\t \\x01(\\x0b\\x32\\x1c.featureStatistics.Histogram\\""\\xc4\\x02\\n\\tHistogram\\x12\\x0f\\n\\x07num_nan\\x18\\x01 \\x01(\\x04\\x12\\x15\\n\\rnum_undefined\\x18\\x02 \\x01(\\x04\\x12\\x34\\n\\x07\\x62uckets\\x18\\x03 \\x03(\\x0b\\x32#.featureStatistics.Histogram.Bucket\\x12\\x38\\n\\x04type\\x18\\x04 \\x01(\\x0e\\x32*.featureStatistics.Histogram.HistogramType\\x12\\x0c\\n\\x04name\\x18\\x05 \\x01(\\t\\x1a\\x63\\n\\x06\\x42ucket\\x12\\x11\\n\\tlow_value\\x18\\x01 \\x01(\\x01\\x12\\x12\\n\\nhigh_value\\x18\\x02 \\x01(\\x01\\x12\\x1c\\n\\x10\\x64\\x65precated_count\\x18\\x03 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\x14\\n\\x0csample_count\\x18\\x04 \\x01(\\x01\\"",\\n\\rHistogramType\\x12\\x0c\\n\\x08STANDARD\\x10\\x00\\x12\\r\\n\\tQUANTILES\\x10\\x01\\""\\xc9\\x01\\n\\rRankHistogram\\x12\\x38\\n\\x07\\x62uckets\\x18\\x01 \\x03(\\x0b\\x32\\'.featureStatistics.RankHistogram.Bucket\\x12\\x0c\\n\\x04name\\x18\\x02 \\x01(\\t\\x1ap\\n\\x06\\x42ucket\\x12\\x10\\n\\x08low_rank\\x18\\x01 \\x01(\\x04\\x12\\x11\\n\\thigh_rank\\x18\\x02 \\x01(\\x04\\x12\\x1c\\n\\x10\\x64\\x65precated_count\\x18\\x03 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\r\\n\\x05label\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0csample_count\\x18\\x05 \\x01(\\x01\\x62\\x06proto3') +) + + + +_FEATURENAMESTATISTICS_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='featureStatistics.FeatureNameStatistics.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='INT', index=0, number=0, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='FLOAT', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='STRING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BYTES', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, +" +" options=None, + serialized_start=636, + serialized_end=685, +) +_sym_db.RegisterEnumDescriptor(_FEATURENAMESTATISTICS_TYPE) + +_HISTOGRAM_HISTOGRAMTYPE = _descriptor.EnumDescriptor( + name='HistogramType', + full_name='featureStatistics.Histogram.HistogramType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STANDARD', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='QUANTILES', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=2735, + serialized_end=2779, +) +_sym_db.RegisterEnumDescriptor(_HISTOGRAM_HISTOGRAMTYPE) + + +_DATASETFEATURESTATISTICSLIST = _descriptor.Descriptor( + name='DatasetFeatureStatisticsList', + full_name='featureStatistics.DatasetFeatureStatisticsList', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='datasets', full_name='featureStatistics.DatasetFeatureStatisticsList.datasets', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=47, + serialized_end=140, +) + + +_DATASETFEATURESTATISTICS = _descriptor.Descriptor( + name='DatasetFeatureStatistics', + full_name='featureStatistics.DatasetFeatureStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='featureStatistics.DatasetFeatureStatistics.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_examples', full_name='featureStatistics.DatasetFeatureStatistics.num_examples', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weighted_num_examples', full_name='featureStatistics.DatasetFeatureStatistics.weighted_num_examples', index=2, + number=4, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='features', full_name='featureStatistics.DatasetFeatureStatistics.features', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=143, + serialized_end=296, +) + + +_FEATURENAMESTATISTICS = _descriptor.Descriptor( + name='FeatureNameStatistics', + full_name='featureStatistics.FeatureNameStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='featureStatistics.FeatureNameStatistics.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='featureStatistics.FeatureNameStatistics.type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_stats', full_name='featureStatistics.FeatureNameStatistics.num_stats', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='string_stats', full_name='featureStatistics.FeatureNameStatistics.string_stats', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bytes_stats', full_name='featureStatistics.FeatureNameStatistics.bytes_stats', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='custom_stats', full_name='featureStatistics.FeatureNameStatistics.custom_stats', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FEATURENAMESTATISTICS_TYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='stats', full_name='featureStatistics.FeatureNameStatistics.stats', + index=0, containing_type=None, fields=[]), + ], + serialized_start=299, + serialized_end=694, +) + + +_WEIGHTEDCOMMONSTATISTICS = _descriptor.Descriptor( + name='WeightedCommonStatistics', + full_name='featureStatistics.WeightedCommonStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='num_non_missing', full_name='featureStatistics.WeightedCommonStatistics.num_non_missing', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_missing', full_name='featureStatistics.WeightedCommonStatistics.num_missing', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='avg_num_values', full_name='featureStatistics.WeightedCommonStatistics.avg_num_values', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tot_num_values', full_name='featureStatistics.WeightedCommonStatistics.tot_num_values', index=3, + number=4, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=696, + serialized_end=816, +) + + +_CUSTOMSTATISTIC = _descriptor.Descriptor( + name='CustomStatistic', + full_name='featureStatistics.CustomStatistic', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='featureStatistics.CustomStatistic.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num', full_name='featureStatistics.CustomStatistic.num', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='str', full_name='featureStatistics.CustomStatistic.str', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='histogram', full_name='featureStatistics.CustomStatistic.histogram', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='val', full_name='featureStatistics.CustomStatistic.val', + index=0, containing_type=None, fields=[]), + ], + serialized_start=818, + serialized_end=937, +) + + +_NUMERICSTATISTICS = _descriptor.Descriptor( + name='NumericStatistics', + full_name='featureStatistics.NumericStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='common_stats', full_name='featureStatistics.NumericStatistics.common_stats', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='featureStatistics.NumericStatistics.mean', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='std_dev', full_name='featureStatistics.NumericStatistics.std_dev', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_zeros', full_name='featureStatistics.NumericStatistics.num_zeros', index=3, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min', full_name='featureStatistics.NumericStatistics.min', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='median', full_name='featureStatistics.NumericStatistics.median', index=5" +", + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max', full_name='featureStatistics.NumericStatistics.max', index=6, + number=7, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='histograms', full_name='featureStatistics.NumericStatistics.histograms', index=7, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weighted_numeric_stats', full_name='featureStatistics.NumericStatistics.weighted_numeric_stats', index=8, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=940, + serialized_end=1238, +) + + +_STRINGSTATISTICS_FREQANDVALUE = _descriptor.Descriptor( + name='FreqAndValue', + full_name='featureStatistics.StringStatistics.FreqAndValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated_freq', full_name='featureStatistics.StringStatistics.FreqAndValue.deprecated_freq', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001'))), + _descriptor.FieldDescriptor( + name='value', full_name='featureStatistics.StringStatistics.FreqAndValue.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='frequency', full_name='featureStatistics.StringStatistics.FreqAndValue.frequency', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1560, + serialized_end=1637, +) + +_STRINGSTATISTICS = _descriptor.Descriptor( + name='StringStatistics', + full_name='featureStatistics.StringStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='common_stats', full_name='featureStatistics.StringStatistics.common_stats', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unique', full_name='featureStatistics.StringStatistics.unique', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='top_values', full_name='featureStatistics.StringStatistics.top_values', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='avg_length', full_name='featureStatistics.StringStatistics.avg_length', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rank_histogram', full_name='featureStatistics.StringStatistics.rank_histogram', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weighted_string_stats', full_name='featureStatistics.StringStatistics.weighted_string_stats', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_STRINGSTATISTICS_FREQANDVALUE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1241, + serialized_end=1637, +) + + +_WEIGHTEDNUMERICSTATISTICS = _descriptor.Descriptor( + name='WeightedNumericStatistics', + full_name='featureStatistics.WeightedNumericStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mean', full_name='featureStatistics.WeightedNumericStatistics.mean', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='std_dev', full_name='featureStatistics.WeightedNumericStatistics.std_dev', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='median', full_name='featureStatistics.WeightedNumericStatistics.median', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='histograms', full_name='featureStatistics.WeightedNumericStatistics.histograms', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1639, + serialized_end=1763, +) + + +_WEIGHTEDSTRINGSTATISTICS = _descriptor.Descriptor( + name='WeightedStringStatistics', + full_name='featureStatistics.WeightedStringStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='top_values', full_name='featureStatistics.WeightedStringStatistics.top_values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rank_histogram', full_name='featureStatistics.WeightedStringStatistics.rank_histogram', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1766, + serialized_end=1920, +) + + +_BYTESSTATISTICS = _descriptor.Descriptor( + name='BytesStatistics', + full_name='featureStatistics.BytesStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='common_stats', full_name='featureStatistics.BytesStatistics.common_stats', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unique', full_name='featureStatistics.BytesStatistics.unique', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='avg_num_bytes', full_name='featureStatistics.BytesStatistics.avg_num_bytes', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='min_num_bytes', full_name='featureStatistics.BytesStatistics.min_num_bytes', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_num_bytes', full_name='featureStatistics.BytesStatistics.max_num_bytes', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1923, + serialized_end=2084, +) + + +_COMMONSTATISTICS = _descriptor.Descriptor( + name='CommonStatistics', + full_name='featureStatistics.CommonStatistics', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='num_non_missing', full_name='featureStatistics.CommonStatistics.num_non_missing', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_missing', full_name='featureStatistics.CommonStatistics.num_missing', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min_num_values', full_name='featureStatistics.CommonStatistics.min_num_values', index=2, + number=3, type=4, cpp_type=4" +", label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_num_values', full_name='featureStatistics.CommonStatistics.max_num_values', index=3, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='avg_num_values', full_name='featureStatistics.CommonStatistics.avg_num_values', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tot_num_values', full_name='featureStatistics.CommonStatistics.tot_num_values', index=5, + number=8, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_values_histogram', full_name='featureStatistics.CommonStatistics.num_values_histogram', index=6, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weighted_common_stats', 
full_name='featureStatistics.CommonStatistics.weighted_common_stats', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='feature_list_length_histogram', full_name='featureStatistics.CommonStatistics.feature_list_length_histogram', index=8, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2087, + serialized_end=2452, +) + + +_HISTOGRAM_BUCKET = _descriptor.Descriptor( + name='Bucket', + full_name='featureStatistics.Histogram.Bucket', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='low_value', full_name='featureStatistics.Histogram.Bucket.low_value', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='high_value', full_name='featureStatistics.Histogram.Bucket.high_value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='deprecated_count', full_name='featureStatistics.Histogram.Bucket.deprecated_count', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001'))), + _descriptor.FieldDescriptor( + name='sample_count', full_name='featureStatistics.Histogram.Bucket.sample_count', index=3, + number=4, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2634, + serialized_end=2733, +) + +_HISTOGRAM = _descriptor.Descriptor( + name='Histogram', + full_name='featureStatistics.Histogram', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='num_nan', full_name='featureStatistics.Histogram.num_nan', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_undefined', full_name='featureStatistics.Histogram.num_undefined', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='buckets', 
full_name='featureStatistics.Histogram.buckets', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='featureStatistics.Histogram.type', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='name', full_name='featureStatistics.Histogram.name', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_HISTOGRAM_BUCKET, ], + enum_types=[ + _HISTOGRAM_HISTOGRAMTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2455, + serialized_end=2779, +) + + +_RANKHISTOGRAM_BUCKET = _descriptor.Descriptor( + name='Bucket', + full_name='featureStatistics.RankHistogram.Bucket', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='low_rank', full_name='featureStatistics.RankHistogram.Bucket.low_rank', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='high_rank', full_name='featureStatistics.RankHistogram.Bucket.high_rank', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='deprecated_count', full_name='featureStatistics.RankHistogram.Bucket.deprecated_count', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001'))), + _descriptor.FieldDescriptor( + name='label', full_name='featureStatistics.RankHistogram.Bucket.label', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sample_count', full_name='featureStatistics.RankHistogram.Bucket.sample_count', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2871, + serialized_end=2983, +) + +_RANKHISTOGRAM = _descriptor.Descriptor( + name='RankHistogram', + full_name='featureStatistics.RankHistogram', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='buckets', 
full_name='featureStatistics.RankHistogram.buckets', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='name', full_name='featureStatistics.RankHistogram.name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("""").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_RANKHISTOGRAM_BUCKET, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2782, + serialized_end=2983, +) + +_DATASETFEATURESTATISTICSLIST.fields_by_name['datasets'].message_type = _DATASETFEATURESTATISTICS +_DATASETFEATURESTATISTICS.fields_by_name['features'].message_type = _FEATURENAMESTATISTICS +_FEATURENAMESTATISTICS.fields_by_name['type'].enum_type = _FEATURENAMESTATISTICS_TYPE +_FEATURENAMESTATISTICS.fields_by_name['num_stats'].message_type = _NUMERICSTATISTICS +_FEATURENAMESTATISTICS.fields_by_name['string_stats'].message_type = _STRINGSTATISTICS +_FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].message_type = _BYTESSTATISTICS +_FEATURENAMESTATISTICS.fields_by_name['custom_stats'].message_type = _CUSTOMSTATISTIC +_FEATURENAMESTATISTICS_TYPE.containing_type = _FEATURENAMESTATISTICS +_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( + _FEATURENAMESTATISTICS.fields_by_name['num_stats']) +_FEATURENAMESTATISTICS.fields_by_name['num_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] +_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( + _FEATURENAMESTATISTICS.fields_by_name['string_stats']) +_FEATURENAMESTATISTICS.fields_by_name['string_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] +_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( + _FEATURENAMESTATISTICS.fields_by_name['bytes_stats']) +_FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] +_CUSTOMSTAT" +"ISTIC.fields_by_name['histogram'].message_type = _HISTOGRAM +_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( + _CUSTOMSTATISTIC.fields_by_name['num']) +_CUSTOMSTATISTIC.fields_by_name['num'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] +_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( + _CUSTOMSTATISTIC.fields_by_name['str']) +_CUSTOMSTATISTIC.fields_by_name['str'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] +_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( + _CUSTOMSTATISTIC.fields_by_name['histogram']) +_CUSTOMSTATISTIC.fields_by_name['histogram'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] +_NUMERICSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS +_NUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM +_NUMERICSTATISTICS.fields_by_name['weighted_numeric_stats'].message_type = _WEIGHTEDNUMERICSTATISTICS +_STRINGSTATISTICS_FREQANDVALUE.containing_type = _STRINGSTATISTICS +_STRINGSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS +_STRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE +_STRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM 
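+# Illustrative sketch (not part of the generated descriptor code): how the featureStatistics
+# messages defined by the serialized descriptor above nest together, assuming the module is
+# importable as facets_overview.feature_statistics_pb2, which is how the generator code
+# further below imports it.
+#
+#   import facets_overview.feature_statistics_pb2 as fs
+#
+#   stats_list = fs.DatasetFeatureStatisticsList()
+#   dataset = stats_list.datasets.add(name='example', num_examples=3)
+#   feature = dataset.features.add(name='age', type=fs.FeatureNameStatistics.INT)
+#   feature.num_stats.min = 1.0   # writing num_stats selects that branch of the 'stats' oneof
+#   feature.num_stats.max = 3.0
+#   feature.num_stats.common_stats.num_non_missing = 3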
+_STRINGSTATISTICS.fields_by_name['weighted_string_stats'].message_type = _WEIGHTEDSTRINGSTATISTICS +_WEIGHTEDNUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM +_WEIGHTEDSTRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE +_WEIGHTEDSTRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM +_BYTESSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS +_COMMONSTATISTICS.fields_by_name['num_values_histogram'].message_type = _HISTOGRAM +_COMMONSTATISTICS.fields_by_name['weighted_common_stats'].message_type = _WEIGHTEDCOMMONSTATISTICS +_COMMONSTATISTICS.fields_by_name['feature_list_length_histogram'].message_type = _HISTOGRAM +_HISTOGRAM_BUCKET.containing_type = _HISTOGRAM +_HISTOGRAM.fields_by_name['buckets'].message_type = _HISTOGRAM_BUCKET +_HISTOGRAM.fields_by_name['type'].enum_type = _HISTOGRAM_HISTOGRAMTYPE +_HISTOGRAM_HISTOGRAMTYPE.containing_type = _HISTOGRAM +_RANKHISTOGRAM_BUCKET.containing_type = _RANKHISTOGRAM +_RANKHISTOGRAM.fields_by_name['buckets'].message_type = _RANKHISTOGRAM_BUCKET +DESCRIPTOR.message_types_by_name['DatasetFeatureStatisticsList'] = _DATASETFEATURESTATISTICSLIST +DESCRIPTOR.message_types_by_name['DatasetFeatureStatistics'] = _DATASETFEATURESTATISTICS +DESCRIPTOR.message_types_by_name['FeatureNameStatistics'] = _FEATURENAMESTATISTICS +DESCRIPTOR.message_types_by_name['WeightedCommonStatistics'] = _WEIGHTEDCOMMONSTATISTICS +DESCRIPTOR.message_types_by_name['CustomStatistic'] = _CUSTOMSTATISTIC +DESCRIPTOR.message_types_by_name['NumericStatistics'] = _NUMERICSTATISTICS +DESCRIPTOR.message_types_by_name['StringStatistics'] = _STRINGSTATISTICS +DESCRIPTOR.message_types_by_name['WeightedNumericStatistics'] = _WEIGHTEDNUMERICSTATISTICS +DESCRIPTOR.message_types_by_name['WeightedStringStatistics'] = _WEIGHTEDSTRINGSTATISTICS +DESCRIPTOR.message_types_by_name['BytesStatistics'] = _BYTESSTATISTICS +DESCRIPTOR.message_types_by_name['CommonStatistics'] = _COMMONSTATISTICS +DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM +DESCRIPTOR.message_types_by_name['RankHistogram'] = _RANKHISTOGRAM +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +DatasetFeatureStatisticsList = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatisticsList', (_message.Message,), dict( + DESCRIPTOR = _DATASETFEATURESTATISTICSLIST, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatisticsList) + )) +_sym_db.RegisterMessage(DatasetFeatureStatisticsList) + +DatasetFeatureStatistics = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatistics', (_message.Message,), dict( + DESCRIPTOR = _DATASETFEATURESTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatistics) + )) +_sym_db.RegisterMessage(DatasetFeatureStatistics) + +FeatureNameStatistics = _reflection.GeneratedProtocolMessageType('FeatureNameStatistics', (_message.Message,), dict( + DESCRIPTOR = _FEATURENAMESTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.FeatureNameStatistics) + )) +_sym_db.RegisterMessage(FeatureNameStatistics) + +WeightedCommonStatistics = _reflection.GeneratedProtocolMessageType('WeightedCommonStatistics', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTEDCOMMONSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.WeightedCommonStatistics) + )) 
+_sym_db.RegisterMessage(WeightedCommonStatistics) + +CustomStatistic = _reflection.GeneratedProtocolMessageType('CustomStatistic', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMSTATISTIC, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.CustomStatistic) + )) +_sym_db.RegisterMessage(CustomStatistic) + +NumericStatistics = _reflection.GeneratedProtocolMessageType('NumericStatistics', (_message.Message,), dict( + DESCRIPTOR = _NUMERICSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.NumericStatistics) + )) +_sym_db.RegisterMessage(NumericStatistics) + +StringStatistics = _reflection.GeneratedProtocolMessageType('StringStatistics', (_message.Message,), dict( + + FreqAndValue = _reflection.GeneratedProtocolMessageType('FreqAndValue', (_message.Message,), dict( + DESCRIPTOR = _STRINGSTATISTICS_FREQANDVALUE, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics.FreqAndValue) + )) + , + DESCRIPTOR = _STRINGSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics) + )) +_sym_db.RegisterMessage(StringStatistics) +_sym_db.RegisterMessage(StringStatistics.FreqAndValue) + +WeightedNumericStatistics = _reflection.GeneratedProtocolMessageType('WeightedNumericStatistics', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTEDNUMERICSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.WeightedNumericStatistics) + )) +_sym_db.RegisterMessage(WeightedNumericStatistics) + +WeightedStringStatistics = _reflection.GeneratedProtocolMessageType('WeightedStringStatistics', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTEDSTRINGSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.WeightedStringStatistics) + )) +_sym_db.RegisterMessage(WeightedStringStatistics) + +BytesStatistics = _reflection.GeneratedProtocolMessageType('BytesStatistics', (_message.Message,), dict( + DESCRIPTOR = _BYTESSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.BytesStatistics) + )) +_sym_db.RegisterMessage(BytesStatistics) + +CommonStatistics = _reflection.GeneratedProtocolMessageType('CommonStatistics', (_message.Message,), dict( + DESCRIPTOR = _COMMONSTATISTICS, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.CommonStatistics) + )) +_sym_db.RegisterMessage(CommonStatistics) + +Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict( + + Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict( + DESCRIPTOR = _HISTOGRAM_BUCKET, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.Histogram.Bucket) + )) + , + DESCRIPTOR = _HISTOGRAM, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.Histogram) + )) +_sym_db.RegisterMessage(Histogram) +_sym_db.RegisterMessage(Histogram.Bucket) + +RankHistogram = _reflection.GeneratedProtocolMessageType('RankHistogram', (_message.Message,), dict( + + Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict( + DESCRIPTOR = _RANKHISTOGRAM_BUCKET, + __module__ = 'feature_statistics_pb2' + # 
@@protoc_insertion_point(class_scope:featureStatistics.RankHistogram.Bucket) + )) + , + DESCRIPTOR = _RANKHISTOGRAM, + __module__ = 'feature_statistics_pb2' + # @@protoc_insertion_point(class_scope:featureStatistics.RankHistogram) + )) +_sym_db.RegisterMessage(RankHistogram) +_sym_db.RegisterMessage(RankHistogram.Bucket) + + +_STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq'].has_options = True +_STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001')) +_HISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True +_HISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001')) +_RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True +_RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001')) +# @@protoc_insertion_point(module_scope) + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""""Class for generating the feature_statistics proto. + +The proto is used as input for the Overview visualization. +"""""" + +from facets_overview.base_feature_statistics_generator import BaseFeatureStatisticsGenerator +import facets_overview.feature_statistics_pb2 as fs + + +class FeatureStatisticsGenerator(BaseFeatureStatisticsGenerator): + """"""Generator of stats proto from TF data."""""" + + def __init__(self): + BaseFeatureStatisticsGenerator.__init__(self, fs.FeatureNameStatistics, + fs.DatasetFeatureStatisticsList, + fs.Histogram) + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""""Base class for generating the feature_statistics proto from generic data. + +The proto is used as input for the Overview visualization. +"""""" + +import numpy as np +import pandas as pd +import sys + + +class BaseGenericFeatureStatisticsGenerator(object): + """"""Base class for generator of stats proto from" +"generic data."""""" + + def __init__(self, fs_proto, datasets_proto, histogram_proto): + self.fs_proto = fs_proto + self.datasets_proto = datasets_proto + self.histogram_proto = histogram_proto + + def ProtoFromDataFrames(self, dataframes, + histogram_categorical_levels_count=None): + """"""Creates a feature statistics proto from a set of pandas dataframes. + + Args: + dataframes: A list of dicts describing tables for each dataset for the + proto. Each entry contains a 'table' field of the dataframe of the + data + and a 'name' field to identify the dataset in the proto. + histogram_categorical_levels_count: int, controls the maximum number of + levels to display in histograms for categorical features. + Useful to prevent codes/IDs features from bloating the stats object. + Defaults to None. + Returns: + The feature statistics proto for the provided tables. + """""" + datasets = [] + for dataframe in dataframes: + table = dataframe['table'] + table_entries = {} + for col in table: + table_entries[col] = self.NdarrayToEntry(table[col]) + datasets.append({ + 'entries': table_entries, + 'size': len(table), + 'name': dataframe['name'] + }) + return self.GetDatasetsProto( + datasets, + histogram_categorical_levels_count=histogram_categorical_levels_count) + + def DtypeToType(self, dtype): + """"""Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""""" + if dtype.char in np.typecodes['AllFloat']: + return self.fs_proto.FLOAT + elif (dtype.char in np.typecodes['AllInteger'] or dtype == bool or + np.issubdtype(dtype, np.datetime64) or + np.issubdtype(dtype, np.timedelta64)): + return self.fs_proto.INT + else: + return self.fs_proto.STRING + + def DtypeToNumberConverter(self, dtype): + """"""Converts a Numpy dtype to a converter method if applicable. + + The converter method takes in a numpy array of objects of the provided + dtype + and returns a numpy array of the numbers backing that object for + statistical + analysis. Returns None if no converter is necessary. + + Args: + dtype: The numpy dtype to make a converter for. + + Returns: + The converter method or None. + """""" + if np.issubdtype(dtype, np.datetime64): + + def DatetimesToNumbers(dt_list): + return np.array([pd.Timestamp(dt).value for dt in dt_list]) + + return DatetimesToNumbers + elif np.issubdtype(dtype, np.timedelta64): + + def TimedetlasToNumbers(td_list): + return np.array([pd.Timedelta(td).value for td in td_list]) + + return TimedetlasToNumbers + else: + return None + + def NdarrayToEntry(self, x): + """"""Converts an ndarray to the Entry format."""""" + row_counts = [] + for row in x: + try: + rc = np.count_nonzero(~np.isnan(row)) + if rc != 0: + row_counts.append(rc) + except TypeError: + try: + row_counts.append(row.size) + except AttributeError: + row_counts.append(1) + + data_type = self.DtypeToType(x.dtype) + converter = self.DtypeToNumberConverter(x.dtype) + flattened = x.ravel() + orig_size = len(flattened) + + # Remove all None and nan values and count how many were removed. 
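+    # Note on the filtering below: the object-level None filter and the dtype-specific
+    # NaN/'nan' filters that follow jointly determine 'missing'. For example,
+    # np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float) yields
+    # vals == [1.0, 2.0, 3.0] and missing == 2, as exercised by testNdarrayToEntry
+    # in the test module further below.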
+ flattened = flattened[flattened != np.array(None)] + if converter: + flattened = converter(flattened) + if data_type == self.fs_proto.STRING: + flattened_temp = [] + for x in flattened: + try: + if str(x) != 'nan': + flattened_temp.append(x) + except UnicodeEncodeError: + if x.encode('utf-8') != 'nan': + flattened_temp.append(x) + flattened = flattened_temp + else: + flattened = flattened[~np.isnan(flattened)].tolist() + missing = orig_size - len(flattened) + return { + 'vals': flattened, + 'counts': row_counts, + 'missing': missing, + 'type': data_type + } + + def GetDatasetsProto(self, datasets, features=None, + histogram_categorical_levels_count=None): + """"""Generates the feature stats proto from dictionaries of feature values. + + Args: + datasets: An array of dictionaries, one per dataset, each one containing: + - 'entries': The dictionary of features in the dataset from the parsed + examples. + - 'size': The number of examples parsed for the dataset. + - 'name': The name of the dataset. + features: A list of strings that is a whitelist of feature names to create + feature statistics for. If set to None then all features in the + dataset + are analyzed. Defaults to None. + histogram_categorical_levels_count: int, controls the maximum number of + levels to display in histograms for categorical features. + Useful to prevent codes/IDs features from bloating the stats object. + Defaults to None. + + Returns: + The feature statistics proto for the provided datasets. + """""" + features_seen = set() + whitelist_features = set(features) if features else None + all_datasets = self.datasets_proto() + + # TODO(jwexler): Add ability to generate weighted feature stats + # if there is a specified weight feature in the dataset. + + # Initialize each dataset + for dataset in datasets: + all_datasets.datasets.add( + name=dataset['name'], num_examples=dataset['size']) + # This outer loop ensures that for each feature seen in any of the provided + # datasets, we check the feature once against all datasets. + for outer_dataset in datasets: + for key, value in outer_dataset['entries'].items(): + + # If we have a feature whitelist and this feature is not in the + # whitelist then do not process it. + # If we have processed this feature already, no need to do it again. + if ((whitelist_features and key not in whitelist_features) or + key in features_seen): + continue + features_seen.add(key) + # Default to type int if no type is found, so that the fact that all + # values are missing from this feature can be displayed. + feature_type = value['type'] if 'type' in value else self.fs_proto.INT + # Process the found feature for each dataset. + for j, dataset in enumerate(datasets): + feat = all_datasets.datasets[j].features.add( + type=feature_type, name=key.encode('utf-8')) + value = dataset['entries'].get(key) + has_data = value is not None and (value['vals'].size != 0 + if isinstance( + value['vals'], np.ndarray) else + value['vals']) + commonstats = None + # For numeric features, calculate numeric statistics. 
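+          # For INT/FLOAT features the branch below builds a STANDARD histogram with
+          # np.histogram over the finite values only: NaN values are counted into
+          # hist.num_nan and any +/-inf values are folded into the first/last bucket,
+          # after which a QUANTILES histogram is added via _PopulateQuantilesHistogram.
+          # The STRING branch after it fills avg_length, unique, a rank histogram capped
+          # at histogram_categorical_levels_count, and the two most frequent values as
+          # top_values.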
+ if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT): + featstats = feat.num_stats + commonstats = featstats.common_stats + if has_data: + nums = value['vals'] + + featstats.std_dev = np.std(nums).item() + + featstats.mean = np.mean(nums).item() + + featstats.min = np.min(nums).item() + + featstats.max = np.max(nums).item() + + featstats.median = np.median(nums).item() + + featstats.num_zeros = len(nums) - np.count_nonzero(nums) + + nums = np.array(nums) + num_nan = len(nums[np.isnan(nums)]) + num_posinf = len(nums[np.isposinf(nums)]) + num_neginf = len(nums[np.isneginf(nums)]) + + # Remove all non-finite (including NaN) values from the numeric + # values in order to calculate histogram buckets/counts. The + # inf values will be added back to the first and last buckets. + nums = nums[np.isfinite(nums)] + counts, buckets = np.histogram(nums) + hist = featstats.histograms.add() + hist.type = self.histogram_proto.STANDARD + hist.num_nan = num_nan + for bucket_count in range(len(counts)): + bucket = hist.buckets.add( + low_value=buckets[bucket_count], + high_value=buckets[bucket_count + 1], + sample_count=counts[bucket_count].item()) + # Add any negative or positive infinities to the first and last + # buckets in the histogram. + if bucket_count == 0 and num_neginf > 0: + bucket.low_value = float('-inf') + bucket.sample_count += num_neginf + elif bucket_count == len(counts) - 1 and num_posinf > 0: + bucket.high_value = float('inf') + bucket.sample_count += num_posinf + if not hist.buckets: + if num_neginf: + hist.buckets.add( + low_value=float('-inf'), + high_value=float('-inf'), + sample_count=num_neginf) + if num_posinf: + hist.buckets.add( + low_value=float('inf'), + high_value=float('inf'), + sample_count=num_posinf) + self._PopulateQuantilesHistogram(featstats.histograms.add(),nums.tolist()) + elif feat.type == self.fs_proto.STRING: + featstats = feat.string_stats + commonstats = featstats.common_stats + if has_data: + strs = [] + for item in value['vals']: + strs.append(item if hasattr(item, '__len__') else + item.encode('utf-8') if hasattr(item, 'encode') else str( + item)) + + featstats.avg_length = np.mean(np.vectorize(len)(strs)) + vals, counts = np.unique(strs, return_counts=True) + featstats.unique = len(vals) + sorted_vals = sorted(zip(counts, vals), reverse=True) + sorted_vals = sorted_vals[:histogram_categorical_levels_count] + for val_index, val in enumerate(sorted_vals): + try: + if (sys.version_info.major < 3 or + isinstance(val[1], (bytes, bytearray))): + printable_val = val[1].decode('UTF-8', 'strict') + else: + printable_val = val[1] + except (UnicodeDecodeError, UnicodeEncodeError): + printable_val = '__BYTES_VALUE__' + bucket = featstats.rank_histogram.buckets.add( + low_rank=val_index, + high_rank=val_index, + sample_count=(val[0].item()), + label=printable_val) + if val_index < 2: + featstats.top_values.add( + value=bucket.label, frequency=bucket.sample_count) + # Add the common stats regardless of the feature type. 
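+          # The block below fills CommonStatistics for every feature type: num_non_missing
+          # is derived as num_examples - num_missing, the per-example value counts give
+          # min/max/avg_num_values, and _PopulateQuantilesHistogram turns those counts into
+          # a 10-bucket QUANTILES histogram whose buckets each carry
+          # sample_count == len(counts) / 10 (0.3 for a three-example dataset, as asserted
+          # in the tests below). When 'feat_lens' is present (sequence examples) the same
+          # helper also fills feature_list_length_histogram.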
+ if has_data: + commonstats.num_missing = value['missing'] + commonstats.num_non_missing = (all_datasets.datasets[j].num_examples + - featstats.common_stats.num_missing) + commonstats.min_num_values = int(np.min(value['counts']).astype(int)) + commonstats.max_num_values = int(np.max(value['counts']).astype(int)) + commonstats.avg_num_values = np.mean(value['counts']) + if 'feat_lens' in value and value['feat_lens']: + self._PopulateQuantilesHistogram( + commonstats.feature_list_length_histogram, value['feat_lens']) + self._PopulateQuantilesHistogram(commonstats.num_values_histogram, + value['counts']) + else: + commonstats.num_non_missing = 0 + commonstats.num_missing = all_datasets.datasets[j].num_examples + + return all_datasets + + def _PopulateQuantilesHistogram(self, hist, nums): + """"""Fills in the histogram with quantile information from the provided array. + + Args: + hist: A Histogram proto message to fill in. + nums: A list of numbers to create a quantiles histogram from. + """""" + if not nums: + return + num_quantile_buckets = 10 + quantiles_to_get = [ + x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1) + ] + try: + quantiles = np.percentile(nums, quantiles_to_get) + except: + quantiles = [0.0] + hist.type = self.histogram_proto.QUANTILES + quantiles_sample_count = float(len(nums)) / num_quantile_buckets + for low, high in zip(quantiles, quantiles[1:]): + hist.buckets.add( + low_value=low, high_value=high, sample_count=quantiles_sample_count) + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator +import numpy as np +import pandas as pd +from tensorflow.python.platform import googletest + + +class GenericFeatureStatisticsGeneratorTest(googletest.TestCase): + + def setUp(self): + self.gfsg = GenericFeatureStatisticsGenerator() + + def testProtoFromDataFrames(self): + data = [[1, 'hi'], [2, 'hello'], [3, 'hi']] + df = pd.DataFrame(data, columns=['testFeatureInt', 'testFeatureString']) + dataframes = [{'table': df, 'name': 'testDataset'}] + p = self.gfsg.ProtoFromDataFrames(dataframes) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('testDataset', test_data" +".name) + self.assertEqual(3, test_data.num_examples) + self.assertEqual(2, len(test_data.features)) + + if test_data.features[0].name == 'testFeatureInt': + numfeat = test_data.features[0] + stringfeat = test_data.features[1] + else: + numfeat = test_data.features[1] + stringfeat = test_data.features[0] + + self.assertEqual('testFeatureInt', numfeat.name) + self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type) + self.assertEqual(1, numfeat.num_stats.min) + self.assertEqual(3, numfeat.num_stats.max) + self.assertEqual('testFeatureString', stringfeat.name) + self.assertEqual(self.gfsg.fs_proto.STRING, stringfeat.type) + self.assertEqual(2, stringfeat.string_stats.unique) + + def testNdarrayToEntry(self): + arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float) + + entry = self.gfsg.NdarrayToEntry(arr) + self.assertEqual(2, entry['missing']) + + arr = np.array(['a', 'b', float('nan'), 'c'], dtype=str) + entry = self.gfsg.NdarrayToEntry(arr) + self.assertEqual(1, entry['missing']) + + def testNdarrayToEntryTimeTypes(self): + arr = np.array( + [np.datetime64('2005-02-25'), + np.datetime64('2006-02-25')], + dtype=np.datetime64) + entry = self.gfsg.NdarrayToEntry(arr) + self.assertEqual([1109289600000000000, 1140825600000000000], entry['vals']) + + arr = np.array( + [np.datetime64('2009-01-01') - np.datetime64('2008-01-01')], + dtype=np.timedelta64) + entry = self.gfsg.NdarrayToEntry(arr) + self.assertEqual([31622400000000000], entry['vals']) + + def testDTypeToType(self): + self.assertEqual(self.gfsg.fs_proto.INT, + self.gfsg.DtypeToType(np.dtype(np.int32))) + # Boolean and time types treated as int + self.assertEqual(self.gfsg.fs_proto.INT, + self.gfsg.DtypeToType(np.dtype(np.bool))) + self.assertEqual(self.gfsg.fs_proto.INT, + self.gfsg.DtypeToType(np.dtype(np.datetime64))) + self.assertEqual(self.gfsg.fs_proto.INT, + self.gfsg.DtypeToType(np.dtype(np.timedelta64))) + self.assertEqual(self.gfsg.fs_proto.FLOAT, + self.gfsg.DtypeToType(np.dtype(np.float32))) + self.assertEqual(self.gfsg.fs_proto.STRING, + self.gfsg.DtypeToType(np.dtype(np.str))) + # Unsupported types treated as string for now + self.assertEqual(self.gfsg.fs_proto.STRING, + self.gfsg.DtypeToType(np.dtype(np.void))) + + def testGetDatasetsProtoFromEntriesLists(self): + entries = {} + entries['testFeature'] = { + 'vals': [1, 2, 3], + 'counts': [1, 1, 1], + 'missing': 0, + 'type': self.gfsg.fs_proto.INT + } + datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] + p = self.gfsg.GetDatasetsProto(datasets) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('testDataset', test_data.name) + self.assertEqual(3, test_data.num_examples) + self.assertEqual(1, len(test_data.features)) + numfeat = 
test_data.features[0] + self.assertEqual('testFeature', numfeat.name) + self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type) + self.assertEqual(1, numfeat.num_stats.min) + self.assertEqual(3, numfeat.num_stats.max) + hist = numfeat.num_stats.common_stats.num_values_histogram + buckets = hist.buckets + self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type) + self.assertEqual(10, len(buckets)) + self.assertEqual(1, buckets[0].low_value) + self.assertEqual(1, buckets[0].high_value) + self.assertEqual(.3, buckets[0].sample_count) + self.assertEqual(1, buckets[9].low_value) + self.assertEqual(1, buckets[9].high_value) + self.assertEqual(.3, buckets[9].sample_count) + + def testGetDatasetsProtoSequenceExampleHistogram(self): + entries = {} + entries['testFeature'] = { + 'vals': [1, 2, 2, 3], + 'counts': [1, 2, 1], + 'feat_lens': [1, 2, 1], + 'missing': 0, + 'type': self.gfsg.fs_proto.INT + } + datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] + p = self.gfsg.GetDatasetsProto(datasets) + hist = p.datasets[0].features[ + 0].num_stats.common_stats.feature_list_length_histogram + buckets = hist.buckets + self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type) + self.assertEqual(10, len(buckets)) + self.assertEqual(1, buckets[0].low_value) + self.assertEqual(1, buckets[0].high_value) + self.assertEqual(.3, buckets[0].sample_count) + self.assertEqual(1.8, buckets[9].low_value) + self.assertEqual(2, buckets[9].high_value) + self.assertEqual(.3, buckets[9].sample_count) + + def testGetDatasetsProtoWithWhitelist(self): + entries = {} + entries['testFeature'] = { + 'vals': [1, 2, 3], + 'counts': [1, 1, 1], + 'missing': 0, + 'type': self.gfsg.fs_proto.INT + } + entries['ignoreFeature'] = { + 'vals': [5, 6], + 'counts': [1, 1], + 'missing': 1, + 'type': self.gfsg.fs_proto.INT + } + datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] + p = self.gfsg.GetDatasetsProto(datasets, features=['testFeature']) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('testDataset', test_data.name) + self.assertEqual(3, test_data.num_examples) + self.assertEqual(1, len(test_data.features)) + numfeat = test_data.features[0] + self.assertEqual('testFeature', numfeat.name) + self.assertEqual(1, numfeat.num_stats.min) + + def testGetDatasetsProtoWithMaxHistigramLevelsCount(self): + # Selected entries' lengths make it easy to compute average length + data = [['hi'], ['good'], ['hi'], ['hi'], ['a'], ['a']] + df = pd.DataFrame(data, columns=['testFeatureString']) + dataframes = [{'table': df, 'name': 'testDataset'}] + # Getting proto from ProtoFromDataFrames instead of GetDatasetsProto + # directly to avoid any hand written values ex: size of dataset. 
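+    # histogram_categorical_levels_count=2 below caps the rank histogram at the two most frequent string values.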
+ p = self.gfsg.ProtoFromDataFrames(dataframes, + histogram_categorical_levels_count=2) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('testDataset', test_data.name) + self.assertEqual(6, test_data.num_examples) + self.assertEqual(1, len(test_data.features)) + numfeat = test_data.features[0] + self.assertEqual('testFeatureString', numfeat.name) + + top_values = numfeat.string_stats.top_values + self.assertEqual(3, top_values[0].frequency) + self.assertEqual('hi', top_values[0].value) + + self.assertEqual(3, numfeat.string_stats.unique) + self.assertEqual(2, numfeat.string_stats.avg_length) + + rank_hist = numfeat.string_stats.rank_histogram + buckets = rank_hist.buckets + self.assertEqual(2, len(buckets)) + self.assertEqual('hi', buckets[0].label) + self.assertEqual(3, buckets[0].sample_count) + self.assertEqual('a', buckets[1].label) + self.assertEqual(2, buckets[1].sample_count) + +if __name__ == '__main__': + googletest.main() + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from facets_overview.feature_statistics_generator import FeatureStatisticsGenerator +import numpy as np +import tensorflow as tf +from tensorflow.python.platform import googletest + + +class FeatureStatisticsGeneratorTest(googletest.TestCase): + + def setUp(self): + self.fs = FeatureStatisticsGenerator() + + def testParseExampleInt(self): + # Tests parsing examples of integers + examples = [] + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(i) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + self.assertEqual(1, len(entries)) + self.assertIn('num', entries) + info = entries['num'] + self.assertEqual(0, info['missing']) + self.assertEqual(self.fs.fs_proto.INT, info['type']) + for i in range(len(examples)): + self.assertEqual(1, info['counts'][i]) + self.assertEqual(i, info['vals'][i]) + + def testParseExampleMissingValueList(self): + # Tests parsing examples of integers + examples = [] + example = tf.train.Example() + # pylint: disable=pointless-statement + example.features.feature['str'] + # pylint: enable=pointless-statement + examples.append(example) + example = tf.train.Example() + example.features.feature['str'].bytes_list.value.append(b'test') + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + self.assertEqual(1, len(entries)) + self.assertIn('str', entries) + info = entries['str'] + self.assertEqual(1, info['missing']) + self.assertEqual(self.fs.fs_proto.STRING, info['type']) + self.assertEqual(0, info['counts'][0]) + self.assertEqual(1, info['counts'][1]) + + def _check_sequence_example_entries(self, + entries, + n_examples, + n_features, + feat_len=None): + 
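    # Shared assertions: the parsed 'num' feature should have no missing values, INT type, the expected per-example counts and, when feat_len is given, the expected feature-list lengths. +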
self.assertIn('num', entries) + info = entries['num'] + self.assertEqual(0, info['missing']) + self.assertEqual(self.fs.fs_proto.INT, info['type']) + for i in range(n_examples): + self.assertEqual(n_features, info['counts'][i]) + if feat_len is not None: + self.assertEqual(feat_len, info['feat_lens'][i]) + for i in range(n_examples * n_features): + self.assertEqual(i, info['vals'][i]) + if feat_len is None: + self.assertEqual(0, len(info['feat_lens'])) + + def testParseExampleSequenceContext(self): + # Tests parsing examples of integers in context field + examples = [] + for i in range(50): + example = tf.train.SequenceExample() + example.context.feature['num'].int64_list.value.append(i) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.context.feature, + example.feature_lists.feature_list, entries, i) + self._check_sequence_example_entries(entries, 50, 1) + self.assertEqual(1, len(entries)) + + def testParseExampleSequenceFeatureList(self): + examples = [] + for i in range(50): + example = tf.train.SequenceExample() + feat = example.feature_lists.feature_list['num'].feature.add() + feat.int64_list.value.append(i) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.context.feature, + example.feature_lists.feature_list, entries, i) + self._check_sequence_example_entries(entries, 50, 1, 1) + + def testParseExampleSequenceFeatureListMultipleEntriesInner(self): + examples = [] + for i in range(2): + example = tf.train.SequenceExample() + feat = example.feature_lists.feature_list['num'].feature.add() + for j in range(25): + feat.int64_list.value.append(i * 25 + j) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example" +".context.feature, + example.feature_lists.feature_list, entries, i) + + self._check_sequence_example_entries(entries, 2, 25, 1) + + def testParseExampleSequenceFeatureListMultipleEntriesOuter(self): + # Tests parsing examples of integers in context field + examples = [] + for i in range(2): + example = tf.train.SequenceExample() + for j in range(25): + feat = example.feature_lists.feature_list['num'].feature.add() + feat.int64_list.value.append(i * 25 + j) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.context.feature, + example.feature_lists.feature_list, entries, i) + self._check_sequence_example_entries(entries, 2, 25, 25) + + def testVaryingCountsAndMissing(self): + # Tests parsing examples of when some examples have missing features + examples = [] + for i in range(5): + example = tf.train.Example() + example.features.feature['other'].int64_list.value.append(0) + for _ in range(i): + example.features.feature['num'].int64_list.value.append(i) + examples.append(example) + example = tf.train.Example() + example.features.feature['other'].int64_list.value.append(0) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + info = entries['num'] + self.assertEqual(2, info['missing']) + self.assertEqual(4, len(info['counts'])) + for i in range(4): + self.assertEqual(i + 1, info['counts'][i]) + self.assertEqual(10, len(info['vals'])) + + def testParseExampleStringsAndFloats(self): + # Tests parsing examples of string and float features + examples = [] + for i in range(50): + example = tf.train.Example() + 
example.features.feature['str'].bytes_list.value.append(b'hi') + example.features.feature['float'].float_list.value.append(i) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + self.assertEqual(2, len(entries)) + self.assertEqual(self.fs.fs_proto.FLOAT, entries['float']['type']) + self.assertEqual(self.fs.fs_proto.STRING, entries['str']['type']) + for i in range(len(examples)): + self.assertEqual(1, entries['str']['counts'][i]) + self.assertEqual(1, entries['float']['counts'][i]) + self.assertEqual(i, entries['float']['vals'][i]) + self.assertEqual('hi', entries['str']['vals'][i].decode( + 'UTF-8', 'strict')) + + def testParseExamplesTypeMismatch(self): + examples = [] + example = tf.train.Example() + example.features.feature['feat'].int64_list.value.append(0) + examples.append(example) + example = tf.train.Example() + example.features.feature['feat'].bytes_list.value.append(b'str') + examples.append(example) + + entries = {} + self.fs._ParseExample(examples[0].features.feature, [], entries, 0) + + with self.assertRaises(TypeError): + self.fs._ParseExample(examples[1].features.feature, [], entries, 1) + + def testGetDatasetsProtoFromEntriesLists(self): + entries = {} + entries['testFeature'] = { + 'vals': [1, 2, 3], + 'counts': [1, 1, 1], + 'missing': 0, + 'type': self.fs.fs_proto.INT + } + datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] + p = self.fs.GetDatasetsProto(datasets) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('testDataset', test_data.name) + self.assertEqual(3, test_data.num_examples) + self.assertEqual(1, len(test_data.features)) + numfeat = test_data.features[0] + self.assertEqual('testFeature', numfeat.name) + self.assertEqual(self.fs.fs_proto.INT, numfeat.type) + self.assertEqual(1, numfeat.num_stats.min) + self.assertEqual(3, numfeat.num_stats.max) + + def testGetProtoNums(self): + # Tests converting int examples into the feature stats proto + examples = [] + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(i) + examples.append(example) + example = tf.train.Example() + example.features.feature['other'].int64_list.value.append(0) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] + p = self.fs.GetDatasetsProto(datasets) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('test', test_data.name) + self.assertEqual(51, test_data.num_examples) + + numfeat = test_data.features[0] if ( + test_data.features[0].name == 'num') else test_data.features[1] + self.assertEqual('num', numfeat.name) + self.assertEqual(self.fs.fs_proto.INT, numfeat.type) + self.assertEqual(0, numfeat.num_stats.min) + self.assertEqual(49, numfeat.num_stats.max) + self.assertEqual(24.5, numfeat.num_stats.mean) + self.assertEqual(24.5, numfeat.num_stats.median) + self.assertEqual(1, numfeat.num_stats.num_zeros) + self.assertAlmostEqual(14.430869689, numfeat.num_stats.std_dev, 4) + self.assertEqual(1, numfeat.num_stats.common_stats.num_missing) + self.assertEqual(50, numfeat.num_stats.common_stats.num_non_missing) + self.assertEqual(1, numfeat.num_stats.common_stats.min_num_values) + self.assertEqual(1, numfeat.num_stats.common_stats.max_num_values) + self.assertAlmostEqual(1, 
numfeat.num_stats.common_stats.avg_num_values, 4) + hist = numfeat.num_stats.common_stats.num_values_histogram + buckets = hist.buckets + self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type) + self.assertEqual(10, len(buckets)) + self.assertEqual(1, buckets[0].low_value) + self.assertEqual(1, buckets[0].high_value) + self.assertEqual(5, buckets[0].sample_count) + self.assertEqual(1, buckets[9].low_value) + self.assertEqual(1, buckets[9].high_value) + self.assertEqual(5, buckets[9].sample_count) + + self.assertEqual(2, len(numfeat.num_stats.histograms)) + buckets = numfeat.num_stats.histograms[0].buckets + self.assertEqual(self.fs.histogram_proto.STANDARD, + numfeat.num_stats.histograms[0].type) + self.assertEqual(10, len(buckets)) + self.assertEqual(0, buckets[0].low_value) + self.assertEqual(4.9, buckets[0].high_value) + self.assertEqual(5, buckets[0].sample_count) + self.assertAlmostEqual(44.1, buckets[9].low_value) + self.assertEqual(49, buckets[9].high_value) + self.assertEqual(5, buckets[9].sample_count) + + buckets = numfeat.num_stats.histograms[1].buckets + self.assertEqual(self.fs.histogram_proto.QUANTILES, + numfeat.num_stats.histograms[1].type) + self.assertEqual(10, len(buckets)) + self.assertEqual(0, buckets[0].low_value) + self.assertEqual(4.9, buckets[0].high_value) + self.assertEqual(5, buckets[0].sample_count) + self.assertAlmostEqual(44.1, buckets[9].low_value) + self.assertEqual(49, buckets[9].high_value) + self.assertEqual(5, buckets[9].sample_count) + + def testQuantiles(self): + examples = [] + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(i) + examples.append(example) + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(100) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] + p = self.fs.GetDatasetsProto(datasets) + + numfeat = p.datasets[0].features[0] + self.assertEqual(2, len(numfeat.num_stats.histograms)) + self.assertEqual(self.fs.histogram_proto.QUANTILES, + numfeat.num_stats.histograms[1].type) + buckets = numfeat.num_stats.histograms[1].buckets + self.assertEqual(10, len(buckets)) + self.assertEqual(0, buckets[0].low_value) + self.assertEqual(9.9, buckets[0].high_value) + self.assertEqual(10, buckets[0].sample_count) + self.assertEqual(100, buckets[9].low_value) + self.assertEqual(100, buckets[9].high_value) + self.assertEqual(10, buckets[9].sample_count) + + def testInfinityAndNan(self): + examples = [] + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(i) + examples.append(example) + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(float('inf')) + examples.append(example) + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(float('-inf')) + examples.append(example) + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(float('nan')) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] + p = self.fs.GetDatasetsProto(datasets) + + numfeat = p.datasets[0].features[0] + + self.assertEqual('num', numfeat.name) + 
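    # With inf and nan values present, the summary stats asserted below are NaN, while the standard histogram is still built (nan counted via num_nan, inf falling into the open-ended edge buckets). +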
self.assertEqual(self.fs.fs_proto.FLOAT, numfeat.type) + self.assertTrue(np.isnan(numfeat.num_stats.min)) + self.assertTrue(np.isnan(numfeat.num_stats.max)) + self.assertTrue(np.isnan(numfeat.num_stats.mean)) + self.assertTrue(np.isnan(numfeat.num_stats.median)) + self.assertEqual(1, numfeat.num_stats.num_zeros) + self.assertTrue(np.isnan(numfeat.num_stats.std_dev)) + self.assertEqual(53, numfeat.num_stats.common_stats.num_non_missing) + hist = buckets = numfeat.num_stats.histograms[0] + buckets = hist.buckets + self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type) + self.assertEqual(1, hist.num_nan) + self.assertEqual(10, len(buckets)) + self.assertEqual(float('-inf'), buckets[0].low_value) + self.assertEqual(4.9, buckets[0].high_value) + self.assertEqual(6, buckets[0].sample_count) + self.assertEqual(44.1, buckets[9].low_value) + self.assertEqual(float('inf'), buckets[9].high_value) + self.assertEqual(6, buckets[9].sample_count) + + def testInfinitysOnly(self): + examples = [] + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(float('inf')) + examples.append(example) + example = tf.train.Example() + example.features.feature['num'].float_list.value.append(float('-inf')) + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] + p = self.fs.GetDatasetsProto(datasets) + + numfeat = p.datasets[0].features[0] + hist = buckets = numfeat.num_stats.histograms[0] + buckets = hist.buckets + self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type) + self.assertEqual(10, len(buckets)) + self.assertEqual(float('-inf'), buckets[0].low_value) + self.assertEqual(0.1, buckets[0].high_value) + self.assertEqual(1, buckets[0].sample_count) + self.assertEqual(0.9, buckets[9].low_value) + self.assertEqual(float('inf'), buckets[9].high_value) + self.assertEqual(1, buckets[9].sample_count) + + def testGetProtoStrings(self): + # Tests converting string examples into the feature stats proto + examples = [] + for i in range(2): + example = tf.train.Example() + example.features.feature['str'].bytes_list.value.append(b'hello') + examples" +".append(example) + for i in range(3): + example = tf.train.Example() + example.features.feature['str'].bytes_list.value.append(b'hi') + examples.append(example) + example = tf.train.Example() + example.features.feature['str'].bytes_list.value.append(b'hey') + examples.append(example) + + entries = {} + for i, example in enumerate(examples): + self.fs._ParseExample(example.features.feature, [], entries, i) + + datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] + p = self.fs.GetDatasetsProto(datasets) + + self.assertEqual(1, len(p.datasets)) + test_data = p.datasets[0] + self.assertEqual('test', test_data.name) + self.assertEqual(6, test_data.num_examples) + + strfeat = test_data.features[0] + self.assertEqual('str', strfeat.name) + self.assertEqual(self.fs.fs_proto.STRING, strfeat.type) + self.assertEqual(3, strfeat.string_stats.unique) + self.assertAlmostEqual(19 / 6.0, strfeat.string_stats.avg_length, 4) + self.assertEqual(0, strfeat.string_stats.common_stats.num_missing) + self.assertEqual(6, strfeat.string_stats.common_stats.num_non_missing) + self.assertEqual(1, strfeat.string_stats.common_stats.min_num_values) + self.assertEqual(1, strfeat.string_stats.common_stats.max_num_values) + self.assertEqual(1, 
strfeat.string_stats.common_stats.avg_num_values) + hist = strfeat.string_stats.common_stats.num_values_histogram + buckets = hist.buckets + self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type) + self.assertEqual(10, len(buckets)) + self.assertEqual(1, buckets[0].low_value) + self.assertEqual(1, buckets[0].high_value) + self.assertEqual(.6, buckets[0].sample_count) + self.assertEqual(1, buckets[9].low_value) + self.assertEqual(1, buckets[9].high_value) + self.assertEqual(.6, buckets[9].sample_count) + + self.assertEqual(2, len(strfeat.string_stats.top_values)) + self.assertEqual(3, strfeat.string_stats.top_values[0].frequency) + self.assertEqual('hi', strfeat.string_stats.top_values[0].value) + self.assertEqual(2, strfeat.string_stats.top_values[1].frequency) + self.assertEqual('hello', strfeat.string_stats.top_values[1].value) + + buckets = strfeat.string_stats.rank_histogram.buckets + self.assertEqual(3, len(buckets)) + self.assertEqual(0, buckets[0].low_rank) + self.assertEqual(0, buckets[0].high_rank) + self.assertEqual(3, buckets[0].sample_count) + self.assertEqual('hi', buckets[0].label) + self.assertEqual(2, buckets[2].low_rank) + self.assertEqual(2, buckets[2].high_rank) + self.assertEqual(1, buckets[2].sample_count) + self.assertEqual('hey', buckets[2].label) + + def testGetProtoMultipleDatasets(self): + # Tests converting multiple datsets into the feature stats proto + # including ensuring feature order is consistent in the protos. + examples1 = [] + for i in range(2): + example = tf.train.Example() + example.features.feature['str'].bytes_list.value.append(b'one') + example.features.feature['num'].int64_list.value.append(0) + examples1.append(example) + examples2 = [] + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(1) + example.features.feature['str'].bytes_list.value.append(b'two') + examples2.append(example) + + entries1 = {} + for i, example1 in enumerate(examples1): + self.fs._ParseExample(example1.features.feature, [], entries1, i) + entries2 = {} + for i, example2 in enumerate(examples2): + self.fs._ParseExample(example2.features.feature, [], entries2, i) + + datasets = [{ + 'entries': entries1, + 'size': len(examples1), + 'name': 'test1' + }, { + 'entries': entries2, + 'size': len(examples2), + 'name': 'test2' + }] + p = self.fs.GetDatasetsProto(datasets) + + self.assertEqual(2, len(p.datasets)) + test_data_1 = p.datasets[0] + self.assertEqual('test1', test_data_1.name) + self.assertEqual(2, test_data_1.num_examples) + num_feat_index = 0 if test_data_1.features[0].name == 'num' else 1 + self.assertEqual(0, test_data_1.features[num_feat_index].num_stats.max) + test_data_2 = p.datasets[1] + self.assertEqual('test2', test_data_2.name) + self.assertEqual(1, test_data_2.num_examples) + self.assertEqual(1, test_data_2.features[num_feat_index].num_stats.max) + + def testGetEntriesNoFiles(self): + features, num_examples = self.fs._GetEntries(['test'], 10, + lambda unused_path: []) + self.assertEqual(0, num_examples) + self.assertEqual({}, features) + + @staticmethod + def get_example_iter(): + + def ex_iter(unused_filename): + examples = [] + for i in range(50): + example = tf.train.Example() + example.features.feature['num'].int64_list.value.append(i) + examples.append(example.SerializeToString()) + return examples + + return ex_iter + + def testGetEntries_one(self): + features, num_examples = self.fs._GetEntries(['test'], 1, + self.get_example_iter()) + self.assertEqual(1, num_examples) + self.assertTrue('num' in features) + + 
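  # The remaining _GetEntries tests cover whole-file reads and early stopping once max_entries examples have been parsed. +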
def testGetEntries_oneFile(self): + unused_features, num_examples = self.fs._GetEntries(['test'], 1000, + self.get_example_iter()) + self.assertEqual(50, num_examples) + + def testGetEntries_twoFiles(self): + unused_features, num_examples = self.fs._GetEntries(['test0', 'test1'], + 1000, + self.get_example_iter()) + self.assertEqual(100, num_examples) + + def testGetEntries_stopInSecondFile(self): + unused_features, num_examples = self.fs._GetEntries([ + 'test@0', 'test@1', 'test@2', 'test@3', 'test@4', 'test@5', 'test@6', + 'test@7', 'test@8', 'test@9' + ], 75, self.get_example_iter()) + self.assertEqual(75, num_examples) + + +if __name__ == '__main__': + googletest.main() + # Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the ""License""); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an ""AS IS"" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""""Base class for generating the feature_statistics proto from TensorFlow data. + +The proto is used as input for the Overview visualization. +"""""" + +from functools import partial +from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator +import tensorflow as tf + + +# The feature name used to track sequence length when analyzing +# tf.SequenceExamples. +SEQUENCE_LENGTH_FEATURE_NAME = 'sequence length (derived feature)' + + +class BaseFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator): + """"""Base class for generator of stats proto from TF data."""""" + + def __init__(self, fs_proto, datasets_proto, histogram_proto): + BaseGenericFeatureStatisticsGenerator.__init__( + self, fs_proto, datasets_proto, histogram_proto) + + def ProtoFromTfRecordFiles(self, + files, + max_entries=10000, + features=None, + is_sequence=False, + iterator_options=None, + histogram_categorical_levels_count=None): + """"""Creates a feature statistics proto from a set of TFRecord files. + + Args: + files: A list of dicts describing files for each dataset for the proto. + Each + entry contains a 'path' field with the path to the TFRecord file on + disk + and a 'name' field to identify the dataset in the proto. + max_entries: The maximum number of examples to load from each dataset + in order to create the proto. Defaults to 10000. + features: A list of strings that is a whitelist of feature names to create + feature statistics for. If set to None then all features in the + dataset + are analyzed. Defaults to None. + is_sequence: True if the input data from 'tables' are tf.SequenceExamples, + False if tf.Examples. Defaults to false. + iterator_options: Options to pass to the iterator that reads the examples. + Defaults to None. + histogram_categorical_levels_count: int, controls the maximum number of + levels to display in histograms for categorical features. + Useful to prevent codes/IDs features from bloating the stats object. + Defaults to None. + + Returns: + The feature statistics proto for the provided files. 
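+
+    Example (illustrative only; the path and dataset name are placeholders, and
+    this is normally called on a concrete subclass such as
+    FeatureStatisticsGenerator):
+      proto = FeatureStatisticsGenerator().ProtoFromTfRecordFiles(
+          [{'path': '/tmp/train.tfrecord', 'name': 'train'}], max_entries=1000)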
+ """""" + datasets = [] + for entry in files: + entries, size = self._GetTfRecordEntries(entry['path'], max_entries, + is_sequence, iterator_options) + datasets.append({'entries': entries, 'size': size, 'name': entry['name']}) + return self.GetDatasetsProto( + datasets, + features, + histogram_categorical_levels_count) + + def _ParseExample(self, example_features, example_feature_lists, entries, + index): + """"""Parses data from an example, populating a dictionary of feature values. + + Args: + example_features: A map of strings to tf.Features from the example. + example_feature_lists: A map of strings to tf.FeatureLists from the + example. + entries: A dictionary of all features parsed thus far and arrays of their + values. This is mutated by the function. + index: The index of the example to parse from a list of examples. + Raises: + TypeError: Raises an exception when a feature has inconsistent types + across + examples. + """""" + features_seen = set() + + for feature_list, is_feature in zip( + [example_features, example_feature_lists], [True, False]): + sequence_length = None + for feature_name in feature_list: + # If this feature has not been seen in previous examples, then + # initialize its entry into the entries dictionary. + if feature_name not in entries: + entries[feature_name] = { + 'vals': [], + 'counts': [], + 'feat_lens': [], + 'missing': index + } + + feature_entry = entries[feature_name] + feature = feature_list[feature_name] + + value_type = None + value_list = [] + if is_feature: + # If parsing a tf.Feature, extract the type and values simply. + if feature.HasField('float_list'): + value_list = feature.float_list.value + value_type = self.fs_proto.FLOAT + elif feature.HasField('bytes_list'): + value_list = feature.bytes_list.value + value_type = self.fs_proto.STRING + elif feature.HasField('int64_list'): + value_list = feature.int64_list.value + value_type = self.fs_proto.INT + else: + # If parsing a tf.FeatureList, get the type and values by iterating + # over all Features in the FeatureList. + sequence_length = len(feature.feature) + if sequence_length != 0 and feature.feature[0].HasField('float_list'): + for feat in feature.feature: + for value in feat.float_list.value: + value_list.append(value) + value_type = self.fs_proto.FLOAT + elif sequence_length != 0 and feature.feature[0].HasField( + 'bytes_list'): + for feat in feature.feature: + for value in feat.bytes_list.value: + value_list.append(value) + value_type = self.fs_proto.STRING + elif sequence_length != 0 and feature.feature[0].HasField( + 'int64_list'): + for feat in feature.feature: + for value in feat.int64_list.value: + value_list.append(value) + value_type = self.fs_proto.INT + if value_type is not None: + if 'type' not in feature_entry: + feature_entry['type'] = value_type + elif feature_entry['type'] != value_type: + raise TypeError('type mismatch for feature ' + feature_name) + feature_entry['counts'].append(len(value_list)) + feature_entry['vals'].extend(value_list) + if sequence_length is not None: + feature_entry['feat_lens'].append(sequence_length) + if value_list: + features_seen.add(feature_name) + + # For all previously-seen features not found in this example, update the + # feature's missing value. + for f in entries: + fv = entries[f] + if f not in features_seen: + fv['missing'] += 1 + + def _GetEntries(self, + paths, + max_entries, + iterator_from_file, + is_sequence=False): + """"""Extracts examples into a dictionary of feature values. 
+ + Args: + paths: A list of the paths to the files to parse. + max_entries: The maximum number of examples to load. + iterator_from_file: A method that takes a file path string and returns an + iterator to the examples in that file. + is_sequence: True if the input data from 'iterator_from_file' are + tf.SequenceExamples, False if tf.Examples. Defaults to false. + + Returns: + A tuple with two elements: + - A dictionary of all features parsed thus far and arrays of their + values. + - The number of examples parsed. + " +""""""" + entries = {} + index = 0 + for filepath in paths: + reader = iterator_from_file(filepath) + for record in reader: + if is_sequence: + sequence_example = tf.train.SequenceExample.FromString(record) + self._ParseExample(sequence_example.context.feature, + sequence_example.feature_lists.feature_list, + entries, index) + else: + self._ParseExample( + tf.train.Example.FromString(record).features.feature, [], entries, + index) + index += 1 + if index == max_entries: + return entries, index + return entries, index + + def _GetTfRecordEntries(self, path, max_entries, is_sequence, + iterator_options): + """"""Extracts TFRecord examples into a dictionary of feature values. + + Args: + path: The path to the TFRecord file(s). + max_entries: The maximum number of examples to load. + is_sequence: True if the input data from 'path' are tf.SequenceExamples, + False if tf.Examples. Defaults to false. + iterator_options: Options to pass to the iterator that reads the examples. + Defaults to None. + + Returns: + A tuple with two elements: + - A dictionary of all features parsed thus far and arrays of their + values. + - The number of examples parsed. + """""" + return self._GetEntries([path], max_entries, + partial( + tf.compat.v1.io.tf_record_iterator, + options=iterator_options), is_sequence) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from importlib.metadata import version +import sys +import os +def requirementfile(deploy_path,model,textFeatures,learner_type): + print('hola', model) + modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] + requires = '' + for mod in modules: + requires += f""{mod}=={version(mod)}\\n"" + if len(textFeatures) > 0: + tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] + for mod in tmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Extreme Gradient Boosting (XGBoost)': + mmodules = ['xgboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Light Gradient Boosting (LightGBM)': + mmodules = ['lightgbm'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Categorical Boosting (CatBoost)': + mmodules = ['catboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'arima': + mmodules = ['pmdarima'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'fbprophet': + mmodules = ['prophet'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': + mmodules = ['tensorflow'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 + mmodules = ['lifelines'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'sentencetransformer': #bug 12833 + mmodules = ['sentence_transformers'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + filename = os.path.join(deploy_path,'requirements.txt') + f = open(filename, ""wb"") + f.write(str(requires).encode('utf8')) + f.close() + + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import platform +import json +import shutil +import logging +import sys +from AionConfigManager import AionConfigManager +from sklearn.externals import joblib +class edgeformats: + def __init__(self,deploy_path): + self.deploy_path = deploy_path + self.edge_deploy_path = os.path.join(deploy_path,""edge"") + os.mkdir(self.edge_deploy_path) + + + def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file): + if edge_format == 'onnx': + from skl2onnx import convert_sklearn + from skl2onnx.common.data_types import FloatTensorType + xtrain = xtrain[features] + initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))] + filename = os.path.join(self.deploy_path,saved_model) + loaded_model = joblib.load(filename) + onx = convert_sklearn(loaded_model, initial_types=initial_type) + onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx') + with open(onnx_filename, ""wb"") as f: + f.write(onx.SerializeToString()) + self.createedgeruntimeFile(onnx_filename,profiled_data_file,features) + + def createedgeruntimeFile(self,onnx_filename,datafilepath,features): + runtimefilecontent = '' + runtimefilecontent += 'import pandas' + runtimefilecontent += '\\n' + runtimefilecontent += 'import numpy' + runtimefilecontent += '\\n' + runtimefilecontent += 'import sys' + runtimefilecontent += '\\n' + runtimefilecontent += 'import onnxruntime as rt' + runtimefilecontent += '\\n' + runtimefilecontent += 'def onnx_runtime_validation():' + runtimefilecontent += '\\n' + runtimefilecontent += ' modelfile = r""'+str(onnx_filename)+'""' + runtimefilecontent += '\\n' + runtimefilecontent += ' datafile = r""'+str(datafilepath)+'""' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataframe = pandas.read_csv(datafile)' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataframe = dataframe['+str(features)+']' + runtimefilecontent += '\\n' + runtimefilecontent += ' df = dataframe.head(8)' + runtimefilecontent += '\\n' + runtimefilecontent += ' dataset = df.values' + runtimefilecontent += '\\n' + runtimefilecontent += ' sess = rt.InferenceSession(modelfile)' + runtimefilecontent += '\\n' + runtimefilecontent += ' input_name = sess.get_inputs()[0].name' + runtimefilecontent += '\\n' + runtimefilecontent += ' label_name = sess.get_outputs()[0].name' + runtimefilecontent += '\\n' + runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape' + runtimefilecontent += '\\n' + runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)' + runtimefilecontent += '\\n' + runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]' + runtimefilecontent += '\\n' + runtimefilecontent += ' df[\\'predictions\\'] = pred_onx' + runtimefilecontent += '\\n' + runtimefilecontent += ' result = df.to_json(orient=""records"")' + runtimefilecontent += '\\n' + runtimefilecontent += ' return(result)' + runtimefilecontent += '\\n' + runtimefilecontent += 'if __name__ == ""__main__"":' + runtimefilecontent += '\\n' + runtimefilecontent += ' output = onnx_runtime_validation()' + runtimefilecontent += '\\n' + runtimefilecontent += ' print(""predictions:"",output)' + filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py') + f = open(filename, ""w"") + f.write(str(runtimefilecontent)) + f.close() + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* 
============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import platform +import json +import shutil +import logging + +class outputFormatter: + def __init__(self): + self.log = logging.getLogger('eion') + self.log.info('========> Inside Output Formatter') + + def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature): + self.output_formatfile = 'import json' + self.output_formatfile += '\\n' + self.output_formatfile += 'import numpy as np' + self.output_formatfile += '\\n' + self.output_formatfile += 'import pandas as pd' + self.output_formatfile += '\\n' + self.output_formatfile += 'import os' + self.output_formatfile += '\\n' + self.output_formatfile += 'from pathlib import Path' + self.output_formatfile += '\\n' + if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()==""anomaly_detection""): + self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity' + self.output_formatfile += '\\n' + + self.output_formatfile += 'class output_format(object):' + self.output_formatfile += '\\n' + if(model == 'VAR'): + self.output_formatfile += ' def invertTransformation(self,predictions):' + self.output_formatfile += '\\n' + self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"".."",""data"",""trainingdata.csv""))' + self.output_formatfile += '\\n' + self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount) + self.output_formatfile += '\\n' + self.output_formatfile += ' targetFeature = ""'+str(targetFeature)+'""' + self.output_formatfile += '\\n' + self.output_formatfile += ' columns = targetFeature.split("","")' + self.output_formatfile += '\\n' + self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)' + self.output_formatfile += '\\n' + self.output_formatfile += ' for j in range(0,len(columns)):' + self.output_formatfile += '\\n' + self.output_formatfile += ' for i in range(0, len(predictions)):' + self.output_formatfile += '\\n' + self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction = pred' + self.output_formatfile += '\\n' + self.output_formatfile += ' for col in columns:' + self.output_formatfile += '\\n' + self.output_formatfile += ' if col in dictDiffCount:' + self.output_formatfile += '\\n' + self.output_formatfile += ' if dictDiffCount[col]==2:' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()' + self.output_formatfile += '\\n' + self.output_formatfile += ' prediction = pred' + self.output_formatfile += '\\n' + self.output_formatfile += ' return(prediction)' + self.output_formatfile += '\\n' + self.log.info(""op:modelType: \\n""+str(modelType)) + if((model.lower() in ['autoencoder','dbscan']) and 
modelType.lower()==""anomaly_detection""): + # if modelType == 'anomaly_detection': + self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):' + self.output_formatfile += '\\n' + self.output_formatfile += ' try:' + self.output_formatfile += '\\n' + self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) ' + self.output_formatfile += '\\n' + self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) ' + self.output_formatfile += '\\n' + self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() ' + self.output_formatfile += '\\n' + self.output_formatfile += ' except Exception as e:' + self.output_formatfile += '\\n' + self.output_formatfile += ' print(f""find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} "")\\n' + self.output_formatfile += ' return anomaly_info_df' + self.output_formatfile += '\\n' + if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()==""anomaly_detection""): + if (datetimeFeature!" +"='' and datetimeFeature!='NA'): + self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' " +"self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)' + self.output_formatfile += '\\n' + if learner_type != 'DL': + self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1).round(2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' + self.output_formatfile += '\\n' + else: + if model == 'COX': + self.output_formatfile += '\\n' + self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)' + self.output_formatfile += '\\n' + #self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\\'records\\',double_precision=2)' + #self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput' + self.output_formatfile += '\\n' + else: + self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[0]' + if(learner_type == 'objectDetection'): + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\']' + else: + self.output_formatfile += '\\n' + self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\'].round(2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)' + self.output_formatfile += '\\n' + self.output_formatfile += ' outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)}' + self.output_formatfile += '\\n' + self.output_formatfile += ' return(json.dumps(outputjson))' + filename = os.path.join(deploy_path,'script','output_format.py') + #print(deploy_path) + f = open(filename, ""wb"") + self.log.info('-------> Output Mapping File Location :'+filename) + f.write(str(self.output_formatfile).encode('utf8')) + f.close() #task 11190: Item based Recommender system---Usnish +import os +def generate_recommender_code(deployPath): + code = """""" +import pandas as pd +import numpy as np +import os +ITEMID = 'itemId' +DATA_FOLDER = 'data' +USER_ITEM_MATRIX = 'user_item_matrix.csv' +ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv' +RATING = 'rating' +SIMILARITY_SCORE = 'similarity_score' + +class collaborative_filter(object): + def __init__(self): + 
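        # Load the precomputed user-item rating matrix and item-item cosine similarity matrix from the data folder. +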
self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0) + self.matrix.index.name = ITEMID + self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX)) + self.item_similarity_cosine.index.name = ITEMID + self.item_similarity_cosine.columns.name = ITEMID + def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5): + import operator + if not isinstance(picked_userid,str): + picked_userid = str(picked_userid) + if picked_userid not in self.matrix.columns: + raise KeyError(""UserID Does Not Exist"") + # Movies that the target user has not watched + try: + picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index() + picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist() + + # Movies that the target user has watched + picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \\ + .sort_values(ascending=False)) \\ + .reset_index() \\ + .rename(columns={picked_userid: 'rating'}) + + # Dictionary to save the unwatched movie and predicted rating pair + rating_prediction = {} + # Loop through unwatched movies + for picked_movie in picked_userid_unwatched: + if not isinstance(picked_movie,str): + picked_movie = str(picked_movie) + # Calculate the similarity score of the picked movie with other movies + try: + picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename( + columns={picked_movie: SIMILARITY_SCORE}) + # Rank the similarities between the picked user watched movie and the picked unwatched movie. + picked_userid_watched_similarity = pd.merge(left=picked_userid_watched, + right=picked_movie_similarity_score, + on=ITEMID, + how='inner') \\ + .sort_values(SIMILARITY_SCORE, ascending=False)[ + :number_of_similar_items] + # Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user + try: + predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6) + except Exception as e: + predicted_rating = 0 + # Save the predicted rating in the dictionary + + rating_prediction[picked_movie] = predicted_rating + except Exception as e: + rating_prediction[picked_movie] = 0 + # Return the top recommended movies + + return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations] + except Exception as e: + print(e) + raise KeyError(str(e)) + def predict(self,X): + predictions = [] + for index,row in X.iterrows(): + score = self.item_based_rec(int(row[""uid""]),int(row[""numberOfRecommendation""])) + df = pd.DataFrame(score,columns=['ItemId','Ratings']) + predictions.append(df) + return predictions"""""" + filename = os.path.join(deployPath, 'script', 'item_recommendation.py') + # print(deploy_path) + f = open(filename, ""wb"") + + f.write(str(code).encode('utf8')) + f.close() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json +from pathlib import Path +from AION.prediction_package.imports import importModule +from AION.prediction_package import utility +from AION.prediction_package.utility import TAB_CHAR +from importlib.metadata import version + +"""""" + This file provide the functionality which is common for most of the + problem types deployment. +"""""" + +def main_code(): + return """""" +class predict(): + + def __init__(self): + self.profiler = inputprofiler() + self.selector = selector() + self.trainer = trainer() + self.formatter = output_format() + + def run(self, data): + try: + df = self._parse_data(data) + raw_df = df.copy() + df = self.profiler.run(df) + df = self.selector.run(df) + df = self.trainer.run(df) + output = self.formatter.run(raw_df, df) + print(""predictions:"",output) + return (output) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + + def _parse_data(self, data): + file_path = Path(data) + if file_path.suffix == "".tsv"": + df = pd.read_csv(data,encoding='utf-8',sep='\\\\t',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix in ["".csv"", "".dat""]: + df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix in ["".gz""] and file_path.stem.endswith('.csv'): + df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) + elif file_path.suffix == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + df = pd.json_normalize(jsonData) + else: + jsonData = json.loads(data) + df = pd.json_normalize(jsonData) + return df + +import sys +if __name__ == ""__main__"": + output = predict().run(sys.argv[1]) + """""" + +def profiler_code(params, indent=0): + """""" + This will create the profiler file based on the config file. + separated file is created as profiler is required for input drift also. 
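+
+    Keys consumed from params (illustrative, inferred from the checks below):
+    'output_features', plus optionally 'text_features', 'input_features_type',
+    'word2num_features', 'unpreprocessed_columns', 'force_numeric_conv' and
+    'conversion_method' ('glove' or 'fasttext').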
+ """""" + imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): +"""""" + init_code = """""" + def __init__(self): +"""""" + if params.get('text_features'): + imported_modules.append({'module':'importlib.util'}) + init_code += """""" + # preprocessing + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if not preprocess_path.exists(): + raise ValueError(f'Preprocess model file not found: {preprocess_path}') + self.profiler = joblib.load(preprocess_path) + +"""""" + run_code = """""" + def run(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + if params.get('input_features_type'): + imported_modules.append({'module':'dtype','mod_from':'numpy'}) + run_code += f"""""" + df = df.astype({params.get('input_features_type')}) +"""""" + if params.get('word2num_features'): + imported_modules.append({'module':'w2n','mod_from':'word2number'}) + run_code += f"""""" + def s2n(value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan + df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))"""""" + if params.get('unpreprocessed_columns'): + run_code += f"""""" + unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}'] + df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if params.get('force_numeric_conv'): + run_code += f"""""" + df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""""" + if params.get('conversion_method','').lower() == 'glove': + code_text, modules = __profiler_glove_code(params) + imported_modules.extend( modules) + init_code += code_text + elif params.get('conversion_method','').lower() == 'fasttext': + init_code += __profiler_fasttext_code(params) + run_code += __profiler_main_code(params) + if params.get('unpreprocessed_columns'): + run_code += f"""""" + df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data + """""" + utility.import_modules(importer, imported_modules) + import_code = importer.getCode() + return import_code + code + init_code + run_code + +def __profiler_glove_code(params, indent=2): + modules = [] + modules.append({'module':'load_pretrained','mod_from':'text.Embedding'}) + modules.append({'module':'TextProcessing','mod_from':'text'}) + code = """""" +model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') +embed_size, pretrained_model = load_pretrained(model_path) +self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)), modules + +def __profiler_fasttext_code(params, indent=2): + code = """""" +def get_pretrained_model_path(): + try: + from AION.appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath +if not importlib.util.find_spec('fasttext'): + raise 
ValueError('fastText not installed') +else: + import os + import fasttext + import fasttext.util + cwd = os.getcwd() + os.chdir(get_pretrained_model_path()) + fasttext.util.download_" +"model('en', if_exists='ignore') + pretrained_model = fasttext.load_model('cc.en.300.bin') + os.chdir(cwd) +self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) +self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary') +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + +def __profiler_main_code(params, indent=2): + code = f"""""" +df = self.profiler.transform(df) +columns = {params['output_features']} +if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) +else: + df = pd.DataFrame(df, columns=columns) +return df +"""""" + return code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + +def feature_selector_code( params, indent=0): + modules = [ + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} + ] + code = """""" +class selector(): + # this class + def __init__(self): + pass + + def run(self, df):"""""" + code +=f"""""" + return df[{params['output_features']}] +"""""" + return code, modules + +def feature_reducer_code( params, indent=0): + modules = [ + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + code = f"""""" +class selector(): + def __init__(self): + reducer_file = (Path(__file__).parent/""model"")/""{params['reducer_file']}"" + if not reducer_file.exists(): + raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}') + self.model = joblib.load(reducer_file) + + def run(self, df): + reducer_input = {params['input_features']} + reducer_output = {params['output_features']} + df = self.model.transform(df[reducer_input]) + return pd.DataFrame(df,columns=reducer_output) + """""" + if indent: + code = code.replace('\\n', '\\n'+(indent * TAB_CHAR)) + return code, modules + +def create_feature_list(config=None, target_feature=None, deploy_path=None): + featurelist = [] + if 'profiler' in config: + if 'input_features_type' in config['profiler']: + input_features = config['profiler']['input_features_type'] + for x in input_features: + featurelt={} + featurelt['feature'] = x + if x == target_feature: + featurelt['Type'] = 'Target' + else: + if input_features[x] in ['int','int64','float','float64']: + featurelt['Type'] = 'Numeric' + elif input_features[x] == 'object': + featurelt['Type'] = 'Text' + elif input_features[x] == 'category': + featurelt['Type'] = 'Category' + else: + featurelt['Type'] = 'Unknown' + featurelist.append(featurelt) + + featurefile = f"""""" +import json +def getfeatures(): + try: + features = {featurelist} + outputjson = {{""status"":""SUCCESS"",""features"":features}} + output = json.dumps(outputjson) + print(""Features:"",output) + return(output) + except Exception as e: + output = {{""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}} + print(""Features:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = getfeatures() +"""""" + with open( deploy_path/'featureslist.py', 'wb') as f: + f.write( str(featurefile).encode('utf8')) + +def requirement_file(deploy_path,model,textFeatures,learner_type='ML'): + modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] + requires 
= '' + for mod in modules: + requires += f""{mod}=={version(mod)}\\n"" + if len(textFeatures) > 0: + tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] + for mod in tmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Extreme Gradient Boosting (XGBoost)': + mmodules = ['xgboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Light Gradient Boosting (LightGBM)': + mmodules = ['lightgbm'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model == 'Categorical Boosting (CatBoost)': + mmodules = ['catboost'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'arima': + mmodules = ['pmdarima'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'fbprophet': + mmodules = ['prophet'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': + mmodules = ['tensorflow'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 + mmodules = ['lifelines'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + if model.lower() == 'sentencetransformer': #bug 12833 + mmodules = ['sentence_transformers'] + for mod in mmodules: + requires += f""{mod}=={version(mod)}\\n"" + with open( deploy_path/'requirements.txt', 'wb') as f: + f.write(str(requires).encode('utf8')) + +def create_readme_file(deploy_path,modelfile,features): + data = json.dumps([{x:x+'_value'} for x in features]) + backslash_data = data.replace('""', '\\\\""') + content = f"""""" +========== Files Structures ========== +{modelfile} ------ Trained Model +aion_prediction.py --> Python package entry point +script/inputprofiler.py --> Profiling like FillNA and Category to Numeric + +========== How to call the model ========== +============== From Windows Terminal ========== +python aion_prediction.py ""{backslash_data}"" +============== From Linux Terminal ========== +python aion_prediction.py ""{data}"" + +============== Output ========== +{{""status"":""SUCCESS"",""data"":[{{""Data1"":""Value"",""prediction"":""Value""}}]}} ## for single Row/Record +{{""status"":""SUCCESS"",""data"":[{{""Data1"":""Value"",""prediction"":""Value""}},{{""Data1"":""Value"",""prediction"":""Value""}}]}} ## For Multiple Row/Record +{{""status"":""ERROR"",""message"":""description""}} ## In Case Exception or Error + """""" + filename = deploy_path/'readme.txt' + with open(filename, 'w') as f: + f.write(content) + +def create_util_folder(deploy_path): + import tarfile + ext_path = Path(__file__).parent.parent/'utilities' + for x in ext_path.iterdir(): + if x.suffix == '.tar': + if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: + my_tar = tarfile.open(x) + my_tar.extractall(deploy_path) + my_tar.close() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import os +import platform +import json +import shutil +import logging + +class aionPrediction: + + def __init__(self): + self.log = logging.getLogger('eion') + + def create_optimus_prediction_file (self,classname,deploy_path,learner_type): + self.predictionFile = 'import warnings' + self.predictionFile += '\\n' + self.predictionFile += 'warnings.filterwarnings(""ignore"")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from pandas import json_normalize' + self.predictionFile += '\\n' + self.predictionFile += 'from importlib import import_module' + self.predictionFile += '\\n' + self.predictionFile += 'import importlib.util' + self.predictionFile += '\\n' + self.predictionFile += 'class prediction:' + self.predictionFile += '\\n' + self.predictionFile += ' def predict_from_json(self,json_data):' + self.predictionFile += '\\n' + self.predictionFile += ' data = json.loads(json_data)' + self.predictionFile += '\\n' + self.predictionFile += ' output=self.predict(data)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile += '\\n' + self.predictionFile += ' def predict_from_file(self,filename):' + self.predictionFile += '\\n' + self.predictionFile += ' with open(filename,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' data = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' output=self.predict(data)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile += '\\n' + self.predictionFile += ' def predict(self,json_data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + #self.predictionFile += ' jsonData = json.loads(json_data)' + self.predictionFile += ' jsonData=json_data' + self.predictionFile += '\\n' + self.predictionFile += ' model_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/trained_model.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' model_obj.loader.exec_module(model)' + self.predictionFile += '\\n' + #if(learner_type != 'TextML'): + self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/inputprofiler.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)' + self.predictionFile += '\\n' + + self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/selector.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' selector_obj.loader.exec_module(selector)' + self.predictionFile += '\\n' + + self.predictionFile += ' output_format_obj = 
importlib.util.spec_from_file_location(""module.name"", os.path.dirname(os.path.abspath(__file__))+""/output_format.py"")' + self.predictionFile += '\\n' + self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)' + self.predictionFile += '\\n' + self.predictionFile += ' output_format_obj.loader.exec_module(output_format)' + self.predictionFile += '\\n' + + self.predictionFile += ' df = json_normalize(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' df0 = df.copy()' + self.predictionFile += '\\n' + #if(learner_type != 'TextML'): + self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()' + self.predictionFile += '\\n' + self.predictionFile += ' df = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + self.predictionFile += ' selectobj = selector.selector()' + self.predictionFile += '\\n' + self.predictionFile += ' df = selectobj.apply_selector(df)' + self.predictionFile += '\\n' + self.predictionFile += ' output = model.trained_model().predict(df,"""")' + self.predictionFile += '\\n' + self.predictionFile += ' outputobj = output_format.output_format()' + self.predictionFile += '\\n' + self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' + #self.predictionFile += '\\n' + #self.predictionFile += ' print(output)' + self.predictionFile += '\\n' + self.predictionFile += ' return output' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile" +"+= ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' return json.dumps(output)' + self.predictionFile += '\\n' + self.predictionFile += ' " +""")' + self.predictionFile += '\\n' + self.predictionFile += 'import json' + self.predictionFile += '\\n' + self.predictionFile += 'import os' + self.predictionFile += '\\n' + self.predictionFile += 'import sys' + self.predictionFile += '\\n' + self.predictionFile += 'import pandas as pd' + self.predictionFile += '\\n' + self.predictionFile += 'from pandas import json_normalize' + self.predictionFile += '\\n' + + if(learner_type.lower() != 'recommendersystem'): #task 11190 + self.predictionFile += 'from script.selector import selector' + self.predictionFile += '\\n' + self.predictionFile += 'from script.inputprofiler import inputprofiler' + self.predictionFile += '\\n' + #self.predictionFile += 'from '+classname+' import '+classname + self.predictionFile += 'from script.trained_model import trained_model' + self.predictionFile += '\\n' + else: + self.predictionFile += 'from script.item_recommendation import collaborative_filter' + self.predictionFile += '\\n' + + self.predictionFile += 'from script.output_format import output_format' + self.predictionFile += '\\n' + if (learner_type != 'RecommenderSystem'): #task 11190 + self.predictionFile += 'profilerobj = inputprofiler()' + self.predictionFile += '\\n' + self.predictionFile += 'selectobj = selector()' + self.predictionFile += '\\n' + self.predictionFile += 'modelobj = trained_model()' + self.predictionFile += '\\n' + else: + self.predictionFile += 'colabobj = collaborative_filter()' + self.predictionFile += '\\n' + + self.predictionFile += 'outputobj = output_format()' + self.predictionFile += '\\n' + self.predictionFile += 'def predict(data):' + self.predictionFile += '\\n' + self.predictionFile += ' try:' + self.predictionFile += '\\n' + self.predictionFile += ' if os.path.splitext(data)[1] == "".tsv"":' + 
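# --- Illustrative sketch (not part of the generated file) ---
# The generated prediction entry script above loads its sibling scripts
# (inputprofiler.py, selector.py, trained_model.py, output_format.py) from the
# deployment folder at runtime with importlib. Below is a minimal, self-contained
# sketch of that dynamic-loading pattern; only 'inputprofiler.py' and the
# apply_profiler call are taken from the generated code above, the helper name
# _load_sibling_module and the usage lines are assumptions for illustration.
import os
import importlib.util

def _load_sibling_module(filename, module_name):
    # Build a module spec from a file sitting next to this script and execute it,
    # returning the loaded module object.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Usage mirroring the generated script (commented out because the sibling files
# only exist inside a deployed model package):
# inputprofiler = _load_sibling_module('inputprofiler.py', 'inputprofiler')
# df = inputprofiler.inputprofiler().apply_profiler(df)
# --- end sketch ---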
self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',sep=\\'\\\\t\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' elif os.path.splitext(data)[1] == "".csv"":' + self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' elif os.path.splitext(data)[1] == "".dat"":' + self.predictionFile += '\\n' + self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' if os.path.splitext(data)[1] == "".json"":' + self.predictionFile += '\\n' + self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.load(f)' + self.predictionFile += '\\n' + self.predictionFile += ' else:' + self.predictionFile += '\\n' + self.predictionFile += ' jsonData = json.loads(data)' + self.predictionFile += '\\n' + self.predictionFile += ' df = json_normalize(jsonData)' + self.predictionFile += '\\n' + self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)' + self.predictionFile += '\\n' + if str(rowfilterexpression) != '': + self.predictionFile += ' filterexpression = ""'+rowfilterexpression+'""' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.query(filterexpression)' + self.predictionFile += '\\n' + #print(grouperbyjson) + if str(grouperbyjson) != '': + datetime = grouperbyjson['datetime'] + unit = grouperbyjson['unit'] + if unit == '': + self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'])' + self.predictionFile += '\\n' + else: + self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'],unit=\\''+unit+'\\')' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.reset_index()' + self.predictionFile += '\\n' + self.predictionFile += ' df.set_index(\\'date\\',inplace=True)' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.'+grouperbyjson['groupbystring'] + self.predictionFile += '\\n' + self.predictionFile += ' df.columns = df.columns.droplevel(0)' + self.predictionFile += '\\n' + self.predictionFile += ' df = df.reset_index()' + self.predictionFile += '\\n' + + self.predictionFile += ' df0 = df.copy()' + self.predictionFile += '\\n' + + if(learner_type != 'RecommenderSystem'): #task 11190 + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + else: + self.predictionFile += ' df = profilerobj.apply_profiler(df)' + self.predictionFile += '\\n' + self.predictionFile += ' df = selectobj.apply_selector(df)' + self.predictionFile += '\\n' + #self.predictionFile += ' modelobj = '+classname+'()' + self.predictionFile += ' output = modelobj.predict(df,"""")' + self.predictionFile += '\\n' + else: + self.predictionFile += ' output = colabobj.predict(df)' + self.predictionFile += '\\n' + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)' + self.predictionFile += '\\n' + else: + 
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",output)' + self.predictionFile += '\\n' + self.predictionFile += ' return(output)' + self.predictionFile += '\\n' + self.predictionFile += ' except KeyError as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' except Exception as e:' + self.predictionFile += '\\n' + self.predictionFile += ' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + self.predictionFile += '\\n' + self.predictionFile += ' print(""predictions:"",json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += ' return (json.dumps(output))' + self.predictionFile += '\\n' + self.predictionFile += 'if __name__ == ""__main__"":' + self.predictionFile += '\\n' + self.predictionFile += ' output = predict(sys.argv[1])' + + filename = os.path.join(deploy_path,'aion_predict.py') + f = open(filename, ""w"") + f.write(str(self.predictionFile)) + f.close() + def create_classification_text_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +# from evidently.dashboard import Dashboard +# from evidently.tabs import ClassificationPerformanceTab +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +from evidently.report import Report +from evidently.pipeline.column_mapping import ColumnMapping +from evidently.metric_preset import ClassificationPreset +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.text_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()]) + iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = 
{""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + def create_classification_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from evidently.report import Report +from evidently.metric_preset import ClassificationPreset +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + model_performance_dashboard = Report(metrics = [ClassificationPreset()]) + model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + model_performance_dashboard.save_html(report) + metrics_output = model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os." 
+"path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + def create_model_service(self,deploy_path,serviceName,problemType): + + filedata = """""" +from flask import Flask, jsonify, request +from flask_restful import Resource, Api +from aion_predict import predict"""""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +from aion_xai import local_analysis +from aion_ipdrift import drift +from aion_opdrift import odrift"""""" + filedata += """""" +import json +import os +import pandas as pd +import io +import argparse +from pathlib import Path +from flask_cors import CORS, cross_origin +app = Flask(__name__) +#cross origin resource from system arguments +parser = argparse.ArgumentParser() +parser.add_argument('-ip', '--ipaddress', help='IP Address') +parser.add_argument('-p', '--port', help='Port Number') +parser.add_argument(""-cors"", type=str, required=False) +d = vars(parser.parse_args()) + +modelPath = Path(__file__).parent +try: + with open( (modelPath/'etc')/'display.json', 'r') as f: + disp_data = json.load(f) + is_explainable = not disp_data.get('textFeatures') +except: + disp_data = {} + is_explainable = True + +if ""cors"" in d.keys(): + if d[""cors""] != '' and d[""cors""] != None: + d[""cors""] = [s.strip() for s in d[""cors""].split("","")] + #cors = CORS(app, resources={r""/AION/*"": {""origins"": [""http://localhost"", ""http://localhost:5000""]}}) + cors = CORS(app, resources={r""/AION/*"": {""origins"": d[""cors""]}}) +api = Api(app) +class predictapi(Resource): + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + output = predict().run(json.dumps(data)) + return jsonify(json.loads(output)) + +class predictfileapi(Resource): + def post(self): + if 'file' in request.files: + file = request.files['file'] + urlData = file.read() + rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) + data = rawData.to_json(orient='records') + output = predict().run(data) + return jsonify(json.loads(output)) + else: + displaymsg='File is mising' + return jsonify(displaymsg) + + def get(self): + msg=\\"""""" +RequestType: POST +Body:send file content in body\\"""""" + return jsonify(msg) + """""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +class explainapi(Resource): + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + if is_explainable: + output = local_analysis(json.dumps(data)) + else: + output = json.dumps({""status"":""FAIL"",""data"":""explain api is not supported when text features are used for training""}) + return jsonify(json.loads(output)) + +class monitoringapi(Resource): + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + def post(self): + data = request.get_json() + output = drift(json.dumps(data)) + return 
jsonify(json.loads(output)) + +class performanceapi(Resource): + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + def post(self): + data = request.get_json() + output = odrift(json.dumps(data)) + return jsonify(json.loads(output)) + """""" + filedata += """""" +api.add_resource(predictapi, '/AION/{serviceName}/predict')"""""".format(serviceName=serviceName) + filedata += """""" +api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')"""""".format(serviceName=serviceName) + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +api.add_resource(explainapi, '/AION/{serviceName}/explain') +api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') +api.add_resource(performanceapi, '/AION/{serviceName}/performance')"""""".format(serviceName=serviceName) + filedata += """""" +if __name__ == '__main__': + args = parser.parse_args() + app.run(args.ipaddress,port = args.port,debug = True)"""""" + filename = os.path.join(deploy_path,'aion_service.py') + f = open(filename, ""wb"") + f.write(str(filedata).encode('utf8')) + f.close() + + def create_regression_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from evidently.report import Report +from evidently.metric_preset import RegressionPreset +from evidently.pipeline.column_mapping import ColumnMapping +from aion_predict import predict +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) + iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = 
{""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_regression_text_performance_file(self,deploy_path,features,target): + features = "","".join([feature for feature in features]) + self.predictionFile = """"""\\ +import pandas as pd +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +from pandas import json_normalize +from aion_predict import predict +from evidently.report import Report +from evidently.pipeline.column_mapping import ColumnMapping +from evidently.metric_preset import RegressionPreset +def odrift(data): + try: +"""""" + self.predictionFile += ' features = \\''+features+'\\'' + self.predictionFile += '\\n' + self.predictionFile += ' target = \\''+target+'\\'' + self.predictionFile += '\\n' + self.predictionFile +=""""""\\ + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + production = predict().run(jsonData['currentDataLocation']) + reference = predict().run(jsonData['trainingDataLocation']) + production = json.loads(production) + reference = json.loads(reference) + if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): + production = production['data'] + production = json_normalize(production) + reference = reference['data'] + reference = json_normalize(reference) + production['target'] = production[target] + reference['target'] = reference[target] + column_mapping = ColumnMapping() + column_mapping.target = target + column_mapping.prediction = 'prediction' + column_mapping.datetime = None + column_mapping.numerical_features = features.split(',') + iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) + iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) + report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') + iris_model_performance_dashboard.save_html(report) + metrics_output = iris_model_performance_dashboard.as_dict() + output = {""status"":""SUCCESS"",""htmlPath"":report, 'drift_details':metrics_output['metrics']} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + else: + output = {""status"":""SUCCESS"",""htmlPath"":'NA'} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + + except KeyError as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + print(e) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""drift:"",json.dumps(output)) + return (json.dumps(output)) + +if __name__ == ""__main__"": + output = odrift(sys.argv[1])"""""" + + filename = os.path.join(deploy_path,'aion_opdrift.py') + f = open(filename, ""wb"") + f.write(str(self.predictionFile).encode('utf8')) + f.close() + + def create_publish_service(self,datalocation,usecaseid,version,problemType): + filename = os.path.join(datalocation,'aion_publish_service.py') + if not 
os.path.exists(filename): + filedata = """""" +import sys +import json +import time +import sqlite3 +import argparse +import pandas as pd +import io +from pathlib import Path +from datetime import datetime + +filename = Path(__file__).parent/'config.json' +with open (filename, ""r"") as f: + data = json.loads(f.read()) +modelVersion = str(data['version']) +modelPath = Path(__file__).parent/modelVersion +sys.path.append(str(modelPath)) + +try: + with open( (modelPath/'etc')/'display.json', 'r') as f: + disp_data = json.load(f) + is_explainable = not disp_data.get('textFeatures') +except: + disp_data = {} + is_explainable = True + +from flask import Flask, jsonify, request +from flask_restful import Resource, Api +from flask_cors import CORS, cross_origin +from flask import Response +from aion_predict import predict +"""""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +from aion_ipdrift import drift +from aion_opdrift import odrift +if is_explainable: + from aion_xai import local_analysis +"""""" + filedata += """""" +dataPath = Path(__file__).parent/'data' +dataPath.mkdir(parents=True, exist_ok=True) +app = Flask(__name__) +#cross origin resource from system arguments +parser = argparse.ArgumentParser() +parser.add_argument('-ip', '--ipaddress', help='IP Address') +parser.add_argument('-p', '--port', help='Port Number') +parser.add_argument(""-cors"", type=str, required=False) +d = vars(parser.parse_args()) + +if ""cors"" in d.keys(): + if d[""cors""] != '' and d[""cors""] != None: + d[""cors""] = [s.strip() for s in d[""cors""].split("","")] + #cors = CORS(app, resources={r""/AION/*"": {""origins"": [""http://localhost"", ""http://localhost:5000""]}}) + cors = CORS(app, resources={r""/AION/*"": {""origins"": d[""cors""]}}) +api = Api(app) + +class sqlite_db(): + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem +" +"'.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self, table_name,condition=''): + if condition == '': + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + else: + return pd.read_sql_query(f""SELECT * FROM {table_name} WHERE {condition}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def update(self,table_name,updates,condition): + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + return True + def write(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = 
insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + def close(self): + self.conn.close()"""""" + filedata += """""" +app = Flask(__name__) +api = Api(app) + +class predictapi(Resource): + + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('metrices'): + data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',""noOfActualCalls"":'0',""mid"":'0'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) + data = request.get_json() + output = predict().run(json.dumps(data)) + outputobj = json.loads(output) + if outputobj['status'] == 'SUCCESS': + try: + df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') + if not sqlite_dbObj.table_exists('prodData'): + sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) + sqlite_dbObj.write(df2,'prodData') + except: + pass + try: + data = sqlite_dbObj.read('metrices') + #print(data) + if len(data) == 0: + data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',""noOfActualCalls"":'0'}] + data = pd.read_json(json.dumps(data), orient ='records') + sqlite_dbObj.write(data,'metrices') + else: + noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 + sqlite_dbObj.update('metrices',""noOfPredictCalls = '""+str(noofPredictCalls)+""'"",""mid = 0"") + except Exception as e: + print(e) + pass + return jsonify(json.loads(output)) +class predictfileapi(Resource): + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('metrices'): + data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',""noOfActualCalls"":'0',""mid"":'0'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) + if 'file' in request.files: + file = request.files['file'] + urlData = file.read() + rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) + data = rawData.to_json(orient='records') + output = predict().run(data) + outputobj = json.loads(output) + if outputobj['status'] == 'SUCCESS': + try: + df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') + if not sqlite_dbObj.table_exists('prodData'): + sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) + sqlite_dbObj.write(df2,'prodData') + except: + pass + try: + data = sqlite_dbObj.read('metrices') + #print(data) + if len(data) == 0: + data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',""noOfActualCalls"":'0'}] + data = pd.read_json(json.dumps(data), orient ='records') + sqlite_dbObj.write(data,'metrices') + else: + noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 + sqlite_dbObj.update('metrices',""noOfPredictCalls = '""+str(noofPredictCalls)+""'"",""mid = 0"") + except Exception as e: + print(e) + pass + return jsonify(json.loads(output)) + else: + output = {'status':'error','msg':'File is missing'} + return jsonify(output) + """""" + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +class explainapi(Resource): + + def get(self): + features = disp_data.get('modelFeatures') + if features: + msg=\\"""""" +RequestType: POST 
+Content-Type=application/json +Body: {displaymsg} + \\"""""".format(displaymsg={ x:'Value' for x in features}) + else: + displaymsg='Data in JSON Format' + return jsonify(displaymsg) + + def post(self): + data = request.get_json() + if is_explainable: + output = local_analysis(json.dumps(data)) + else: + output = json.dumps({""status"":""FAIL"",""data"":""explain api is not supported when text features are used for training""}) + return jsonify(json.loads(output)) + +class monitoringapi(Resource): + + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('monitoring'): + data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) + trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' + if not sqlite_dbObj.table_exists('prodData'): + return jsonify({'status':'Error','msg':'Prod data not available'}) + data = sqlite_dbObj.read('prodData') + filetimestamp = str(int(time.time())) + dataFile = dataPath/('AION_' + filetimestamp+'.csv') + data.to_csv(dataFile, index=False) + data = request.get_json() + data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} + output = drift(json.dumps(data)) + outputData = json.loads(output) + status = outputData['status'] + + if status == 'SUCCESS': + Msg = str(outputData['data']) + else: + Msg = 'Error during drift analysis' + now = datetime.now() # current date and time + date_time = now.strftime(""%m/%d/%Y, %H:%M:%S"") + data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.write(data,'monitoring') + return jsonify(json.loads(output))"""""" + filedata += """""" + +class matricesapi(Resource): + + def get(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if sqlite_dbObj.table_exists('metrices'): + df1 = sqlite_dbObj.read('metrices') + else: + df1 = pd.DataFrame() + #print(df1) + if sqlite_dbObj.table_exists('monitoring'): + df2 = sqlite_dbObj.read('monitoring') + else: + df2 = pd.DataFrame() + + msg = {'Deployed Version':str(modelVersion)} + + if df1.shape[0] > 0: + msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])}) + else: + msg.update({'noOfPredictCalls':'0'}) + driftDetails = [] + for idx in reversed(df2.index): + driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])} + driftDetails.append(driftd) + msg.update({'driftDetails':driftDetails}) + return jsonify(msg) + +class performanceapi(Resource): + + def get(self): + return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) + + def post(self): + sqlite_dbObj = sqlite_db(dataPath,'data.db') + if not sqlite_dbObj.table_exists('monitoring'): + data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) + trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' + if not sqlite_dbObj.table_exists('prodData'): + return jsonify({'status':'Error','msg':'Prod data not available'}) + data = sqlite_dbObj.read('prodData') + filetimestamp = str(int(time.time())) + dataFile = dataPath/('AION_' + 
filetimestamp+'.csv') + data.to_csv(dataFile, index=False) + data = request.get_json() + data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} + output = odrift(json.dumps(data)) + return jsonify(json.loads(output)) + """""" + filedata += """""" +api.add_resource(predictapi, '/AION/{serviceName}/predict') +api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file') +api.add_resource(matricesapi, '/AION/{serviceName}/metrices')"""""".format(serviceName=usecaseid) + if problemType.lower() == 'classification' or problemType.lower() == 'regression': + filedata += """""" +api.add_resource(explainapi, '/AION/{serviceName}/explain') +api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') +api.add_resource(performanceapi, '/AION/{serviceName}/performance') +"""""".format(serviceName=usecaseid) + filedata += """""" +if __name__ == '__main__': + args = parser.parse_args() + app.run(args.ipaddress,port = args.port,debug = True)"""""" + f = open(filename, ""wb"") + f.write(str(filedata).encode('utf8')) + f.close() + data = {'version':version} + filename = os.path.join(datalocation,'config.json') + with open(filename, ""w"") as outfile: + json.dump(data, outfile) + outfile.close() ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os,sys +import platform +import json +import shutil +import logging +from pathlib import Path + +def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): + self.selectorfile += 'import pandas as pd' + self.selectorfile += '\\n' + self.selectorfile += 'import joblib' + self.selectorfile += '\\n' + self.selectorfile += 'import os' + self.selectorfile += '\\n' + self.selectorfile += 'import numpy as np' + self.selectorfile += '\\n' + self.selectorfile += 'class selector(object):' + self.selectorfile += '\\n' + self.selectorfile += ' def apply_selector(self,df):' + self.selectorfile += '\\n' + if pcaModel_pickle_file != '': + self.selectorfile += "" pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+pcaModel_pickle_file+""'))"" + self.selectorfile += '\\n' + self.selectorfile += ' bpca_features = '+str(bpca_features) + self.selectorfile += '\\n' + self.selectorfile += ' apca_features = '+str(apca_features) + self.selectorfile += '\\n' + self.selectorfile += ' df = pcaModel.transform(df[bpca_features])' + self.selectorfile += '\\n' + self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)' + self.selector" +"file += '\\n' + if(len(features) != 0) and model_type != 'BM25': + if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder': + self.selectorfile += ' df = df['+str(features)+']' + self.selectorfile += '\\n' + self.selectorfile += ' return(df)' + filename = os.path.join(deploy_path,'script','selector.py') + f = open(filename, ""wb"") + self.log.info('-------> Feature Selector File Location :'+filename) + 
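# --- Illustrative sketch (assumed example, not emitted verbatim by the code above) ---
# create_selector_file builds script/selector.py as a string; this is roughly what the
# rendered file looks like when a PCA model was trained: load the saved transformer
# with joblib, project the pre-PCA columns, rename them to the post-PCA feature names,
# then keep only the selected features. The file name 'pca_model.sav' and the column
# lists are placeholders, not values produced by the generator.
import os
import joblib
import pandas as pd

class selector(object):
    def apply_selector(self, df):
        # Load the persisted PCA/feature-reduction model from the model folder.
        pcaModel = joblib.load(os.path.join(os.path.dirname(__file__), '..', 'model', 'pca_model.sav'))
        bpca_features = ['f1', 'f2', 'f3']   # columns fed into PCA (placeholder names)
        apca_features = ['pca_0', 'pca_1']   # names given to the projected columns (placeholders)
        df = pcaModel.transform(df[bpca_features])
        df = pd.DataFrame(df, columns=apca_features)
        # Final subset to the selected feature list, as in the generated template.
        df = df[apca_features]
        return df
# --- end sketch ---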
f.write(str(self.selectorfile).encode('utf8')) + f.close() + featurefile = 'import json' + featurefile +='\\n' + featurefile += 'def getfeatures():' + featurefile +='\\n' + featurefile +=' try:' + featurefile +='\\n' + featurelist = [] + if 'profiler' in config: + if 'input_features_type' in config['profiler']: + inputfeatures = config['profiler']['input_features_type'] + for x in inputfeatures: + featurelt={} + featurelt['feature'] = x + print(x,inputfeatures[x]) + if x == targetFeature: + featurelt['Type'] = 'Target' + else: + if inputfeatures[x] in ['int','int64','float','float64']: + featurelt['Type'] = 'Numeric' + elif inputfeatures[x] == 'object': + featurelt['Type'] = 'Text' + elif inputfeatures[x] == 'category': + featurelt['Type'] = 'Category' + else: + featurelt['Type'] = 'Unknown' + featurelist.append(featurelt) + + featurefile +=' features = '+str(featurelist) + featurefile +='\\n' + featurefile +=' outputjson = {""status"":""SUCCESS"",""features"":features}' + featurefile +='\\n' + featurefile +=' output = json.dumps(outputjson)' + featurefile +='\\n' + featurefile +=' print(""Features:"",output)' + featurefile +='\\n' + featurefile +=' return(output)' + featurefile +='\\n' + featurefile +=' except Exception as e:' + featurefile +='\\n' + featurefile +=' output = {""status"":""FAIL"",""message"":str(e).strip(\\'""\\')}' + featurefile +='\\n' + featurefile +=' print(""Features:"",json.dumps(output))' + featurefile +='\\n' + featurefile +=' return (json.dumps(output))' + featurefile +='\\n' + featurefile +='if __name__ == ""__main__"":' + featurefile +='\\n' + featurefile +=' output = getfeatures()' + filename = os.path.join(deploy_path,'featureslist.py') + f = open(filename, ""wb"") + f.write(str(featurefile).encode('utf8')) + f.close() + +def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): + self.modelfile += ' def __init__(self):' + self.modelfile += '\\n' + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()==""autoencoder""): + modelfile=modelfile.replace('.sav','') + self.modelfile+="" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'TextDL' or learner_type == 'DL'): + if modelName.lower() == 'googlemodelsearch': + self.modelfile += ' import autokeras as ak' + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))"" + self.modelfile += '\\n' + else: + if scoreParam == 'recall': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'recall': recall_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[recall_m])' + self.modelfile += '\\n' + elif scoreParam == 'precision': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'precision': precision_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[precision_m])' + self.modelfile += '\\n' + elif scoreParam == 'roc_auc': + self.modelfile += "" self.model = 
load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[tf.keras.metrics.AUC()])' + self.modelfile += '\\n' + elif scoreParam == 'f1_score': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'f1_score': f1_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[f1_m])' + self.modelfile += '\\n' + elif scoreParam == 'r2': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'r2': r_square},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[r_square])' + self.modelfile += '\\n' + elif scoreParam == 'rmse': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects={'rmse': rmse_m},compile=False)"" + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[rmse_m])' + self.modelfile += '\\n' + elif scoreParam == 'mse': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif scoreParam == 'mae': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + + elif scoreParam == 'accuracy': + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + else: + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'Text Similarity'): + self.modelfile += "" self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+preprocessing_pipe+""'))"" + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})"" + self.modelfile += '\\n' + elif(learner_type in ['similarityIdentification','contextualSearch']): + if scoreParam == 'VectorDB Cosine': + vectorfiledbname = 'trainingdataVecDB' + self.modelfile += f""\\ + \\n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\\ + \\n client = chromadb.PersistentClient(path=persist_directory)\\ + \\n self.collection_name = '{vectorfiledbname}'\\ + \\n self.collection = client.get_collection(self.collection_name)\\n"" + + else: + self.modelfile += "" self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\\n\\n"" + elif(learner_type == 'ImageClassification'): + self.modelfile += ' self.config='+str(imageconfig) + self.modelfile += '\\n' + if(modelName.lower() == 'densenet'): + self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))' + else: + self.modelfile += ' 
baseModel = tensorflow.keras.applications.InceptionV3(weights=""imagenet"", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))' + self.modelfile += '\\n' + self.modelfile += ' headModel = baseModel.output' + self.modelfile += '\\n' + self.modelfile += ' headModel = Flatten(name=""flatten"")(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dense(1024, activation=\\'relu\\')(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dropout(0.5)(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = Dense(2, activation=\\'sigmoid\\')(headModel)' + self.modelfile += '\\n' + self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)' + self.modelfile += '\\n' + self.modelfile += ' opt = Adam(lr=self.config[\\'lr\\'])' + self.modelfile += '\\n' + self.modelfile += ' self.model.compile(loss=""binary_crossentropy"", optimizer=opt, metrics=[""accuracy""])' + self.modelfile += '\\n' + self.modelfile += "" self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif(learner_type == 'objectDetection'): + self.modelfile += "" self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\\n"" + self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+""/export/pipeline.config""\\n' + self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+""/export/checkpoint/""\\n' + self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+""/export/label_map.pbtxt""\\n' + self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\\n' + self.modelfile += ' self.detection_model = model_builder.build(model_config=configs[""model""], is_training=False)\\n' + self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\\n' + self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, ""ckpt-0"")).expect_partial()\\n' + self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\\ + use_display_name=True)\\n' + elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'): + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + elif modelName.lower() == 'neural architecture search': + self.modelfile += ' import autokeras as ak' + self.modelfile += '\\n' + self.modelfile += "" self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'),custom_objects=ak.CUSTOM_OBJECTS)"" + self.modelfile += '\\n' + else: + self.modelfile += "" self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','""+modelfile+""'))"" + self.modelfile += '\\n' + +def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): + self.modelfile += ' def predict(self,X,features_names):' + self.modelfile += '\\n' + + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()==""autoencoder""): + + self.modelfile += f"" X=X[{feature}]\\n"" + self.modelfile += f"" X = np.asarray(X).astype('float32')\\n"" + self.modelfile += f"" reconstructed = self.model.predict(X)\\n"" + self.modelfile += f"" predict_" 
+"loss = tf.keras.losses.mae(reconstructed,X)\\n"" + self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\\n' + self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\\n' + self.modelfile += ' prediction_df = pd.DataFrame()\\n' + self.modelfile += ' prediction_df[""loss""] = predict_loss\\n' + self.modelfile += ' prediction_df[""max_threshold""] = max_threshold\\n' + self.modelfile += ' prediction_df[""min_threshold""] = min_threshold\\n' + self.modelfile += ' prediction_df[""anomaly""] = np.where((prediction_df[""loss""] > prediction_df[""max_threshold""]) | (prediction_df[""loss""] <= prediction_df[""min_threshold""]), True, False)\\n' + self.modelfile += ' return prediction_df\\n' + + + elif(learner_type == 'RecommenderSystem'): + self.modelfile += ' predictions = []' + self.modelfile += '\\n' + self.modelfile += ' for index,row in X.iterrows():' + self.modelfile += '\\n' + self.modelfile += ' score = self.model.predict(int(row[""uid""]),int(row[""iid""]))' + self.modelfile += '\\n' + self.modelfile += ' predictions.append(score.est)' + self.modelfile += '\\n' + self.modelfile += ' return predictions' + elif(learner_type in ['similarityIdentification','contextualSearch']): + tfeatures = list(modelFeatures.split("","")) + if indexFeature != '' and indexFeature != 'NA': + ifeatures = indexFeature.split("","") + for ifes in ifeatures: + if ifes not in tfeatures: + tfeatures.append(ifes) + if model_type == 'BM25': + self.modelfile += f""\\n\\ + tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\\n\\ + bm25 = BM25Okapi(tokenized_corpus)\\n\\ + tokenized_query = [doc.split(' ') for doc in X.tokenize]\\n\\ + logcnt = 5\\n\\ + output = []\\n\\ + for query in tokenized_query:\\n\\ + doc_scores = bm25.get_scores(query)\\n\\ + related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\\n\\ + x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\n\\ + x['Score'] = doc_scores[related_docs_indices]\\n\\ + x['Score'] = round(x['Score'],2).astype(str)+'%'\\n\\ + output.append(x)\\n\\ + return output\\n"" + elif scoreParam == 'VectorDB Cosine': + featuresVecDB = modelFeatures.split("","") + self.modelfile += ' logcnt = 5\\n' + self.modelfile += f"" columns = {featuresVecDB}\\n"" + self.modelfile += f""\\ + \\n output = []\\ + \\n for rowindex, row in X.iterrows():\\ + \\n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\\ + \\n results = self.collection.query(\\ + \\n query_embeddings=queryembedding.tolist(),\\ + \\n n_results=logcnt\\ + \\n )\\ + \\n x = pd.DataFrame(columns=columns)\\ + \\n for i in range(0, len(results['ids'][0])):\\ + \\n documentAry = results['documents'][0][i]\\ + \\n documentAry = documentAry.split(' ~&~ ')\\ + \\n for j in range(0, len(documentAry)):\\ + \\n x.at[i,columns[j]] = documentAry[j]\\ + \\n x.at[i,'Score'] = results['distances'][0][i]\\ + \\n output.append(x)\\ + \\n return output"" + else: + self.modelfile += ' columns = self.train_input.columns.tolist()\\n' + self.modelfile += ' logcnt = 5\\n' + self.modelfile += f"" train_input = self.train_input[{tfeatures}]\\n"" + for tf in tfeatures: + self.modelfile += f"" columns.remove('{tf}')\\n"" + self.modelfile += f""\\ + \\n results = cosine_similarity(self.train_input[columns],X)\\ + \\n output = []\\ + \\n for i in range(results.shape[1]):\\ + \\n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\\ + \\n 
x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\ + \\n scores = []\\ + \\n for j in range(0,logcnt):\\ + \\n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\\ + \\n x['Score'] = scores\\ + \\n output.append(x)\\ + \\n return output"" + elif(learner_type == 'Text Similarity'): + self.modelfile += ' X[""'+firstDocFeature+'""] = X[""'+firstDocFeature+'""].astype(str)' + self.modelfile += '\\n' + self.modelfile += ' X[""'+secondDocFeature+'""] = X[""'+secondDocFeature+'""].astype(str)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X[""'+firstDocFeature+'""].values)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X[""'+secondDocFeature+'""].values)' + self.modelfile += '\\n' + self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\\'post\\')' + self.modelfile += '\\n' + self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\\'post\\')' + self.modelfile += '\\n' + self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])' + self.modelfile += '\\n' + self.modelfile += ' return(prediction)' + self.modelfile += '\\n' + elif(learner_type == 'ImageClassification'): + self.modelfile += ' predictions = []' + self.modelfile += '\\n' + self.modelfile += ' for index, row in X.iterrows(): ' + self.modelfile += '\\n' + self.modelfile += ' img = cv2.imread(row[\\'imagepath\\'])' + self.modelfile += '\\n' + self.modelfile += ' img = cv2.resize(img, (self.config[\\'img_width\\'],self.config[\\'img_height\\']))' + self.modelfile += '\\n' + self.modelfile += ' img = image.img_to_array(img)' + self.modelfile += '\\n' + self.modelfile += ' img = np.expand_dims(img, axis=0)' + self.modelfile += '\\n' + self.modelfile += ' img = img/255' + self.modelfile += '\\n' + self.modelfile += ' prediction = self.model.predict(img)' + self.modelfile += '\\n' + self.modelfile += ' prediction = np.argmax(prediction,axis=1)' + self.modelfile += '\\n' + self.modelfile += ' predictions.append(prediction[0])' + self.modelfile += '\\n' + self.modelfile += ' return(predictions)' + self.modelfile += '\\n' + elif(learner_type == 'objectDetection'): + self.modelfile += ' @tf.function\\n' + self.modelfile += ' def detect_fn(image):\\n' + self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\\n' + self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\\n' + self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\\n' + self.modelfile += ' return detections\\n' + self.modelfile += ' def load_image_into_numpy_array(path):\\n' + self.modelfile += ' return np.array(Image.open(path))\\n' + self.modelfile += ' imageLocation = []\\n' + self.modelfile += ' for i, row in X.iterrows():\\n' + self.modelfile += ' if (""confidance"" in row) and row[""confidance""] <= 1.0:\\n' + self.modelfile += ' confidance = row[""confidance""]\\n' + self.modelfile += ' else:\\n' + self.modelfile += ' confidance = 0.8\\n' + self.modelfile += ' imageName = str(Path(row[""imagepath""]).stem)+""_output""+str(Path(row[""imagepath""]).suffix)\\n' + self.modelfile += ' image_np = load_image_into_numpy_array(row[""imagepath""])\\n' + self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\n' + self.modelfile += ' detections = 
detect_fn(input_tensor)\\n' + self.modelfile += ' num_detections = int(detections.pop(""num_detections""))\\n' + self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\\n\\ + for key, value in detections.items()}\\n' + self.modelfile += ' detections[""num_detections""] = num_detections\\n' + self.modelfile += ' detections[""detection_classes""] = detections[""detection_classes""].astype(np.int64)\\n' + self.modelfile += ' label_id_offset = 1\\n' + self.modelfile += ' image_np_with_detections = image_np.copy()\\n' + self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\\n\\ + image_np_with_detections,\\n\\ + detections[""detection_boxes""],\\n\\ + detections[""detection_classes""]+label_id_offset,\\n\\ + detections[""detection_scores""],\\n\\ + self.category_index,\\n\\ + use_normalized_coordinates=True,\\n\\ + max_boxes_to_draw=200,\\n\\ + min_score_thresh=confidance,\\n\\ + agnostic_mode=False)\\n' + + self.modelfile += ' plt.figure()\\n' + self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\\n' + self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\\n' + self.modelfile += ' plt.show()\\n' + self.modelfile += ' return imageLocation\\n' + else: + if(learner_type == 'DL' and model != 'Neural Network'): + self.modelfile += ' X = np.expand_dims(X, axis=2)' + self.modelfile += '\\n' + if(learner_type == 'TextDL'): + self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' + self.modelfile += '\\n' + elif(learner_type == 'TextML'): + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)' + self.modelfile += '\\n' + elif(learner_type == 'DL' and model_type == 'Classification'): + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' + self.modelfile += '\\n' + else: + if(model_type == 'Classification' or model_type == 'TLClassification'): + if model == 'Neural Architecture Search': + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict(X))' + self.modelfile += '\\n' + else: + if optimizationmethod == 'genetic': + self.modelfile += '\\n' + self.modelfile += ' try:' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))' + self.modelfile += '\\n' + self.modelfile += ' except:' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(self.model.predict(X))' + else: + self.modelfile += ' X = X.astype(np.float32)' + self.modelfile += '\\n' + if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': + self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)' + self.modelfile += '\\n' + self.modelfile += ' return pd.DataFrame(q.numpy())' + else: + self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)' + self.modelfile += '\\n' + elif model_type == 'Regression' and model == 'NAS': + self.modelfile += \\ +"""""" + X = X.astype(np.float32) + return self.model.predict(X) +"""""" + elif(learner_type == 'TS'): + if model.lower()" +"== 'fbprophet': + self.modelfile += ' sessonal_freq=""'+str(sessonal_freq)+'""' + self.modelfile += '\\n' + self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=in" 
+"case_name'],self.params['usecase_ver'], self.name) + + def create_idrift(self): + pass + + def create_odrift(self): + pass + + def create_utils_folder(self): + common.create_util_folder(self.deploy_path) + + """""" +/** +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* © Copyright HCL Technologies Ltd. 2021, 2022 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +*/ +"""""" + +from importlib.metadata import version +import sys + + +class importModule(): + + def __init__(self): + self.importModule = {} + self.stdlibModule = [] + self.localModule = {} + + def addLocalModule(self,module, mod_from=None, mod_as=None): + if module == '*': + if module not in self.localModule.keys(): + self.localModule[module]= [mod_from] + else: + self.localModule[module].append(mod_from) + elif module not in self.localModule.keys(): + self.localModule[module] = {'from':mod_from, 'as':mod_as} + + def addModule(self, module, mod_from=None, mod_as=None): + if module not in self.importModule.keys(): + self.importModule[module] = {'from':mod_from, 'as':mod_as} + if module in sys.stdlib_module_names: + self.stdlibModule.append(module) + elif isinstance(self.importModule[module], list): + if mod_as not in [x['as'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as not in [x['from'] for x in self.importModule[module]]: + self.importModule[module].append({'from':mod_from, 'as':mod_as}) + elif mod_as != self.importModule[module]['as']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + elif mod_from != self.importModule[module]['from']: + as_list = [self.importModule[module]] + as_list.append({'from':mod_from, 'as':mod_as}) + self.importModule[module] = as_list + + def getModules(self): + return (self.importModule, self.stdlibModule) + + def getBaseModule(self, extra_importers=[]): + modules_alias = { 'sklearn':'scikit-learn', + 'genetic_selection':'sklearn-genetic', + 'google': 'google-cloud-storage', + 'azure':'azure-storage-file-datalake'} + local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} + modules = [] + require = """" + if extra_importers: + extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] + importers_module = [self.importModule] + extra_importers + for importer_module in importers_module: + for k,v in importer_module.items(): + if v['from']: + mod = v['from'].split('.')[0] + else: + mod = k + if mod in modules_alias.keys(): + mod = modules_alias[mod] + modules.append(mod) + modules = list(set(modules)) + for mod in modules: + try: + if mod in local_modules.keys(): + require += f""{local_modules[mod]}\\n"" + else: + require += f""{mod}=={version(mod)}\\n"" + except : + if mod not in sys.stdlib_module_names: + raise + return require + + def getCode(self): + def to_string(k, v): + mod = '' + if v['from']: + mod += 'from {} '.format(v['from']) + mod += 'import {}'.format(k) + if v['as']: + mod += ' as {} '.format(v['as']) + return mod + + modules = """" + local_modules = """" + std_lib_modules = """" + third_party_modules = """" + 
for k,v in self.importModule.items(): + if k in self.stdlibModule: + std_lib_modules = std_lib_modules + '\\n' + to_string(k, v) + elif isinstance(v, dict): + third_party_modules = third_party_modules + '\\n' + to_string(k, v) + elif isinstance(v, list): + for alias in v: + third_party_modules = third_party_modules + '\\n' + to_string(k, alias) + for k,v in self.localModule.items(): + if k != '*': + local_modules = local_modules + '\\n' + to_string(k, v) + else: + for mod_from in v: + local_modules = local_modules + '\\n' + f'from {mod_from} import {k}' + if std_lib_modules: + modules = modules + ""\\n#Standard Library modules"" + std_lib_modules + if third_party_modules: + modules = modules + ""\\n\\n#Third Party modules"" + third_party_modules + if local_modules: + modules = modules + ""\\n\\n#local modules"" + local_modules + '\\n' + return modules + + def copyCode(self, importer): + self.importModule, self.stdlibModule = importer.getModules() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os,sys +import platform +import json +import shutil +import logging +from pathlib import Path +from prediction_package import production +from prediction_package import prediction_transformation as cs + +class DeploymentManager: + def __init__(self): + self.requirementfile='' + self.modelfile='' + self.s2i_environmentfile='' + self.selectorfile='' + self.profilerfile='' + self.readmepackagename='' + self.pythonpackage='' + self.log = logging.getLogger('eion') + + def include_import_file(self,learner_type,method,scoreParam,model_type,model): + if((learner_type == 'DL') or (learner_type == 'TextDL')): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'): + self.modelfile += 'import joblib' + self.modelfile += '\\n' + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'import pandas as pd' + self.modelfile += '\\n' + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.modelfile += 'from pathlib import Path' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'from keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'import warnings' + self.modelfile += '\\n' + self.modelfile += 'from sklearn.preprocessing import StandardScaler' + self.modelfile += '\\n' + self.modelfile += 'warnings.filterwarnings(""ignore"")' + self.modelfile += '\\n' + if(learner_type == 'ImageClassification'): + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.models import Sequential' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten' + self.modelfile += '\\n' + self.modelfile += 'from 
tensorflow.keras.preprocessing import image' + self.modelfile += '\\n' + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.layers import Input' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.models import Model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.optimizers import Adam' + self.modelfile += '\\n' + self.modelfile += 'import cv2' + self.modelfile += '\\n' + if(learner_type == 'objectDetection'): + self.modelfile += 'import os\\n' + self.modelfile += 'from object_detection.utils import label_map_util\\n' + self.modelfile += 'from object_detection.utils import config_util\\n' + self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\\n' + self.modelfile += 'from object_detection.builders import model_builder\\n' + self.modelfile += 'import tensorflow as tf\\n' + self.modelfile += 'import numpy as np\\n' + self.modelfile += 'from PIL import Image\\n' + self.modelfile += 'import matplotlib.pyplot as plt\\n' + self.modelfile += 'import pandas as pd\\n' + self.modelfile += 'from pathlib import Path\\n' + if(learner_type == 'Text Similarity'): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + if(model == 'Neural Architecture Search'): + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.modelfile += 'from tensorflow.keras import backend as K' + self.modelfile += '\\n' + self.modelfile += 'import tensorflow as tf' + self.modelfile += '\\n' + self.modelfile += 'import joblib' + self.modelfile += '\\n' + self.modelfile += 'import os' + self.modelfile += '\\n' + self.modelfile += 'import pandas as pd' + self.modelfile += '\\n' + self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\\n' + self.modelfile += 'import numpy as np\\n' + self.modelfile += 'from pathlib import Path\\n' + if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': + self.modelfile += 'from tensorflow import constant' + self.modelfile += '\\n' + self.modelfile += 'from tf_agents.trajectories import time_step' + self.modelfile += '\\n' + self.requirementfile += 'tensorflow==2.5.0' + if model.lower() == 'lstm' or model.lower() == 'mlp': + self.modelfile += 'from tensorflow.keras.models import load_model' + self.modelfile += '\\n' + self.requirementfile += 'tensorflow==2.5.0' + if(learner_type == 'Text Similarity'): + self.modelfile += 'def cosine_distance(vests):' + self.modelfile += '\\n'; + self.modelfile += ' x, y = vests' + self.modelfile += '\\n'; + self.modelfile += ' x = K.l2_normalize(x, axis=-1)' + self.modelfile += '\\n'; + self.modelfile += ' y = K.l2_normalize(y, axis=-1)' + self.modelfile += '\\n'; + self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)' + self.modelfile += '\\n'; + self.modelfile += 'def cos_dist_output_shape(shapes):' + self.modelfile += '\\n'; + self.modelfile += ' shape1, shape2 = shapes' + self.modelfile += '\\n'; + self.modelfile += ' return 
(shape1[0],1)' + self.modelfile += '\\n'; + + if(learner_type == 'TextDL' or learner_type == 'DL'): + if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'): + self.modelfile += 'def recall_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())' + self.modelfile += '\\n'; + self.modelfile += ' return recall' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'): + self.modelfile += 'def precision_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))' + self.modelfile += '\\n'; + self.modelfile += ' precision = true_positives / (predicted_" +"positives + K.epsilon())' + self.modelfile += '\\n'; + self.modelfile += ' return precision' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'f1_score'): + self.modelfile += 'def f1_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' precision = precision_m(y_true, y_pred)' + self.modelfile += '\\n'; + self.modelfile += ' recall = recall_m(y_true, y_pred)' + self.modelfile += '\\n'; + self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))' + self.modelfile += '\\n'; + if(scoreParam.lower() == 'rmse'): + self.modelfile += 'def rmse_m(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))' + self.modelfile += '\\n'; + if(scoreParam.lower() =='r2'): + self.modelfile += 'def r_square(y_true, y_pred):' + self.modelfile += '\\n'; + self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))' + self.modelfile += '\\n'; + self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))' + self.modelfile += '\\n'; + self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))' + self.modelfile += '\\n'; + if(learner_type.lower() in ['similarityidentification','contextualsearch']): + self.modelfile += 'from pathlib import Path\\n' + if model_type == 'BM25': + self.modelfile += 'from rank_bm25 import BM25Okapi\\n' + elif scoreParam == 'VectorDB Cosine': + self.modelfile += 'import chromadb\\n' + else: + self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\\n' + + self.pythonpackage += '========== Python Packags Requires =========' + self.pythonpackage += '\\n' + self.pythonpackage += 'scikit-learn' + self.pythonpackage += '\\n' + self.pythonpackage += 'scipy' + self.pythonpackage += '\\n' + self.pythonpackage += 'numpy' + self.pythonpackage += '\\n' + if((learner_type == 'DL') or (learner_type =='TextDL')): + self.modelfile += 'import numpy as np' + self.modelfile += '\\n' + self.requirementfile += 'scikit-learn==0.21.3' + self.requirementfile += '\\n' + self.requirementfile += 'scipy==1.3.3' + self.requirementfile += '\\n' + self.requirementfile += 'numpy==1.17.4' + self.requirementfile += '\\n' + + if(learner_type == 'TextML'): + self.requirementfile += 'spacy==2.2.3' + self.requirementfile += '\\n' + self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz' + self.requirementfile += '\\n' + + 
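# --- Editor's illustrative sketch; not part of the dumped source above ---
# Standalone versions of the Keras-backend metrics that the generator above writes
# into the prediction module as strings (recall_m / precision_m / f1_m). The
# generated file defines them at module level so they can be resolved again when the
# trained DL/TextDL model is deserialized; the 'model.h5' name below is a
# placeholder for illustration, not a path used by the generator.
from tensorflow.keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

# Typical load-time usage (custom metrics must be supplied as custom_objects):
# from tensorflow.keras.models import load_model
# model = load_model('model.h5', custom_objects={'recall_m': recall_m,
#                                                'precision_m': precision_m,
#                                                'f1_m': f1_m})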
if(learner_type == 'DL' or learner_type == 'TextDL'): + self.requirementfile += 'keras==2.3.1' + self.requirementfile += '\\n' + self.requirementfile += 'tensorflow==2.0.0b1' + self.requirementfile += '\\n' + + if(learner_type == 'RecommenderSystem'): + self.requirementfile += 'surprise' + self.requirementfile += '\\n' + if(method == 'package'): + self.modelfile += 'import surprise' + self.modelfile += '\\n' + self.modelfile += 'import statsmodels' + self.modelfile += '\\n' + self.requirementfile += 'statsmodels==0.10.2' + self.requirementfile += '\\n' + + def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False): + self.readme='========== Files Structures ==========' + self.readme+='\\n' + self.readme+=modelfile+' ------ Trained Model' + self.readme+='\\n' + self.readme+='aion_prediction.py --> Python package entry point' + self.readme+='\\n' + if not single_file: + self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric' + self.readme+='\\n' + self.readme+='script/selector.py --> Feature Selection' + self.readme+='\\n' + self.readme+='script/trained_model.py --> Read the model file and call the prediction' + self.readme+='\\n' + self.readme+='script/output_format.py --> Output formatter file' + self.readme+='\\n' + self.readme+= self.pythonpackage + self.readme+= '========== How to call the model ==========' + self.readme+='\\n' + self.readme+= '============== From Windows Terminal ==========' + self.readme+='\\n' + if method == 'optimus_package': + self.readme += 'python aion_prediction.py filename.json' + self.readme +='\\n' + self.readme += '========== Embedded Methods ==========' + self.readme +='\\n' + self.readme += 'Function Name: predict_from_json - When input is Json Data' + self.readme +='\\n' + self.readme += 'Function Name: predict_from_file - When input is Json File' + self.readme +='\\n' + else: + callpython = 'python aion_prediction.py ""[{' + for x in features: + if(callpython != 'python prediction.py ""[{'): + callpython += ',' + callpython += '\\\\\\""'+str(x)+'\\\\\\""'+':'+'\\\\\\""'+str(x)+'_value'+'\\\\\\""' + callpython += '}]""' + self.readme += callpython + self.readme+='\\n' + self.readme+= '============== From Linux Terminal ==========' + self.readme+='\\n' + callpython = 'python aion_prediction.py \\'[{' + temp =callpython + for x in features: + if(callpython != temp): + callpython += ',' + callpython += '""'+str(x)+'""'+':'+'""'+str(x)+'_value'+'""' + callpython += '}]\\'' + self.readme += callpython + self.readme+='\\n' + self.readme+= '============== Output ==========' + self.readme+='\\n' + self.readme+= '{""status"":""SUCCESS"",""data"":[{""Data1"":""Value"",""prediction"":""Value""}]}' ## For Single Row/Record' + self.readme+='\\n' + self.readme+= '{""status"":""SUCCESS"",""data"":[{""Data1"":""Value"",""prediction"":""Value""},{""Data1"":""Value"",""prediction"":""Value""}]} ## For Multiple Row/Record' + self.readme+='\\n' + self.readme+= '{""status"":""ERROR"",""message"":""description""} ## In Case Exception or Error' + self.readme+='\\n' + #print(self.readme) + filename = os.path.join(deploy_path,'readme.txt') + self.log.info('-------> Readme File Location: '+filename) + f = open(filename, ""wb"") + f.write(str(self.readme).encode('utf8')) + f.close() + def create_class(self,classname): + #self.modelfile += 'class '+classname+'(object):' + self.modelfile += 'class trained_model(object):' + self.modelfile += '\\n' + + def profiler_code(self,model_type,model,output_columns, features, 
text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''): + profiler = deploy.get('profiler',{}) + if isinstance(features, str): + features = features.split(',') + code = f"""""" +import scipy +import joblib +import numpy as np +import pandas as pd +from pathlib import Path +"""""" + if text_feature: + code += """""" +import importlib.util\\n"""""" + + if wordToNumericFeatures: + code += """""" +from word2number import w2n + +def s2n(value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan +"""""" + if 'code' in deploy.get('preprocess',{}).keys(): + code += deploy['preprocess']['code'] + + if profiler.get('conversion_method','').lower() == 'glove': + code += """""" +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + from text.Embedding import load_pretrained + from text import TextProcessing + model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') + embed_size, loaded_model = load_pretrained(model_path) + self.model.set_params(text_process__vectorizer__external_model = loaded_model) + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + elif profiler.get('conversion_method','').lower() == 'fasttext': + code += """""" +def get_pretrained_model_path(): + try: + from AION.appbe.dataPath import DATA_DIR + modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' + except: + modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' + + if not modelsPath.exists(): + modelsPath.mkdir(parents=True, exist_ok=True) + return modelsPath + +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + if not importlib.util.find_spec('fasttext'): + raise ValueError('fastText not installed') + else: + import os + import fasttext + import fasttext.util + cwd = os.getcwd() + os.chdir(get_pretrained_model_path()) + fasttext.util.download_model('en', if_exists='ignore') + loaded_model = fasttext.load_model('cc.en.300.bin') + os.chdir(cwd) + self.model.set_params(text_process__vectorizer__external_model = loaded_model) + self.model.set_params(text_process__vectorizer__external_model_type = 'binary') + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + else: + code += """""" +class inputprofiler(object): + + def __init__(self): + self.model = None + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if preprocess_path.exists(): + self.model = joblib.load(preprocess_path) + else: + raise ValueError('Preprocess model not found') + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) +"""""" + if 'code' in deploy.get('preprocess',{}).keys(): + code += "" df = preprocess( df)\\n"" + if wordToNumericFeatures: + code += f"""""" + df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))"""""" + if profiler.get('unpreprocessed_columns'): + code += f"""""" + unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}'] + df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if 
profiler.get('force_numeric_conv'): + code += f"""""" + df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce') + """""" + code += f"""""" + if self.model: + df = self.model.transform(df)"""""" + code += f"""""" + columns = {output_columns} + if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) + else: + df = pd.DataFrame(df, columns=columns) + """""" + ##The below if loop for avoiding unpreprocessed column variable storing which is not used for anomaly detection + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + pass + else: + if profiler.get('unpreprocessed_columns'): + code += f"""""" + df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data + """""" + if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': + ##This below set_index is wrong, because we drop datetimefeature before profiling and doing set_index. So commented now. + # code += f"""""" + # df.set_index('{datetimeFeature}', inplace=True)"""""" + code += f"""""" + return(df,'{datetimeFeature}')\\n"""""" + else: + code += f"""""" + return(df)"""""" + return code + + + def no_profiling_code(self, features): + if isinstance(features, str): + features = features.split(',') + return f"""""" +import pandas as pd +import numpy as np + +class inputprofiler(object): + + def apply_profiler(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[{features}] + """""" + + def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None" +",datetimeFeature=''): + filename = str(Path(deploy_path)/'script'/'inputprofiler.py') + if 'profiler' in config: + if model_type == 'BM25': + code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features']) + elif model == 'KaplanMeierFitter': + code = self.no_profiling_code(features) + elif model.lower() in ['arima', 'fbprophet']: #task 12627 + code = self.no_profiling_code('noofforecasts') + else: + code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature) + if code: + with open(filename,'w',encoding=""utf-8"") as f: + f.write(code) + self.log.info('-------> Profiler File Location :'+filename) + return + self.profilerfile += 'import pandas as pd' + self.profilerfile += '\\n' + self.profilerfile += 'import joblib' + self.profilerfile += '\\n' + self.profilerfile += 'import os' + self.profilerfile += '\\n' + self.profilerfile += 'from word2number import w2n' + self.profilerfile += '\\n' + + self.profilerfile += 'import numpy as np' + self.profilerfile += '\\nfrom pathlib import Path\\n' + #print(""1"") + #print(profiler) + if(learner_type == 'Text Similarity' or len(text_features) > 0): + self.profilerfile += 'from text import TextProcessing' + self.profilerfile += '\\n' + self.profilerfile += 'def textCleaning(textCorpus):' + self.profilerfile += '\\n' + self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()' + self.profilerfile += '\\n' + self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)' + self.profilerfile += 
'\\n' + self.profilerfile += ' return(textCorpus)' + self.profilerfile += '\\n' + + self.profilerfile += 'class inputprofiler(object):' + self.profilerfile += '\\n' + self.profilerfile += ' def s2n(self,value):' + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' x=eval(value)' + self.profilerfile += '\\n' + self.profilerfile += ' return x' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' return w2n.word_to_num(value)' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' return np.nan ' + self.profilerfile += '\\n' + self.profilerfile += ' def apply_profiler(self,df):' + self.profilerfile += '\\n' + if(len(wordToNumericFeatures) > 0): + for w2nFeature in wordToNumericFeatures: + if w2nFeature not in features: + continue + self.profilerfile += "" df['""+w2nFeature+""']=df['""+w2nFeature+""'].apply(lambda x: self.s2n(x))"" + self.profilerfile += '\\n' + self.profilerfile += "" df = df.replace(r'^\\s*$', np.NaN, regex=True)"" + self.profilerfile += '\\n' + self.profilerfile += ' try:' + self.profilerfile += '\\n' + self.profilerfile += ' df.dropna(how=""all"",axis=1,inplace=True)' + self.profilerfile += '\\n' + self.profilerfile += ' except:' + self.profilerfile += '\\n' + self.profilerfile += ' df.fillna(0)' + self.profilerfile += '\\n' + + if model_type.lower() != 'timeseriesforecasting': #task 11997 + self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/""model""/""preprocess_pipe.pkl""\\n' + self.profilerfile += ' if preprocess_path.exists():\\n' + self.profilerfile += ' model = joblib.load(preprocess_path)\\n' + if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder': + self.profilerfile += f"" df[{features}] = model.transform(df[{features}])\\n"" + else: + self.profilerfile += f"" df = model.transform(df)\\n"" + if 'operation' in profiler: + y = profiler['operation'] + for action in y: + feature = action['feature'] + #if feature not in features: + # continue + operation = action['Action'] + if(operation == 'Drop'): + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" df.drop(columns=['""+feature+""'],inplace = True)"" + self.profilerfile += '\\n' + if(operation == 'FillValue'): + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + fvalue = action['value'] + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].fillna(value='""+fvalue+""')"" + self.profilerfile += '\\n' + if(operation == 'Encoder'): + value = action['value'] + value = value.replace(""\\n"", ""\\\\n"") + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" le_dict=""+str(value) + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].apply(lambda x: le_dict.get(x,-1))"" + self.profilerfile += '\\n' + self.profilerfile += "" if -1 in df['""+feature+""'].values:"" + self.profilerfile += '\\n' + self.profilerfile += "" raise Exception('Category value of ""+feature+"" not present in training data')"" + self.profilerfile += '\\n' + if 'conversion' in profiler: + catergoryConverton = profiler['conversion'] + #print(catergoryConverton) + if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) 
and ('features' in catergoryConverton): + self.profilerfile += "" encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))"" + self.profilerfile += '\\n' + self.profilerfile += "" CategoryFeatures = ""+str(catergoryConverton['features']) + self.profilerfile += '\\n' + if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding': + self.profilerfile += "" transformed_data = encoder.transform(df[CategoryFeatures]).toarray()"" + self.profilerfile += '\\n' + self.profilerfile += "" feature_labels = encoder.get_feature_names(CategoryFeatures)"" + self.profilerfile += '\\n' + self.profilerfile += "" transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) "" + self.profilerfile += '\\n' + else: + self.profilerfile += "" transformed_data = encoder.transform(df[CategoryFeatures])"" + self.profilerfile += '\\n' + self.profilerfile += "" dataColumns=list(df.columns)"" + self.profilerfile += '\\n' + self.profilerfile += "" nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))"" + self.profilerfile += '\\n' + self.profilerfile += "" dataArray=df[nonNormFeatures]"" + self.profilerfile += '\\n' + self.profilerfile += "" df = pd.concat([dataArray, transformed_data],axis=1)"" + self.profilerfile += '\\n' + y = json.loads(numericToLabel_json) + + for feature_details in y: + feature = feature_details['feature'] + if feature not in features: + continue + label = feature_details['Labels'] + bins = feature_details['Bins'] + self.profilerfile += "" if '""+feature+""' in df.columns:"" + self.profilerfile += '\\n' + self.profilerfile += "" cut_bins=""+str(bins) + self.profilerfile += '\\n' + self.profilerfile += "" cut_labels=""+str(label) + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = pd.cut(df['""+feature+""'],bins=cut_bins,labels=cut_labels)"" + self.profilerfile += '\\n' + self.profilerfile += "" df['""+feature+""'] = df['""+feature+""'].fillna(value=0)"" + self.profilerfile += '\\n' + + if(len(text_features) > 0): + if(len(text_features) > 1): + self.profilerfile += ' merge_features = '+str(text_features) + self.profilerfile += '\\n' + self.profilerfile += ' df[\\'combined\\'] = df[merge_features].apply(lambda row: \\' \\'.join(row.values.astype(str)), axis=1)' + self.profilerfile += '\\n' + self.profilerfile += ' features = [\\'combined\\']' + self.profilerfile += '\\n' + else: + self.profilerfile += "" features = ""+str(text_features) + self.profilerfile += '\\n' + if model_type == 'BM25': + self.profilerfile += """"""\\ + df_text = df[features[0]] + pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) + df['tokenize'] = pipe.transform(df_text)\\n"""""".format(preprocessing_pipe=preprocessing_pipe) + elif conversion_method == 'sentenceTransformer': + self.profilerfile += """"""\\ + df_text = df[features[0]] + from sentence_transformers import SentenceTransformer + model = SentenceTransformer(\\'sentence-transformers/msmarco-distilroberta-base-v2\\') + df_vect = model.encode(df_text) + for empCol in {text_features}: + df = df.drop(columns=[empCol]) + if isinstance(df_vect, np.ndarray): + df1 = pd.DataFrame(df_vect) + else: + df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names()) + df1 = df1.add_suffix(\\'_vect\\') + df = pd.concat([df, df1],axis=1)\\n"""""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) + else: + self.profilerfile += """"""\\ + df_text = df[features[0]] + pipe = 
joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) + df_vect=pipe.transform(df_text) + for empCol in {text_features}: + df = df.drop(columns=[empCol]) + if isinstance(df_vect, np.ndarray): + df1 = pd.DataFrame(df_vect) + else: + df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names()) + df1 = df1.add_suffix(\\'_vect\\') + df = pd.concat([df, df1],axis=1)\\n"""""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) + + if(learner_type == 'Text Similarity'): + self.profilerfile += ' df[\\''+firstDocFeature+'\\'] = textCleaning(df[\\''+firstDocFeature+'\\'])' + self.profilerfile += '\\n' + self.profilerfile += ' df[\\''+secondDocFeature+'\\'] = textCleaning(df[\\''+secondDocFeature+'\\'])' + self.profilerfile += '\\n' + if len(normFeatures) > 0 and normalizer != '': + self.profilerfile += "" normFeatures = ""+str(normFeatures) + self.profilerfile += '\\n' + self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\\'..\\',\\'model\\',\\''+normalizer+'\\'))' + self.profilerfile += '\\n' + self.profilerfile += ' dataColumns=list(df.columns)' + self.profilerfile += '\\n' + self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))' + self.profilerfile += '\\n' + self.profilerfile += ' dataframe=df[normFeatures]' + self.profilerfile += '\\n' + self.profilerfile += ' transDf = normalizepipe.transform(dataframe)' + self.profilerfile += '\\n' + self.profilerfile += ' nontransDF=df[nonNormFeatures].values' + self.profilerfile += '\\n' + self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures' + self.profilerfile += '\\n' + self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, nontransDF)),columns=dataColumns)' + self.profilerfile += '\\n' + self.profilerfile += ' df=scaledDf' + self.profilerfile += '\\n' + else: + self.profilerfile += ' df=df.dropna()\\n' + self.profilerfile += ' return(df)' + filename = os.path.join(deploy_path,'script','inputprofiler.py') + self.log.info('------" +"-> Profiler File Location :'+filename) + f = open(filename, ""w"",encoding=""utf-8"") + f.write(str(self.profilerfile)) + f.close() + + def isEnglish(self, s): + try: + s.encode(encoding='utf-8').decode('ascii') + exce" +"== 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': #task 11997 + predictionObj.create_drift_file(deploy_path,features,targetFeature,model_type) + if model_type.lower() =" +".params['features']['text_feat']: + obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) + else: + obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) + + def create_odrift(self): + obj = aionPrediction() + if self.params['features']['text_feat']: + obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) + else: + obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + code = f"""""" +class trainer(): +"""""" + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise 
ValueError(f'Trained model file not found: {{model_file}}') +"""""" + run_code = f"""""" + def run(self, df):\\ +"""""" + if self.params['training']['algo'] in ['Neural Architecture Search']: + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + self.importer.addModule(module='autokeras',mod_as='ak') + init_code += f"""""" + self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) +"""""" + run_code += """""" + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) +"""""" + elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: + self.importer.addModule(module='numpy',mod_as='np') + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code += f"""""" + self.model = load_model(model_file) +"""""" + run_code += """""" + df = np.expand_dims(df, axis=2) + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) +"""""" + else: + self.importer.addModule('joblib') + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + df = df.astype(np.float32) + return self.model.predict(df).reshape(1, -1) + """""" + return code + init_code + run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" +class output_format(): + + def __init__(self): + pass + + def run(self, raw_df, output): + raw_df['prediction'] = output[0] + raw_df['prediction'] = raw_df['prediction'].round(2) + outputjson = raw_df.to_json(orient='records',double_precision=5) + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + """""" + +class clustering( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.feature_reducer = False + if not self.name: + self.name = 'clustering' + + def training_code( self): + self.importer.addModule('joblib') + self.importer.addModule(module='pandas',mod_as='pd') + code = f"""""" +class trainer(): +"""""" + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') +"""""" + run_code = f"""""" + def run(self, df):\\ +"""""" + if self.params['training']['algo'] == 'DBSCAN': + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + return self.model.fit_predict(df) +"""""" + else: + init_code += f"""""" + self.model = joblib.load(model_file) +"""""" + run_code += """""" + return self.model.predict(df).reshape(1, -1) + """""" + return code + init_code + run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" +class output_format(): + + def __init__(self): + pass + + def run(self, raw_df, output): + raw_df['prediction'] = output[0] + raw_df['prediction'] = raw_df['prediction'].round(2) + outputjson = raw_df.to_json(orient='records',double_precision=2) + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + """""" + return code + +if __name__ == '__main__': + config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': 
r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 
'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin" +"_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 
'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 
'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expect" +"ing_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 
'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 
'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 
'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', '" +"lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 
'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 
'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_" +"vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 
'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 
'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 
'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_ve" +"ct', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 
'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 
'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_ve" +"ct', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 
'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 
'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', '" +"ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 
'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 
'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 
'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_ve" +"ct', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 
'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 
'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect'," +"'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 
'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 
'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 
'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_" +"vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 
'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}} + deployer = get_deployer('classification',params=config) + deployer.run( ) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
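# Hedged sketch (assumption, not taken from the source): feature names ending in
# '_vect', as in the classification deployment config above, are typically the
# vocabulary of a fitted text vectorizer with a '_vect' suffix appended. This shows
# how such a list could be generated rather than hand-written; the sample texts and
# variable names are illustrative only.
from sklearn.feature_extraction.text import TfidfVectorizer

sample_texts = ["free entry in a wkly comp to win cup final tkts",
                "ok lar joking wif u oni"]
vectorizer = TfidfVectorizer().fit(sample_texts)
vect_features = [f"{name}_vect" for name in vectorizer.get_feature_names_out()]  # scikit-learn >= 1.0
print(vect_features[:5])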
+* +''' +import os +import shutil +import subprocess +from os.path import expanduser +import platform +deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target') +modelname='AION_12' +version='1' +def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata): + modelPath = os.path.join(deploymentfolder) + filename = os.path.join(deploymentfolder,'docker_image') + modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py') + shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh') + aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl') + drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl') + sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages') + model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version) + docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version) + model_sitepackage = os.path.join(model_dockerSetup,'site-packages') + model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py') + model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh') + model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl') + model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl') + + try: + os.mkdir(model_dockerSetup) + except Exception as e: + print(""Error in creating Setup directpry ""+str(e)) + pass + shutil.copytree(modelPath, docker_setup) + if textdata: + shutil.copytree(sitepackage, model_sitepackage) + modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing') + ''' + try: + os.makedirs(modelpretrainpath, exist_ok=True) + except Exception as e: + print(""Error in creating Setup directpry ""+str(e)) + pass + ''' + home = expanduser(""~"") + if platform.system() == 'Windows': + hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing') + else: + hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing') + shutil.copytree(hostpretrainpath, modelpretrainpath) + + shutil.copyfile(modelservice, model_dockerSetupservicefile) + shutil.copyfile(shellscript, model_dockershellscript) + shutil.copyfile(aix, model_aix) + shutil.copyfile(drift,model_drift) + try: + os.mkdir(filename) + except: + pass + requirementfilename = os.path.join(model_dockerSetup,'requirements.txt') + installfilename = os.path.join(model_dockerSetup,'install.py') + dockerfile = os.path.join(model_dockerSetup,'Dockerfile') + dockerdata='FROM python:3.8-slim-buster' + dockerdata+='\\n' + if textdata: + dockerdata+='WORKDIR /root' + dockerdata+='\\n' + dockerdata+='COPY HCLT HCLT' + dockerdata+='\\n' + dockerdata+='WORKDIR /app' + dockerdata+='\\n' + dockerdata+='COPY requirements.txt requirements.txt' + dockerdata+='\\n' + dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version + dockerdata+='\\n' + if textdata: + dockerdata+='COPY site-packages site-packages' + dockerdata+='\\n' + " +"dockerdata+='COPY install.py install.py' + dockerdata+='\\n' + dockerdata+='COPY run_modelService.py run_modelService.py' + dockerdata+='\\n' + 
dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl' + dockerdata+='\\n' + dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl' + dockerdata+='\\n' + dockerdata+='COPY start_modelservice.sh start_modelservice.sh' + dockerdata+='\\n' + if textdata: + dockerdata+='''RUN apt-get update \\ + && apt-get install -y build-essential manpages-dev \\ + && python -m pip install --no-cache-dir --upgrade pip \\ + && python -m pip install --no-cache-dir pandas==1.2.4 \\ + && python -m pip install --no-cache-dir numpy==1.19.5 \\ + && python -m pip install --no-cache-dir joblib==1.0.1 \\ + && python -m pip install --no-cache-dir Cython==0.29.23 \\ + && mv site-packages/* /usr/local/lib/python3.8/site-packages \\ + && python -m pip install --no-cache-dir scipy==1.6.3 \\ + && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir scikit-learn==0.24.2 \\ + && python -m pip install --no-cache-dir spacy==2.2.3 \\ + && python -m pip install --no-cache-dir nltk==3.6.2 \\ + && python -m pip install --no-cache-dir textblob==0.15.3 \\ + && python -m pip install --no-cache-dir gensim==3.8.3 \\ + && python -m pip install --no-cache-dir demoji==1.1.0 \\ + && python -m pip install --no-cache-dir lxml==4.6.3 \\ + && python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \\ + && python -m pip install --no-cache-dir Unidecode==1.2.0 \\ + && python -m pip install --no-cache-dir pyspellchecker==0.6.2 \\ + && python -m pip install --no-cache-dir pycontractions==2.0.1 \\ + && python -m pip install --no-cache-dir tensorflow==2.4.1 \\ + && python -m pip install --no-cache-dir nltk==3.6.2 \\ + && python -m pip install --no-cache-dir -r requirements.txt \\ + && python install.py \\ + && chmod +x start_modelservice.sh +ENTRYPOINT [""./start_modelservice.sh""] +''' + else: + dockerdata+='''RUN apt-get update \\ + && apt-get install -y build-essential manpages-dev \\ + && python -m pip install --no-cache-dir --upgrade pip \\ + && python -m pip install --no-cache-dir pandas==1.2.4 \\ + && python -m pip install --no-cache-dir numpy==1.19.5 \\ + && python -m pip install --no-cache-dir joblib==1.0.1 \\ + && python -m pip install --no-cache-dir Cython==0.29.23 \\ + && python -m pip install --no-cache-dir scipy==1.6.3 \\ + && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\ + && python -m pip install --no-cache-dir scikit-learn==0.24.2 \\ + && python -m pip install --no-cache-dir -r requirements.txt \\ + && chmod +x start_modelservice.sh +ENTRYPOINT [""./start_modelservice.sh""] +''' + f = open(dockerfile, ""w"") + f.write(str(dockerdata)) + f.close() + requirementdata='' + requirementdata+='word2number==1.1' + if learner_type == 'DL': + requirementdata+='\\n' + requirementdata+='tensorflow==2.5.0' + f = open(requirementfilename, ""w"") + f.write(str(requirementdata)) + f.close() + if textdata: + installfile=''' +import nltk +import ssl +try: + _create_unverified_https_context = ssl._create_unverified_context +except AttributeError: + pass +else: + ssl._create_default_https_context = _create_unverified_https_context +nltk.download('punkt') +nltk.download('wordnet') +nltk.download('stopwords') +nltk.download('averaged_perceptron_tagger')''' + f = open(installfilename, ""w"") + f.write(str(installfile)) + f.close() + try: + command = 'docker pull python:3.8-slim-buster' + os.system(command); + 
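# Hedged usage sketch (assumption): how createDockerImage(), whose docker build and
# save steps follow below, might be driven end to end once defined. learner_type and
# textdata are the two flags used in this module ('DL' adds the tensorflow pin to
# requirements.txt, textdata pulls in the text-processing site-packages and the
# pre-trained models from the host); the argument values here are illustrative.
if __name__ == '__main__':
    status, message = createDockerImage(deploymentfolder=deploymentfolder,
                                        modelname=modelname,
                                        version=version,
                                        learner_type='ML',   # anything other than 'DL' skips the tensorflow pin
                                        textdata=False)      # True requires the pre-trained text models on the host
    print(status, message)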
#subprocess.check_call([""chmod"", ""+x"", ""start_modelservice.sh""], cwd=model_dockerSetup) + subprocess.check_call([""docker"", ""build"", ""-t"",modelname.lower()+"":""+version,"".""], cwd=model_dockerSetup) + subprocess.check_call([""docker"", ""save"", ""-o"",modelname.lower()+""_""+version+"".tar"",modelname.lower()+"":""+version], cwd=model_dockerSetup) + dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+""_""+version+"".tar"") + shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+""_""+version+"".tar"")) + shutil.rmtree(model_dockerSetup) + return 'Success','SUCCESSFULLY' + except Exception as e: + print(""Error: ""+str(e)) + shutil.rmtree(model_dockerSetup) + return 'Error',str(e) + +#createDockerImage(deploymentfolder,modelname,version) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import subprocess +import os +import glob +import sys +import python_minifier +def encrypt_files(path): + cwd = os.getcwd() + secure_path = os.path.join(path,'SecuredScripts') + try: + if not os.path.exists(secure_path): + os.mkdir(secure_path) + files = [f for f in glob.glob(path + ""/*.py"")] + for file in files: + #encrypted_file_details[0] = file + #file = files[0] + #print(file) + #filename_w_dir = os.path.splitext(file) + filename_w_ext = os.path.basename(file) + filename, file_extension = os.path.splitext(filename_w_ext) + file_folder_path = os.path.join(secure_path,filename) + #print(file_folder_path) + + if not os.path.exists(file_folder_path): + os.mkdir(file_folder_path) + + # Minify python source code + minify_file = os.path.join(file_folder_path,filename+'_minify.py') + pythonfolder,_ = os.path.split(sys.executable) + pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe') + minify_command = ""\\""""+sys.executable+""\\"" \\""""+pyminify_script+ ""\\"" \\"""" + file + ""\\"" > \\"""" + minify_file+""\\"""" + subprocess.call(minify_command, shell=True) + # Change directory to folder path + os.chdir(file_folder_path) + + # Obfuscate minified file + pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe') + obfusc_commmand = ""\\""""+sys.executable+""\\"" \\""""+pyarmor_script+""\\"" obfuscate \\"""" + minify_file+""\\"""" + #print(obfusc_commmand) + subprocess.call(obfusc_commmand, shell=True) + + # Change directory to dist path + obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py') + #print(obfusc_file) + chdirpath = os.path.join(file_folder_path,'dist') + os.chdir(chdirpath) + + # Compress obfuscated file + compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py') + #print(compressed_file) + pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe') + compress_command = ""\\""""+sys.executable+""\\"" \\""""+pyminifier_script+""\\"" --gzip -o \\"""" +compressed_file + ""\\"" \\"""" + obfusc_file+""\\"""" + #print(compress_command) + subprocess.call(compress_command, shell=True) + + #compile_command = sys.executable+'-m py_compile ""' + compressed_file+'""' + 
#print(compile_command) + #subprocess.call(compile_command , shell=True) + #encrypted_file_details['compiled_file'] = file + #compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc') + #encrypted_file_details[1] = compiled_file + #encrypted_file_list.append(encrypted_file_details) + #encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc' + #encrypt_command = ""python "" + cwd + ""\\\\Encrypt_Key_Dcrypt.py "" + compiled_file + ' ' + encrypted_file + "" --g -e"" + #print(encrypt_command) + #subprocess.call(encrypt_command, shell=True) + #encrypted_file_list += ']' + #return(encrypted_file_list) + os.chdir(path) + except OSError as err: + print (""Creation of the directory %s failed ""+str(err)) + + +# Driver function +if __name__==""__main__"": + path = sys.argv[1] + encrypt_files(path) + + +#(base) C:\\Himanshu\\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py +#Obfuscate +#(base) C:\\Himanshu\\DataPreprocessing>pyarmor obfuscate C:\\Himanshu\\DataPreprocessing\\DataPreprocessing_minify.py +#Compression +#(base) C:\\Himanshu\\DataPreprocessing>pyminifier --gzip -o C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_compressed.py C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_minify.py +#(base) C:\\Himanshu\\DataPreprocessing>cd dist +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.py ""DocumentText"" ""Label"" 90 "".csv"" ""C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"" +#Compiling compressed .py to .pyc file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python -m py_compile DataPreprocessing_compressed.py +#Encrypt .pyc file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py C:\\Himanshu\\DataPreprocessing\\dist\\__pycache__\\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e +#Decrypt file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d +#Run decrypted file +#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc ""DocumentText"" ""Label"" 90 "".csv"" ""C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"" ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
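# Hedged sketch (assumption): a cross-platform variant of the three-step pipeline
# documented in the comments above (pyminify -> pyarmor obfuscate -> pyminifier
# --gzip). It resolves the console scripts with shutil.which instead of the
# hard-coded '<python>/Scripts/*.exe' paths used by encrypt_files(); the helper and
# path names are illustrative only, not the shipped behaviour.
import shutil
import subprocess
from pathlib import Path

def obfuscate_one(py_file, out_dir):
    out_dir = Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    stem = Path(py_file).stem
    # 1. Minify: pyminify prints the minified source to stdout.
    minified = out_dir / f'{stem}_minify.py'
    result = subprocess.run([shutil.which('pyminify'), str(py_file)],
                            capture_output=True, text=True, check=True)
    minified.write_text(result.stdout)
    # 2. Obfuscate: pyarmor writes its output under <cwd>/dist.
    subprocess.run([shutil.which('pyarmor'), 'obfuscate', str(minified)],
                   cwd=out_dir, check=True)
    obfuscated = out_dir / 'dist' / minified.name
    # 3. Compress the obfuscated script with gzip-based packing.
    compressed = out_dir / 'dist' / f'{stem}_compressed.py'
    subprocess.run([shutil.which('pyminifier'), '--gzip', '-o', str(compressed), str(obfuscated)],
                   check=True)
    return compressed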
+* +''' +import os +import platform +import sys +import subprocess +import glob +import shutil +import time +from aion_deployment.EncryptPythonSourceCode import encrypt_files +import json +def encrypt(alldirs): + for dir in alldirs: + try: + encrypt_files(dir) + except Exception as error_obj: + print(""Exception in encrypting"", error_obj) + print(""-""*50) +def replace_by_compressed(alldirs): + for dir in alldirs: + try: + #print(""Processing dir"", dir) + files = [f for f in glob.glob(dir + ""/*.py"")] + secure_path = os.path.join(dir, 'SecuredScripts') + time.sleep(6) + for file in files: + try: + filename_w_ext = os.path.basename(file) + filename, file_extension = os.path.splitext(filename_w_ext) + if filename == ""__init__"": + continue + #print(""Processing file"", file) + file_folder_path = os.path.join(secure_path, filename, 'dist') + compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py') + shutil.copy(compressed_file_path, dir) + os.remove(file) + new_compressed_file_path = os.path.join(dir, filename+'_compressed.py') + target_file_path = os.path.join(dir, filename_w_ext) + os.rename(new_compressed_file_path, target_file_path) + if filename == 'aion_prediction': + shutil.copytree(os.path.join(file_folder_path" +", 'pytransform'), os.path.join(dir, 'pytransform')) + except Exception as error_obj: + print(""Exception in file "", error_obj) + shutil.rmtree(secure_path) + except Exception as error_obj: + print(""Exception in dir "", error_obj) + +def start_Obfuscate(path): + project_path = path + subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] + alldirs = [ + project_path, + ] + for subdir in subdirs: + if(subdir != 'pytransform'): + alldirs.append(os.path.join(project_path, subdir)) + encrypt(alldirs) + replace_by_compressed(alldirs) + +if __name__==""__main__"": + project_path = sys.argv[1] + print(""project_path"", project_path) + subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] + alldirs = [ + project_path, + ] + for subdir in subdirs: + alldirs.append(os.path.join(project_path, subdir)) + encrypt(alldirs) + print(""*""*50) + replace_by_compressed(alldirs) + + +# python eion_compress.py ""C:\\Users\\ashwani.s\\Desktop\\22April\\22April\\Mohita"" ""C:\\Users\\ashwani.s\\Desktop\\eion\\eion"" > logfile.log + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import pandas as pd +import numpy as np +import scipy +import warnings +import scipy.stats as st +import logging +import json +class inputdrift(): + def __init__(self,conf): + self.log = logging.getLogger('eion') + + def get_input_drift(self,ndf,hdf,outputfolder): + selectedColumns = self.features.split(',') + dataalertcount=0 + distributionChangeColumns="""" + distributionChangeMessage=[] + for i in range(0,len(selectedColumns)): + data1=hdf[selectedColumns[i]] + data2=ndf[selectedColumns[i]] + if(data1.dtype !=""str"" and data2.dtype !=""str"" ): + cumulativeData=data1.append(data2) + teststaticValue=teststatic(self,data1,data2) + if (teststaticValue < 0.05): + distributionName1,sse1=DistributionFinder(self,data1) + distributionName2,sse2=DistributionFinder(self,data2) + if(distributionName1 == distributionName2): + dataalertcount = dataalertcount + else: + dataalertcount = dataalertcount+1 + distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+"","" + changedColumn = {} + changedColumn['Feature'] = selectedColumns[i] + changedColumn['KS_Training'] = teststaticValue + changedColumn['Training_Distribution'] = distributionName1 + changedColumn['New_Distribution'] = distributionName2 + distributionChangeMessage.append(changedColumn) + + else : + dataalertcount = dataalertcount + + else : + response =""Selected Columns should be Numerical Values"" + + if(dataalertcount == 0): + resultStatus=""Model is working as expected"" + else : + resultStatus=json.dumps(distributionChangeMessage) + + return(dataalertcount,resultStatus) + +def DistributionFinder(self,data): + try: + distributionName ="""" + sse =0.0 + KStestStatic=0.0 + dataType="""" + if(data.dtype == ""float64""): + dataType =""Continuous"" + elif(data.dtype ==""int""): + dataType=""Discrete"" + elif(data.dtype ==""int64""): + dataType=""Discrete"" + if(dataType == ""Discrete""): + distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson] + index, counts = np.unique(data.astype(int),return_counts=True) + + if(len(index)>=2): + best_sse = np.inf + y1=[] + total=sum(counts) + mean=float(sum(index*counts))/total + variance=float((sum(index**2*counts) -total*mean**2))/(total-1) + dispersion=mean/float(variance) + theta=1/float(dispersion) + r=mean*(float(theta)/1-theta) + + for j in counts: + y1.append(float(j)/total) + + pmf1=st.bernoulli.pmf(index,mean) + pmf2=st.binom.pmf(index,len(index),p=mean/len(index)) + pmf3=st.geom.pmf(index,1/float(1+mean)) + pmf4=st.nbinom.pmf(index,mean,r) + pmf5=st.poisson.pmf(index,mean) + + sse1 = np.sum(np.power(y1 - pmf1, 2.0)) + sse2 = np.sum(np.power(y1 - pmf2, 2.0)) + sse3 = np.sum(np.power(y1 - pmf3, 2.0)) + sse4 = np.sum(np.power(y1 - pmf4, 2.0)) + sse5 = np.sum(np.power(y1- pmf5, 2.0)) + + sselist=[sse1,sse2,sse3,sse4,sse5] + + for i in range(0,len(sselist)): + if best_sse > sselist[i] > 0: + best_distribution = distributions[i].name + best_sse = sselist[i] + + elif (len(index) == 1): + best_distribution = ""Constant Data-No Distribution"" + best_sse = 0.0 + + distributionName =best_distribution + sse=best_sse + + elif(dataType == ""Continuous""): + + distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] + + best_distribution = st.norm.name + best_sse = np.inf + datamin=data.min() + datamax=data.max() + nrange=datamax-datamin + + y, x = np.histogram(data.astype(float), bins='auto', density=True) + x = (x + np.roll(x, -1))[:-1] / 2.0 + + for distribution in distributions: + with 
warnings.catch_warnings(): + warnings.filterwarnings('ignore') + + params = distribution.fit(data.astype(float)) + # Separate parts of parameters + arg = params[:-2] + loc = params[-2] + scale = params[-1] + + # Calculate fitted PDF and error with fit in distribution + pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) + sse = np.sum(np.power(y - pdf, 2.0)) + if(best_sse >sse > 0): + best_distribution = distribution.name + best_sse = sse + + distributionName =best_distribution + sse=best_sse + except: + response = str(sys.exc_info()[0]) + message='Job has Failed'+response + print(message) + return distributionName,sse + +##KStestStatic -pvalue finding +def teststatic(self,data1,data2): + try: + teststatic =st.ks_2samp(data1,data2) + pValue=0.0 + scipyVersion =scipy.__version__ + if(scipyVersion <= ""0.14.1""): + pValue =teststatic[1] + else: + pValue =teststatic.pvalue + except: + response = str(sys.exc_info()[0]) + print(""Input Drift Job Failed ""+response) + return pValue + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +from pathlib import Path +from AION.prediction_package.imports import importModule +from AION.prediction_package.aion_prediction import aionPrediction +from AION.prediction_package.utility import TAB_CHAR +from AION.prediction_package import utility +from AION.prediction_package.base import deployer +from AION.prediction_package import common +import numpy as np + + + + +def get_deployer( params): + + if params['training']['algo'] == 'ARIMA': + return arima(params) + elif params['training']['algo'] == 'LSTM': + return lstm(params) + elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO': + return lstmencdec_mviuvo(params) + elif params['training']['algo'] == 'MLP': + return mlp(params) + elif params['training']['algo'] == 'VAR': + return var(params) + elif params['training']['algo'] == 'FBPROPHET': + return fbprophet(params) + else: + raise ValueError(f""Algorithm {params['training']['algo']} for time series forecasting is not supported"") + +def _profiler_code(params, importer): + """""" + This will create the profiler file based on the config file. + separated file is created as profiler is required for input drift also. 
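# Minimal, self-contained sketch of the drift check implemented by teststatic() and
# get_input_drift() above: a two-sample Kolmogorov-Smirnov test per feature, with
# p < 0.05 treated as a distribution change (the production code then additionally
# compares the best-fit distributions via DistributionFinder). Data here is synthetic.
import numpy as np
import scipy.stats as st

def ks_drift(train_col, new_col, alpha=0.05):
    statistic, p_value = st.ks_2samp(train_col, new_col)
    return p_value < alpha, p_value

rng = np.random.default_rng(0)
baseline = rng.normal(loc=0.0, scale=1.0, size=1000)
shifted = rng.normal(loc=0.5, scale=1.0, size=1000)
print(ks_drift(baseline, baseline))   # (False, 1.0)    -> no drift flagged
print(ks_drift(baseline, shifted))    # (True, small p) -> drift flagged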
+ """""" + imported_modules = [ + {'module': 'json', 'mod_from': None, 'mod_as': None}, + {'module': 'scipy', 'mod_from': None, 'mod_as': None}, + {'module': 'joblib', 'mod_from': None, 'mod_as': None}, + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} + ] + utility.import_modules(importer, imported_modules) + if 'code' in params['profiler'].get('preprocess',{}).keys(): + code = params['profiler']['preprocess']['code'] + else: + code = """" + code += """""" + +class inputprofiler(): + """""" + init_code = """""" + def __init__(self): + """""" + init_code += """""" + # preprocessing + preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' + if not preprocess_path.exists(): + raise ValueError(f'Preprocess model file not found: {preprocess_path}') + self.profiler = joblib.load(preprocess_path) + """""" + run_code = """""" + def run(self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + """""" + if 'code' in params['profiler'].get('preprocess',{}).keys(): + run_code += """""" + df = preprocess( df)"""""" + if params['profiler'].get('unpreprocessed_columns'): + run_code += f"""""" + unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}'] + df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True) + """""" + if params['profiler'].get('force_numeric_conv'): + run_code += f"""""" + df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""""" + run_code += _profiler_main_code(params) + if params['profiler'].get('unpreprocessed_columns'): + run_code += f"""""" + df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data +"""""" + run_code += """""" return df +"""""" + utility.import_modules(importer, imported_modules) + import_code = importer.getCode() + return import_code + code + init_code + run_code + +def _profiler_main_code(params): + code = f"""""" + df = self.profiler.transform(df) + columns = {params['profiler']['output_features']} + if isinstance(df, scipy.sparse.spmatrix): + df = pd.DataFrame(df.toarray(), columns=columns) + else: + df = pd.DataFrame(df, columns=columns) + """""" + return code + +class arima( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code( self" +"): + imported_modules = [ + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): + + def __init__(self): + pass + + def run( self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[['noofforecasts']] +"""""" + return importer.getCode() + code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df +"""""" + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + self.importer.addModule(module='numpy',mod_as='np') + self.importer.addModule(module='joblib') + return f"""""" +class trainer(): + + def __init__(self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not 
model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = joblib.load(model_file) + + def run(self,df): + return self.model.predict(n_periods=int(df[""noofforecasts""][0])) + """""" + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" + +class output_format(): + + def __init__( self): + pass + + def run(self,raw_df,df): + df = df.round(2) + df = json.dumps(df.tolist()) + outputjson = {""status"":""SUCCESS"",""data"":eval(df)} + return(json.dumps(outputjson)) +"""""" + +class lstm( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code(self): + importer = importModule() + return _profiler_code( self.params, importer) + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + code = f"""""" +class trainer(): +"""""" + init_code, run_code = self._get_train_code() + return code + init_code + run_code + + def _get_train_code(self): + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = load_model(model_file) + """""" + + run_code = f"""""" + def run(self, df): + lag_order={self.params['training']['lag_order']} + xt = df.values + scaler_file = (Path(__file__).parent/""model"")/""{self.params['training']['scaler_file']}"" + if not scaler_file.exists(): + raise ValueError(f'Scaling file not found: {{scaler_file}}') + loaded_scaler_model = joblib.load(scaler_file) + xt = xt.astype('float32') + xt = loaded_scaler_model.transform(xt) + noOfPredictions = 10 + pred_data = xt + y_future = [] + for i in range(noOfPredictions): +"""""" + if len(self.params['selector']['output_features']) == 1: + run_code += f"""""" + pred_data = pred_data[-lag_order:] + pred_data = pred_data.reshape((1,lag_order,1)) + pred = self.model.predict(pred_data) + predoutput = loaded_scaler_model.inverse_transform(pred) + y_future.append(predoutput.flatten()[-1]) + pred_data = np.append(pred_data,pred) + pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + return pred +"""""" + else: + run_code += f"""""" + pdata = pred_data[-lag_order:] + pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) + pred = self.model.predict(pdata) + predoutput = loaded_scaler_model.inverse_transform(pred) + y_future.append(predoutput) + pred_data = np.append(pred_data,pred,axis=0) + pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + return pred +"""""" + return init_code, run_code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" + +class output_format(): + + def __init__( self): + pass + + def run(self,raw_df,df): + df = df.round(2) + df = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(df)} + return(json.dumps(outputjson)) +"""""" + +class lstmencdec_mviuvo( deployer): + + def __init__(self, params={}): + 
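# Hedged sketch (assumption): the generated ARIMA trainer.run() above calls
# self.model.predict(n_periods=...), which matches the pmdarima estimator API, so
# the serialized model is assumed to be a pmdarima model. The toy series and the
# 'noofforecasts' DataFrame mirror the input the generated inputprofiler passes on.
import numpy as np
import pandas as pd
import pmdarima as pm

y = np.sin(np.linspace(0, 20, 120))                      # illustrative training series
model = pm.auto_arima(y, seasonal=False, suppress_warnings=True)
df = pd.DataFrame({'noofforecasts': [5]})                # shape expected by the generated scripts
forecast = model.predict(n_periods=int(df['noofforecasts'][0]))
print(np.round(forecast, 2))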
super().__init__( params) + self.name = 'timeseriesforecasting' + outputFeatrues = params['profiler']['output_features'] + self.targetColIndx = outputFeatrues.index(params['features']['target_feat']) + selectedColDict = params['selector']['output_features'] + self.selectedCols = list() + for col in selectedColDict: + self.selectedCols.append(col) + + def profiler_code(self): + importer = importModule() + return _profiler_code( self.params, importer) + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + code = f"""""" +class trainer(): +"""""" + init_code, run_code = self._get_train_code() + return code + init_code + run_code + + def _get_train_code(self): + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code = f"""""" + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = load_model(model_file) + """""" + + run_code = f"""""" + def run(self, df): + targetColIndx = {self.targetColIndx} + lag_order={self.params['training']['lag_order']} + xt = df.values + scaler_file = (Path(__file__).parent/""model"")/""{self.params['training']['scaler_file']}"" + if not scaler_file.exists(): + raise ValueError(f'Scaling file not found: {{scaler_file}}') + loaded_scaler_model = joblib.load(scaler_file) + xt = xt.astype('float32') + xt = loaded_scaler_model.transform(xt) + noOfPredictions = 10 + pred_data = xt + y_future = [] + pdata = pred_data[-lag_order:] + pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) + pred = self.model.predict(pdata) + pred_1d = pred.ravel() + pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, {len(self.params['selector']['output_features'])}) + pdata_2d[:,targetColIndx] = pred_1d + pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d) + predout = pred_2d_inv[:, targetColIndx] + predout = predout.reshape(len(pred_1d),1) + pred = pd.DataFrame(index=range(0,len(predout)),columns=['{self.params['features']['target_feat']}']) + for i in range(0, len(predout)): + pred.iloc[i] = predout[i] + return pred +"""""" + return init_code, run_code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df[{self.selectedCols}] +"""""" + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" + +class output_format(): + + def __init__( self): + pass + + def run(self,raw_df,df): + df = df.round(2) + df = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(df)} + return(json.dumps(outputjson)) +"""""" + +class mlp( lstm): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + code = f"""""" +class trainer(): +"""""" + init_code, run_code = self._get_train_code() + return code + init_code + run_code + + def _get_train_code(self): + self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') + init_code = f"""""" + def __init__( self): + model_file = 
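# Hedged sketch of the recursive forecasting loop generated by the single-feature
# LSTM/MLP templates above: keep the last `lag_order` (scaled) values, predict one
# step, append the prediction, and repeat. `model` is any callable returning the
# next value; the lambda below stands in for self.model.predict() purely for
# illustration.
import numpy as np

def recursive_forecast(history, model, lag_order, steps):
    window = list(history[-lag_order:])
    preds = []
    for _ in range(steps):
        x = np.array(window[-lag_order:], dtype='float32').reshape(1, lag_order, 1)
        yhat = float(model(x))          # stand-in for self.model.predict(x)
        preds.append(yhat)
        window.append(yhat)
    return preds

toy_model = lambda x: x[0, -1, 0] + 1.0   # illustrative rule: next value = last value + 1
print(recursive_forecast([1.0, 2.0, 3.0, 4.0], toy_model, lag_order=3, steps=4))
# [5.0, 6.0, 7.0, 8.0]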
(Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = load_model(model_file)"""""" + + run_code = f"""""" + def run(self, df): + lag_order={self.params['training']['lag_order']} + xt = df.values + scaler_file = (Path(__file__).parent/""model"")/""{self.params['training']['scaler_file']}"" + if not scaler_file.exists(): + raise ValueError(f'Scaling file not found: {{scaler_file}}') + loaded_scaler_model = joblib.load(scaler_file) + xt = xt.astype('float32') + xt = loaded_scaler_model.transform(xt) + noOfPredictions = 10 + pred_data = xt + y_future = [] + for i in range(noOfPredictions): +"""""" + if len(self.params['selector']['output_features']) == 1: + run_code += f"""""" + pred_data = pred_data[-lag_order:] + pred_data = pred_data.reshape((1,lag_order)) + pred = self.model.predict(pred_data) + predoutput = loaded_scaler_model.inverse_transform(pred) + y_future.append(predoutput.flatten()[-1]) + pred_data = np.append(pred_data,pred) + pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + return pred +"""""" + else: + run_code += f"""""" + pdata = pred_data[-lag_order:] + pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) + pred = self.model.predict(pdata) + predoutput = loaded_scaler_model.inverse_transform(pred) + y_future.append(predoutput) + pred_data = np.append(pred_data,pred,axis=0) + pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) + for i in range(0, len(y_future)): + pred.iloc[i] = y_future[i] + return pred +"""""" + return init_code, run_code + +class var( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code(self): + importer = importModule() + code = _profiler_code( self.params, importer) + return code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df[{self.params['selector']['output_features']}] +"""""" + def training_code( self): + self.importer.addModule(module='joblib') + return f"""""" +class trainer(): + + def __init__( self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = joblib.load(model_file) + + def run(self,df): + lag_order = self.model.k_ar + return self.model.forecast(df.values[-lag_order:],steps={self.params['training']['no_of_prediction']}) + """""" + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return f"""""" + +class output_format(): + + def __init__( self): + pass + + def invertTransformation(self,predictions): + datasetdf = pd.read_csv((" +"Path(__file__).parent/""data"")/""trainingdata.csv"") + dictDiffCount = {self.params['training']['dictDiffCount']} + target_features = ""{self.params['features']['target_feat']}"" + columns = target_features.split(',') + pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns) + for j in range(0,len(columns)): + for i in range(0, len(predictions)): + pred.iloc[i][j] = round(predictions[i][j],2) + prediction = pred + for col in columns: 
+ if col in dictDiffCount: + if dictDiffCount[col]==2: + prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum() + prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum() + prediction = pred + return(prediction) + + def run(self,raw_df,df): + df = self.invertTransformation(df) + df = df.to_json(orient='records',double_precision=2) + outputjson = {{""status"":""SUCCESS"",""data"":json.loads(df)}} + return(json.dumps(outputjson)) +"""""" + +class fbprophet( deployer): + + def __init__(self, params={}): + super().__init__( params) + self.name = 'timeseriesforecasting' + + def profiler_code( self): + imported_modules = [ + {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, + {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, + ] + importer = importModule() + utility.import_modules(importer, imported_modules) + code = """""" + + +class inputprofiler(): + + def __init__(self): + pass + + def run( self,df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + return df[['noofforecasts']] +"""""" + return importer.getCode() + code + + def feature_engg_code(self): + self.importer.addModule(module='pandas',mod_as='pd') + return f"""""" +class selector(): + + def __init__(self): + pass + + def run(self, df): + return df +"""""" + def training_code( self): + self.importer.addModule(module='pandas',mod_as='pd') + self.importer.addModule(module='Path',mod_from='pathlib') + self.importer.addModule(module='joblib') + code = f"""""" +class trainer(): + + def __init__(self): + model_file = (Path(__file__).parent/""model"")/""{self.params['training']['model_file']}"" + if not model_file.exists(): + raise ValueError(f'Trained model file not found: {{model_file}}') + self.model = joblib.load(model_file) + +"""""" + code += f"""""" + def run(self,df): + sessonal_freq = '{self.params['training']['sessonal_freq']}' + ts_prophet_future = self.model.make_future_dataframe(periods=int(df[""noofforecasts""][0]),freq=sessonal_freq,include_history = False) + """""" + if (self.params['training']['additional_regressors']): + code += f"""""" + additional_regressors={self.params['training']['additional_regressors']} + ts_prophet_future[additional_regressors] = dataFrame[additional_regressors] + ts_prophet_future.reset_index(drop=True) + ts_prophet_future=ts_prophet_future.dropna() + """""" + code += """""" + train_forecast = self.model.predict(ts_prophet_future) + prophet_forecast_tail=train_forecast[[\\'ds\\', \\'yhat\\', \\'yhat_lower\\',\\'yhat_upper\\']].tail( int(df[""noofforecasts""][0])) + return(prophet_forecast_tail)"""""" + return code + + def formatter_code(self): + self.importer.addModule('json') + self.importer.addModule('pandas', mod_as='pd') + return """""" + +class output_format(): + + def __init__( self): + pass + + def run(self,raw_df,df): + df = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(df)} + return(json.dumps(outputjson)) + """""" + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
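# Hedged sketch of the differencing inversion performed by invertTransformation()
# in the VAR output_format above: a series differenced before VAR training is
# restored by a cumulative sum anchored on the last observed value(s). The numbers
# and names here are illustrative only.
import pandas as pd

history = pd.Series([10.0, 12.0, 15.0, 19.0])            # original (undifferenced) training series
diffed_forecast = pd.Series([5.0, 6.0, 7.0])             # VAR forecast on the once-differenced scale
restored = history.iloc[-1] + diffed_forecast.cumsum()   # mirrors the single cumsum restore step above
print(restored.tolist())                                  # [24.0, 30.0, 37.0]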
+* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +import pandas as pd +from pandas import json_normalize + +#from selector import selector +#from inputprofiler import inputprofiler +#from trained_model import trained_model +#from output_format import output_format + +from autogluon.tabular import TabularDataset, TabularPredictor +from autogluon.core.utils.utils import setup_outputdir +from autogluon.core.utils.loaders import load_pkl +from autogluon.core.utils.savers import save_pkl +import os.path + + + +class MultilabelPredictor(): + + """""" Tabular Predictor for predicting multiple columns in table. + Creates multiple TabularPredictor objects which you can also use individually. + You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)` + + Parameters + ---------- + labels : List[str] + The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object. + path : str + Path to directory where models and intermediate outputs should be saved. + If unspecified, a time-stamped folder called ""AutogluonModels/ag-[TIMESTAMP]"" will be created in the working directory to store all models. + Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all. + Otherwise files from first `fit()` will be overwritten by second `fit()`. + Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors. + problem_types : List[str] + The ith element is the `problem_type` for the ith TabularPredictor stored in this object. + eval_metrics : List[str] + The ith element is the `eval_metric` for the ith TabularPredictor stored in this object. + consider_labels_correlation : bool + Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others. + If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion). + Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels. + kwargs : + Arguments passed into the initialization of each TabularPredictor. 
+ + """""" + + multi_predictor_file = 'multilabel_predictor.pkl' + + def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs): + if len(labels) < 2: + raise ValueError(""MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column)."") + self.path = setup_outputdir(path, warn_if_exist=False) + self.labels = labels + self.consider_labels_correlation = consider_labels_correlation + self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label + if eval_metrics is None: + self.eval_metrics = {} + else: + self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))} + problem_type = None + eval_metric = None + for i in range(len(labels)): + label = labels[i] + path_i = self.path + ""Predictor_"" + label + if problem_types is not None: + problem_type = problem_types[i] + if eval_metrics is not None: + eval_metric = self.eval_metrics[i] + self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs) + + def fit(self, train_data, tuning_data=None, **kwargs): + """""" Fits a separate TabularPredictor to predict each of the labels. + + Parameters + ---------- + train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame + See documentation for `TabularPredictor.fit()`. + kwargs : + Arguments passed into the `fit()` call for each TabularPredictor. + """""" + if isinstance(train_data, str): + train_data = TabularDataset(train_data) + if tuning_data is not None and isinstance(tuning_data, str): + tuning_data = TabularDataset(tuning_data) + train_data_og = train_data.copy() + if tuning_data is not None: + tuning_data_og = tuning_data.copy() + save_metrics = len(self.eval_metrics) == 0 + for i in range(len(self.labels)): + label = self.labels[i] + predictor = self.get_predictor(label) + if not self.consider_labels_correlation: + labels_to_drop = [l for l in self.labels if l!=label] + else: + labels_to_drop = [labels[j] for j in range(i+1,len(self.labels))] + train_data = train_data_og.drop(labels_to_drop, axis=1) + if tuning_data is not None: + tuning_data = tuning_data_og.drop(labels_to_drop, axis=1) + print(f""Fitting TabularPredictor for label: {label} ..."") + predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs) + self.predictors[label] = predictor.path + if save_metrics: + self.eval_metrics[label] = predictor.eval_metric + self.save() + + def predict(self, data, **kwargs): + """""" Returns DataFrame with label columns containing predictions for each label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`. + kwargs : + Arguments passed into the predict() call for each TabularPredictor. + """""" + return self._predict(data, as_proba=False, **kwargs) + + def predict_proba(self, data, **kwargs): + """""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`. 
+ kwargs : + Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call). + """""" + return self._predict(data, as_proba=True, **kwargs) + + def evaluate(self, data, **kwargs): + """""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label. + + Parameters + ---------- + data : str or autogluon.tabular.TabularDataset or pd.DataFrame + Data to evalate predictions of all labels for, must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`. + kwargs : + Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call). + """""" + data = self._get_data(data) + eval_dict = {} + for label in self.labels: + print(f""Evaluating TabularPredictor for label: {label} ..."") + predictor = self.get_predictor(label) + eval_dict[label] = predictor.evaluate(data, **kwargs) + if self.consider_labels_correlation: + data[label] = predictor.predict(data, **kwargs) + return eval_dict + + def save(self): + """""" Save MultilabelPredictor to disk. """""" + for label in self.labels: + if not isinstance(self.predictors[label], str): + self.predictors[label] = self.predictors[label].path + save_pkl.save(path=self.path+self.multi_predictor_file, object=self) + print(f""MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')"") + + @classmethod + def load(cls, path): + """""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """""" + path = os.path.expanduser(path) + if path[-1] != os.path.sep: + path = path + os.path.sep + return load_pkl.load(path=path+cls.multi_predictor_file) + + def get_predictor(self, label): + """""" Returns TabularPredictor which is used to predict this label. 
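+        For example, `multilabel_predictor.get_predictor(label_i).predict(data)` scores data
+        for that single label only (`label_i` and `data` are placeholders, as in the class docstring).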
"""""" + predictor = self.predictors[label] + if isinstance(predictor, str): + return TabularPredictor.load(path=predictor) + return predictor + + def _get_data(self, data): + if isinstance(data, str): + return TabularDataset(data) + return data.copy() + + def _predict(self, data, as_proba=False, **kwargs): + data = self._get_data(data) + if as_proba: + predproba_dict = {} + for label in self.labels: + print(f""Predicting with TabularPredictor for label: {label} ..."") + predictor = self.get_predictor(label) + if as_proba: + predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs) + data[label] = predictor.predict(data, **" +"kwargs) + if not as_proba: + return data[self.labels] + else: + return predproba_dict + + + +def predict(data): + try: + if os.path.splitext(data)[1] == "".tsv"": + df=pd.read_csv(data,encoding='utf-8',sep='\\t') + elif os.path.splitext(data)[1] == "".csv"": + df=pd.read_csv(data,encoding='utf-8') + else: + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + df = json_normalize(jsonData) + #df0 = df.copy() + + + #profilerobj = inputprofiler() + #df = profilerobj.apply_profiler(df) + #selectobj = selector() + #df = selectobj.apply_selector(df) + + + #modelobj = trained_model() + #output = modelobj.predict(df,"""") + + # Load the Test data for Prediction + # ----------------------------------------------------------------------------# + test_data = df#TabularDataset(data) #'testingDataset.csv' + #subsample_size = 2 + # ----------------------------------------------------------------------------# + + + # Specify the corresponding target features to be used + # ----------------------------------------------------------------------------# + #labels = ['education-num','education','class'] + configFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),'etc','predictionConfig.json') + with open(configFile, 'rb') as cfile: + data = json.load(cfile) + labels = data['targetFeature'] + # ----------------------------------------------------------------------------# + + for x in labels: + if x in list(test_data.columns): + test_data.drop(x,axis='columns', inplace=True) + # ----------------------------------------------------------------------------# + #test_data = test_data.sample(n=subsample_size, random_state=0) + #print(test_data) + #test_data_nolab = test_data.drop(columns=labels) + #test_data_nolab.head() + test_data_nolab = test_data + # ----------------------------------------------------------------------------# + + + # Load the trained model from where it's stored + # ----------------------------------------------------------------------------# + model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'ModelPath') + multi_predictor = MultilabelPredictor.load(model_path) + # ----------------------------------------------------------------------------# + + + # Start the prediction and perform the evaluation + # ----------------------------------------------------------------------------# + predictions = multi_predictor.predict(test_data_nolab) + + for label in labels: + df[label+'_predict'] = predictions[label] + + #evaluations = multi_predictor.evaluate(test_data) + #print(evaluations) + #print(""Evaluated using metrics:"", multi_predictor.eval_metrics) + # ----------------------------------------------------------------------------# + + + # ----------------------------------------------------------------------------# + 
#outputobj = output_format() + #output = outputobj.apply_output_format(df0,output) + outputjson = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + output = json.dumps(outputjson) + print(""predictions:"",output) + return(output) + # ----------------------------------------------------------------------------# + + except KeyError as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + + +if __name__ == ""__main__"": + output = predict(sys.argv[1]) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import warnings +warnings.filterwarnings(""ignore"") +import json +import os +import sys +import pandas as pd +import numpy as np + +from pandas import json_normalize +from autogluon.text import TextPredictor +import os.path + + + +def predict(data): + try: + + if os.path.splitext(data)[1] == "".tsv"": + df=pd.read_csv(data,encoding='utf-8',sep='\\t') + elif os.path.splitext(data)[1] == "".csv"": + df=pd.read_csv(data,encoding='utf-8') + else: + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + df = json_normalize(jsonData) + + model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'text_prediction') + + predictor = TextPredictor.load(model_path) + predictions = predictor.predict(df) + df['predict'] = predictions + outputjson = df.to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + output = json.dumps(outputjson) + print(""predictions:"",output) + return(output) + + except KeyError as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(""predictions:"",json.dumps(output)) + return (json.dumps(output)) + + +if __name__ == ""__main__"": + output = predict(sys.argv[1]) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 
2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- +import logging +logging.getLogger('tensorflow').disabled = True +import json +import mlflow +import mlflow.sklearn +import mlflow.sagemaker as mfs +# from sklearn.ensemble import RandomForestRegressor +from sklearn.metrics import mean_squared_error +from sklearn.model_selection import train_test_split +# from sklearn import datasets +import time +import numpy as np +# Load dataset +# from sklearn.datasets import load_iris +import pickle +# Load the pickled model +# from matplotlib import pyplot +import sys +import os +import boto3 +import subprocess +import os.path +from os.path import expanduser +import platform +from pathlib import Path + + +class aionMlopsService: + def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName): + try: + self.model=model + self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy + self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly) + self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName) + self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri) + self.experiment_name=experiment_name + self.mlflow_modelname=mlflow_modelname + self.awsaccesskey_id=awsaccesskey_id + self.awssecretaccess_key=awssecretaccess_key + self.aws_session_token=aws_session_token + self.mlflow_container_name=mlflow_container_name + self.aws_region=aws_region + self.aws_id=aws_id + self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn + self.sm_app_name=sm_app_name + self.sm_deploy_option=sm_deploy_option + self.delete_ecr_repository=delete_ecr_repository + self.ecrRepositoryName=ecrRepositoryName + + from appbe.dataPath import LOG_LOCATION + sagemakerLogLocation = LOG_LOCATION + + try: + os.makedirs(sagemakerLogLocation) + except OSError as e: + if (os.path.exists(sagemakerLogLocation)): + pass + else: + raise OSError('sagemakerLogLocation error.') + + self.sagemakerLogLocation=str(sagemakerLogLocation) + + + filename_mlops = 'mlopslog_'+str(int(time.time())) + filename_mlops=filename_mlops+'.log' + # filename = 'mlopsLog_'+Time() + filepath = os.path.join(self.sagemakerLogLocation, filename_mlops) + logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') + # logging.basicConfig(filename=""uq_logging.log"", format='%(asctime)s %(message)s',filemode='w') + # logging.basicConfig(filename=""uq_logging.log"", format=' %(message)s',filemode='w') + # logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO) + self.log = logging.getLogger('aionMLOps') + self.log.setLevel(logging.DEBUG) + # mlflow.set_experiment(self.experiment_name) + + except Exception as e: + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def mlflowSetPath(self,path): + track_dir=os.path.join(path,'mlruns') + 
uri=""file:""+str(Path(track_dir)) + return uri + + #Currently not used this delete ecr repository option + def ecr_repository_delete(self,rep_name): + # import subprocess + client = boto3.client('ecr') + repositories = client.describe_repositories() + ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True) + mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true']) + self.log.info('Success: deleted aws ecr repository which contains mlops image.') + + def check_sm_deploy_status(self,app_name): + sage_client = boto3.client('sagemaker', region_name=self.aws_region) + endpoint_description = sage_client.describe_endpoint(EndpointName=app_name) + endpoint_status = endpoint_description[""EndpointStatus""] + try: + failure_reason=endpoint_description[""FailureReason""] + self.log.info(""sagemaker end point creation failure reason is: ""+str(failure_reason)) + except: + pass + endpoint_status=str(endpoint_status) + return endpoint_status + + def invoke_sm_endpoint(self,app_name, input_json): + client = boto3.session.Session().client(""sagemaker-runtime"", self.aws_region) + + response = client.invoke_endpoint( + EndpointName=app_name, + Body=input_json, + ContentType='application/json; format=pandas-split', + ) + # preds = response['Body'].read().decode(""ascii"") + preds = response['Body'].read().decode(""ascii"") + preds = json.loads(preds) + # print(""preds: {}"".format(preds)) + return preds + + def predict_sm_app_endpoint(self,X_test): + #print(X_test) + import pandas as pd + prediction=None + AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) + AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) + AWS_SESSION_TOKEN=str(self.aws_session_token) + region = str(self.aws_region) + #Existing model deploy options + # mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) + # mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) + try: + import subprocess + cmd = 'aws configure set region_name '+region + os.system(cmd) + cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID + os.system(cmd) + cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY + os.system(cmd) + ''' + aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region]) + aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID]) + aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY]) + ''' + except: + pass + #Create a session for aws" +"communication using aws boto3 lib + # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) + session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + + #X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2) + # query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient=""split"") + try: + query_input = pd.DataFrame(X_test).to_json(orient=""split"") + #print(query_input) + prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input) + # self.log.info(""sagemaker end point Prediction: \\n""+str(prediction)) + + except Exception as e: + print(e) + 
return prediction + + + def deleteSagemakerApp(self,app_name,region): + # import mlflow.sagemaker as mfs + # region = 'ap-south-1' + # app_name = 'aion-demo-app' + mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + # print(""AION mlops sagemaker application endpoint is deleted....\\n"") + self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name)) + + def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path): + + region = str(self.aws_region) + aws_id = str(self.aws_id) + iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn) + app_name = str(self.sm_app_name) + + model_uri = str(model_path) + app_status=False + mlflow_root_dir = None + try: + os.chdir(str(self.sagemakerLogLocation)) + mlflow_root_dir = os.getcwd() + self.log.info('mlflow root dir: '+str(mlflow_root_dir)) + except: + self.log.info(""path issue."") + + try: + c_status=self.check_sm_deploy_status(app_name) + #if ((c_status == ""Failed"") or (c_status == ""OutOfService"")): + if ((c_status == ""Failed"") or (c_status.lower() == ""failed"")): + app_status=False + self.log.info(""Sagemaker endpoint status: Failed.\\n"") + mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + elif ((c_status.lower() == ""inservice"") or (c_status == ""InService"")): + app_status=True + self.log.info(""Sagemaker endpoint status: InService. Running sagemaker endpoint name: \\n""+str(app_name)) + else: + app_status=False + pass + except: + # print(""deploy status error.\\n"") + pass + + #aws ecr model app_name should contain only [[a-zA-Z0-9-]] + import re + if app_name: + pattern = re.compile(""[A-Za-z0-9-]+"") + # if found match (entire string matches pattern) + if pattern.fullmatch(app_name) is not None: + #print(""Found match: "") + pass + else: + app_name = 'aion-demo-app' + else: + app_name = 'aion-demo-app' + + mlflow_image=mlflow_container_name+':'+tag_id + image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image + deploy_option=""create"" + self.log.info('deploy_option: \\n'+str(deploy_option)) + if (deploy_option.lower() == ""create""): + # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE + if not (app_status): + try: + mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode=""create"",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url) + self.log.info('sagemaker endpoint created and model deployed. Application name is: \\n'+str(app_name)) + except: + self.log.info('Creating end point application issue.Please check the connection and aws credentials \\n') + else: + self.log.info('Sagemaker application with user endpoint name already running.Please check. 
Please delete the old endpoint with same name.\\n') + + + elif (deploy_option.lower() == ""delete""): + # import mlflow.sagemaker as mfs + # # region = 'ap-south-1' + # # app_name = 'aion-demo-app' + # mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) + # print(""Mlflow sagemaker application endpoint is deleted....\\n"") + # self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name)) + pass + elif (deploy_option.lower() == ""add""): + pass + elif (deploy_option.lower() == ""replace""): + pass + else: + pass + + return app_status + + def mlflow2sagemaker_deploy(self): + self.log.info(' ') + deploy_status=False + app_name = str(self.sm_app_name) + self.log.info('Sagemaker Application Name: '+str(app_name)) + + uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation) + mlflow.set_tracking_uri(uri_mlflow) + mlops_trackuri=mlflow.get_tracking_uri() + mlops_trackuri=str(mlops_trackuri) + self.log.info('mlops tracking uri: '+str(mlops_trackuri)) + localhost_deploy=False + try: + #Loading aion model to deploy in sagemaker + mlflow.set_experiment(self.experiment_name) + self.log.info('Endpoint Name: '+str(self.experiment_name)) + # Assume, the model already loaded from joblib in aionmlflow2smInterface.py file. + aionmodel2deploy=self.model + # run_id = None + # experiment_id=None + + + # Use the loaded pickled model to make predictions + # pred = knn_from_pickle.predict(X_test) + with mlflow.start_run(run_name='AIONMLOps') as run: + + # aionmodel2deploy.fit(X_train, y_train) + # predictions = aionmodel2deploy.predict(X_test) + mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname) + run_id = run.info.run_uuid + experiment_id = run.info.experiment_id + self.log.info('AION mlops experiment run_id: '+str(run_id)) + self.log.info('AION mlops experiment experiment_id: '+str(experiment_id)) + self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname)) + artifact_uri = {mlflow.get_artifact_uri()} + # print(""1.artifact_uri: \\n"",artifact_uri) + mlflow.end_run() + #If we need, we can check the mlflow experiments. + # try: + # mlflow_client = mlflow.tracking.MlflowClient('./mlruns') + # exp_list = mlflow_client.list_experiments() + # except: + # pass + #print(""mlflow exp_list: \\n"",exp_list) + mlflow_modelname=str(self.mlflow_modelname) + + mlops_trackuri=mlops_trackuri.replace('file:','') + mlops_trackuri=str(mlops_trackuri) + # mlflow_root_dir = os.getcwd() + mlflow_root_dir = None + try: + os.chdir(str(self.sagemakerLogLocation)) + mlflow_root_dir = os.getcwd() + self.log.info('mlflow root dir: '+str(mlflow_root_dir)) + except: + self.log.info(""path issue."") + model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname) + # model_path=mlops_trackuri+'\\\\%s\\\\%s\\\\artifacts\\\\%s' % (experiment_id, run_id,mlflow_modelname) + self.log.info(""local host aion mlops model_path is: ""+str(model_path)) + time.sleep(2) + + + #print(""Environment variable setup in the current working dir for aws sagemaker cli connection... \\n"") + self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... 
\\n ') + AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) + AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) + AWS_SESSION_TOKEN=str(self.aws_session_token) + region = str(self.aws_region) + #Existing model deploy options + mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) + mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) + import subprocess + cmd = 'aws configure set region_name '+region + os.system(cmd) + cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID + os.system(cmd) + cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY + os.system(cmd) + #Create a session for aws communication using aws boto3 lib + # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) + session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) + # session = boto3.session.Session( + # aws_access_key_id=AWS_ACCESS_KEY_ID, + # aws_secret_access_key=AWS_SECRET_ACCESS_KEY, + # aws_session_token=AWS_SESSION_TOKEN + # ) + # awsclient = session.resource('ecr') + # s3 = session.resource('s3') + self.log.info('aws environment variable setup done... \\n') + try: + os.chdir(mlflow_root_dir) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) + + mlflow_container_name=str(self.mlflow_container_name) + mlflow_version=mlflow.__version__ + tag_id=mlflow_version + + + if (self.mlflowtosagemakerPushOnly.lower() == ""true""): + self.log.info('Selected option is \\n') + aws_id=str(self.aws_id) + arn=str(self.iam_sagemakerfullaccess_arn) + mlflow_image=mlflow_container_name+':'+tag_id + image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image + # print(""image_url:========= \\n"",image_url) + deploy_status=True + try: + model_path=mlflowtosagemakerdeployModeluri + # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. + self.log.info('Deploy existing model container-Model path given by user: '+str(model_path)) + try: + os.chdir(model_path) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + + try: + mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName) + deploy_status=True + self.log.info('AION mlops pushed the docker container to aws ecr. \\n ') + except: + self.log.info(""error in pushing existing container to ecr.\\n"") + deploy_status=False + + + time.sleep(2) + #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. + try: + # print("" Changing directory to mlflow root dir....\\n"") + os.chdir(mlflow_root_dir) + except FileNotFoundError: + self.log.info('model path is not a directory. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model path is not a directory. 
'+str(mlflow_root_dir)) + # print(""{0} is not a directory"".format(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(ml" +"flow_root_dir)) + + # self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) + try: + if (deploy_status): + self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) + self.log.info('AION creates docker container and push the container into aws ecr.. ') + time.sleep(2) + except: + self.log.info('AION deploy error.check connection and aws config parameters. ') + deploy_status=False + # self.log.info('model deployed in sagemaker. ') + except Exception as e: + self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \\n'+str(e)) + elif (self.mlflowtosagemakerPushOnly.lower() == ""false""): + if (self.mlflowtosagemakerDeploy.lower() == ""true""): + self.log.info('Selected option is \\n') + deploy_status=True + try: + # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. + try: + os.chdir(model_path) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + try: + mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name]) + self.log.info('AION mlops creates docker container and push the container into aws ecr.. ') + deploy_status=True + time.sleep(2) + except: + self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') + deploy_status=False + self.log.info('Now deploying the model container to sagemaker starts....\\n ') + # Once docker push completes, again going back to mlflow parent dir for deployment + #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. + try: + os.chdir(mlflow_root_dir) + except FileNotFoundError: + self.log.info('model_path does not exist. '+str(mlflow_root_dir)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) + + # app_name = str(self.sm_app_name) + try: + if (deploy_status): + self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path) + except: + self.log.info('mlops deploy error.check connection') + deploy_status=False + + except Exception as e: + exc = {""status"":""FAIL"",""message"":str(e).strip('""')} + out_exc = json.dumps(exc) + self.log.info('mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n') + + elif(self.mlflowtosagemakerDeploy.lower() == ""false""): + deploy_status=False + localhost_deploy=True + self.log.info('Selected option is \\n') + self.log.info(""User selected create-Deploy sagemaker option as False,"") + self.log.info(""Creates the AION mlops-sagemaker container locally starting,but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. "") + try: + # ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns. 
+ try: + os.chdir(model_path) + self.log.info('After change to AION mlops model dir, cwd: '+str(model_path)) + except FileNotFoundError: + self.log.info('Directory does not exist. '+str(model_path)) + except NotADirectoryError: + self.log.info('model_path is not a directory. '+str(model_path)) + except PermissionError: + self.log.info('Issue in permissions to change to model dir. '+str(model_path)) + + # mlflow_container_local=subprocess.run(['AION mlops', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) + + try: + if not (deploy_status): + mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) + self.log.info('AION creates local host bsed docker container and push the container local docker repository. Check with command.\\n ') + localhost_deploy=True + time.sleep(2) + except: + self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') + deploy_status=False + localhost_deploy=False + + # print(""AION mlops creates docker container and push the container into aws ecr.\\n"") + self.log.info('AION mlops creates docker container and stored locally... ') + time.sleep(2) + except Exception as e: + localhost_deploy=False + # print(""mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n"") + self.log.info('AION mlops failed to creates docker container in local machine.\\n'+str(e)) + else: + self.log.info('Deploy option not selected, Please check. ') + localhost_deploy=False + deploy_status=False + else: + pass + + + + + + localhost_container_status=""Notdeployed"" + mlflow2sm_deploy_status=""Notdeployed"" + if localhost_deploy: + localhost_container_status=""success"" + mlflow2sm_deploy_status=""Notdeployed"" + # print(""AION creates local docker container successfully.Please check in docker repository."") + self.log.info(""AION creates local docker container successfully.Please check in docker repository."") + # else: + # localhost_container_status=""failed"" + # # print(""AION failed to create local docker container successfully.Please check in docker repository."") + # self.log.info(""AION failed to create local docker container successfully.Please check in docker repository."") + + if (deploy_status): + # Finally checking whether mlops model is deployed to sagemaker or not. 
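+                    # This branch only builds the MLflow SageMaker-compatible image on the local
+                    # Docker daemon; the subprocess call below is equivalent to running
+                    #   mlflow sagemaker build-and-push-container --build --no-push --container <mlflow_container_name>
+                    # so nothing is pushed to AWS ECR and no SageMaker endpoint is created here.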
+ app_name = str(self.sm_app_name) + deploy_s = self.check_sm_deploy_status(app_name) + if (deploy_s == ""InService""): + # print(""AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n"") + self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n'+str(app_name)) + mlflow2sm_deploy_status=""success"" + localhost_container_status=""Notdeployed"" + else: + # print(""AION Mlflow model not able to deploy at aws sagemaker\\n"") + self.log.info('AION mlops model not able to deploy at aws sagemaker.\\n') + mlflow2sm_deploy_status=""failed"" + localhost_container_status=""Notdeployed"" + + # else: + # mlflow2sm_deploy_status=""None"" + + + return mlflow2sm_deploy_status,localhost_container_status + + except Exception as inst: + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + import json +import ast +import sys +import time +from pathlib import Path +import pandas as pd +from AION.llm import llm_utils + +bench_mark_file = {'code':'code_eval.sh', 'doc': 'doc_eval.sh'} +DB_TABLE = 'llm_benchmarking' + +def bench_mark(hypervisor,instanceid,model,usecaseid,eval='code'): + + output = {} + started = False + if eval not in bench_mark_file.keys(): + raise ValueError(f""Evaluation for '{eval}' is not supported.\\nSupported types are {list(bench_mark_file.keys())}"") + db = benchmark_db( DB_TABLE, usecaseid) + db.update_state('running') + try: + server = llm_utils.hypervisor( hypervisor,instanceid) + if not server.is_already_running(): + started, msg = server.start() + if not started: + raise ValueError( msg) + ssh = server.ssh_details() + pem_file = str(Path(__file__).parent/ssh['keyFilePath']) + sh_file = llm_utils.remote_code_dir(as_str=True) + '/' + bench_mark_file[eval] + cmd = sh_file + ' ' + usecaseid + ' '+ str(model) + print(cmd) + from llm.ssh_command import run_ssh_cmd + buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', cmd) + if isinstance( buf, str): + print( buf) + else: + print( buf.decode('utf-8')) + if buf: + if 'eval_output:' in buf: + output = buf.split('eval_output:')[-1].rstrip() + output = ast.literal_eval( output) + record = {} + record['state'] = 'Success' + record['eval_type'] = eval + record['result_type'] = 'value' if eval =='code' else 'dict' + record['result'] = output + db.put_record( record) + else: + record = {} + record['state'] = 'Error' + record['eval_type'] = eval + db.put_record( record) + return output + except Exception as e: + print(e) + record = {} + record['state'] = 'Error' + record['eval_type'] = eval + record['result_type'] = 'value' if eval =='code' else 'dict' + record['result'] = [{'error': str(e)}] + db.put_record( record) + output = {'status':'Error','msg':str(e)} + return output + +class benchmark_db(): + + def __init__(self, table_name, usecaseid): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = str(Path(DATA_DIR)/'sqlite') + self.sqlite_obj = sqlite_db(file_path,'config.db') + self.table_name = table_name + self.usecaseid = usecaseid + self.columns = ['usecaseid','state','eval_type','result_type','result'] + self.sqlite_obj.create_table(self.table_name, self.columns, ['TEXT' for x in self.columns]) + + def put_record(self, record={}): + db_data = self.sqlite_obj.get_data(self.table_name,'usecaseid',self.usecaseid) + if (len(db_data) > 0): + self.sqlite_obj.update_dict_data(record,f'""usecaseid""=""{self.usecaseid}""',self.table_name) + else: + 
data = {x:[str(record[x])] if x in record.keys() else [''] for x in self.columns} + data['usecaseid'] = self.usecaseid + self.sqlite_obj.write_data(pd.DataFrame.from_dict(data),self.table_name) + + def update_state(self, state, error=None): + data = {x:'' for x in self.columns} + data['state'] = state + data['usecaseid'] = self.usecaseid + if error: + data['result'] = error + self.put_record( data) + + def read_data(self): + return self.sqlite_obj.read_data(self.table_name) + +if __name__ == '__main__': + + run_code_benchmarking = False + if run_code_benchmarking: + #for code + bench_mark('aws','i-0c7bfeddd00658f45','CodeLLaMA-2-7B','AI0025_1',eval='code') + else: + # for document + bench_mark('aws','i-0c7bfeddd00658f45','LLaMA-2-7B','AI0041_1',eval='doc') + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = str(Path(DATA_DIR)/'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + data = sqlite_obj.read_data('llm_benchmarking') + print(data) + import json +import os +import sys +import pandas as pd +import time +from stat import S_ISDIR, S_ISREG +import paramiko +from pathlib import Path +import logging +import boto3 +from botocore.exceptions import ClientError +import re +remote_data_dir = '/home/ubuntu/AION/data/storage' +remote_config_dir = '/home/ubuntu/AION/data/config' +running_state_code = 16 +stopped_state_code = 80 +sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh ' +prompt_command = '/home/ubuntu/AION/llm/sbin/run_inference.sh' + +def create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id,aws_secret_key): + try: + ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region) + response = ec2.run_instances(ImageId=image_id, InstanceType=instance_type, SecurityGroupIds=[security_group_id], MaxCount=1, MinCount=1, TagSpecifications=[{'ResourceType': 'instance', 'Tags': [{'Key': 'Name', 'Value': instance_name}]}]) + #print('Instance ID:', response['Instances'][0]['InstanceId']) + return response['Instances'][0]['InstanceId'],'' + except Exception as e: + print(e) + return '',str(e) + +def check_instance(aws_access_key_id, aws_secret_key, instance_id,region): + ip = '' + ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=" +"aws_access_key_id, + aws_secret_access_key=aws_secret_key) + response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True) + if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running': + ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress'] + return 'Running',ip + else: + return 'NotRunning',ip +def get_instance_ip(aws_access_key_id, aws_secret_key, instance_id,region): + try: + ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_key) + response = ec2.describe_instances(InstanceIds=[instance_id]) + + + ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress'] + return ip + except Exception as e: + print(e) + return '' + +def start_instance(aws_access_key_id, aws_secret_key, instance_id,region): + + + ip = '' + ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_key) + response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True) + if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running': + print(""Instance is already running"") + try: + response = 
ec2.start_instances(InstanceIds=[instance_id], DryRun=False) + #print(response) + instance_status_code = 0 + while instance_status_code != running_state_code: + response = ec2.describe_instances(InstanceIds=[instance_id]) + instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] + if instance_status_code == running_state_code: + ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress'] + break + except Exception as e: + print(e) + return 'Fail',str(e),'' + return 'Success','Success',ip +def is_running(instance_id,region,aws_access_key_id,aws_secret_key): + try: + ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_key) + response = ec2.describe_instance_status(InstanceIds=[instance_id], IncludeAllInstances=True) + if 'Reservations' in response and len(response['Reservations']) > 0: + state = response['Reservations'][0]['Instances'][0]['State']['Name'] + return state + elif 'InstanceStatuses' in response: + return response['InstanceStatuses'][0]['InstanceState']['Name'] + else : + return 'failed' + except: + return ""error"" + +def terminate_instance(instance_id,region,aws_access_key_id,aws_secret_key): + try: + ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region) + response = ec2.terminate_instances(InstanceIds=[instance_id]) + return response['TerminatingInstances'][0]['InstanceId'],'success' + except Exception as e: + print(e),'failed' + +def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username): + try: + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + while True: + try: + client.connect(host, username=username, key_filename=pem_file) + sftp = client.open_sftp() + break + except: + time.sleep(10) + + try: + sftp.stat(remote_data_dir) + print(f""Path: '{remote_data_dir}' already exist"") + except FileNotFoundError: + sftp.mkdir(remote_data_dir) + try: + sftp.stat(remote_config_dir) + print(f""Path: '{remote_config_dir}' already exist"") + except FileNotFoundError: + sftp.mkdir(remote_config_dir) + Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir) + for traindata_files in Train_Data_InRemoteArtifacts: + print(traindata_files) + if not traindata_files.endswith('.ipynb_checkpoints'): + sftp.remove(remote_data_dir +'/'+ traindata_files) + if os.path.isdir(local_data_file_path): + list_pdf_json_files = os.listdir(local_data_file_path) + for file_pdf_json in list_pdf_json_files: + sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json) + print(file_pdf_json + "" data copied successfully"") + else: + filename = os.path.basename(local_data_file_path) + directoryname= os.path.dirname(local_data_file_path) + sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename) + print(filename + "" data copied successfully"") + + config_InRemoteArtifacts = sftp.listdir(remote_config_dir) + for config_file in config_InRemoteArtifacts: + print(config_file) + if not config_file.endswith('.ipynb_checkpoints'): + sftp.remove(remote_config_dir +'/'+ config_file) + + if local_config_file_path != '': + if os.path.isdir(local_config_file_path): + list_pdf_json_files = os.listdir(local_config_file_path) + for file_pdf_json in list_pdf_json_files: + sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json) + print(file_pdf_json + "" config copied successfully"") + else: + # 
updated_config_file_path = create_config(local_config_file_path) + filename = os.path.basename(local_config_file_path) + directoryname= os.path.dirname(local_config_file_path) + sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json') + print(filename + "" config copied successfully"") + + + remote_files = sftp.listdir(remote_config_dir) + print(""List of uploaded files"",remote_files) + sftp.close() + client.close() + except Exception as e: + print(e) + +def check_status(ip,username,pem_file): + logoutput = read_log_file(ip,username,pem_file) + if ""aion_llmfinetuning_Status"" in logoutput: + return True + else: + return False + +def read_log_file(ip,username,pem_file): + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect(host, username=username, key_filename=pem_file) + log_file_path = '/home/ubuntu/AION/data/log/fine_tuning_log.log' + stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}') + time.sleep(5) + client.close() + return stdout.read().decode() + + +def run_ssh_cmd(ip,pem_file,username,log,command): + try: + buf = '' + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + while True: + try: + client.connect(host, username=username, key_filename=pem_file) + break + except: + time.sleep(10) + + stdin, stdout, stderr =client.exec_command(command) + + for line in stdout: + if log != '': + log.info(line.strip()) + else: + if buf != '': + buf= buf+'\\n' + buf = buf+line.strip() + print(buf) + client.close() + return buf + except Exception as e: + print(str(e)) + raise Exception(str(e)) + +def start_logging(deployFolder,modelName,version): + try: + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8') + log = logging.getLogger('log_llm') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + return log + except Exception as e: + print(str(e)) + +def update_sqllite_data(usecaseid,variable,variable_value): + try: + print(usecaseid) + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid) + if (len(data) > 0): + sqlite_obj.update_data('""'+variable+'""=""'+variable_value+'""','""usecaseid""=""'+str(usecaseid)+'""','LLMTuning') + return('Success') + + data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='AWS',status='NA') + data.update({variable:variable_value}) + df = pd.DataFrame(data, index=[0]) + sqlite_obj.write_data(df,'LLMTuning') + return('Success') + except Exception as e: + print(e) + return('Error') + +def LLM_predict(cloudconfig,instanceid,promptfile): + with open(cloudconfig, 'r') as config_f: + cloud_infra = json.load(config_f) + config_f.close() + aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID'] + aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey'] + region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName'] + ip = start_instance(aws_access_key_id, aws_secret_key, instanceid,region) + currentDirectory = 
os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath']) + username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName'] + copy_files_to_server(ip,pem_file,promptfile,'',username) + promptfile = os.path.basename(promptfile) + command = prompt_command+' '+remote_data_dir+'/'+ promptfile + buf = run_ssh_cmd(ip, pem_file, username,'',command) + return buf + +def LLM_tuning_lemma7b(config,cloudconfig): + with open(config, 'r') as config_f: + config_data = json.load(config_f) + config_f.close() + modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion'] + log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion']) + with open(cloudconfig, 'r') as config_f: + cloud_infra = json.load(config_f) + config_f.close() + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID'] + aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey'] + instance_type = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['InstanceType'] + security_group_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['SecurityGroupId'] + region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName'] + image_id = cloud_infra['AWS_EC2']['LLaMa7B']['amiId'] + pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath']) + username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName'] + datafile = config_data['basic']['dataLocation'] + instance_name = config_data['basic']['modelName']+'_'+str(config_data['basic']['modelVersion'])+'_LLMTuning' + configfile = config + ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP'] + if image_id != '': + log.info(""Status:-|... Create Instance"") + instance_id = create_instance(image_id, instance_type, security_group_id,region,instance_name) + elif cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId'] != '': + instance_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId'] + update_sqllite_data(modelid,'instance',instance_id) + print(instance_id) + else: + instance_id = '' + ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP'] + if instance_id != '': + log.info(""Status:-|... Start Instance"") + update_sqllite_data(modelid,'status','Initialize') + ip = start_instance(aws_access_key_id, aws_secret_key, instance_id,region) + print(ip) + if ip != '': + update_sqllite_data(modelid,'ip',ip) + try: + log.info(""Status:-|... Copy Files"") + copy_files_to_server(ip,pem_file,datafile,configfile,username) + log.info(""Status:-|... 
Start LLM Tuning"") + update_sqllite_data(modelid,'status','Tuning') + run_ssh_cmd(ip, pem_file, username,log,sh_file_path) + log_data = read_log_file(ip,username,pem_file) + outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1) + + outputStr = outputStr.strip() + outputStr = json.loads(outputStr) + from llm.llm_tuning import save" +"_output + outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr) + print(outputStr) + if ""Tuning completed Successfully"" in log_data: + update_sqllite_data(modelid,'status','Success') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + return output + else: + update_sqllite_data(modelid,'status','Error') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + return output + except Exception as e: + print(e) + log.info(str(e)) + output = {'status':'FAIL','message':str(e),'LogFile':''} + output = json.dumps(output) + print(f""\\naion_learner_status:{output}\\n"") + return output + else: + output = {'status':'FAIL','message':'Not Configured Properly','LogFile':''} + output = json.dumps(output) + print(f""\\naion_learner_status:{output}\\n"") + return output + +def stop_server_instance(aws_access_key_id, aws_secret_access_key, instance_id,region): + ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) + try: + ec2.stop_instances(InstanceIds=[instance_id, ], DryRun=True) + except ClientError as e: + if 'DryRunOperation' not in str(e): + raise + + # Dry run succeeded, call stop_instances without dryrun + try: + response = ec2.stop_instances(InstanceIds=[instance_id], DryRun=False) + response = ec2.describe_instances(InstanceIds=[instance_id]) + instance_status_code = 0 + while instance_status_code != stopped_state_code: + response = ec2.describe_instances(InstanceIds=[instance_id]) + instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] + if instance_status_code == stopped_state_code: + print(""Instance Stopped"") + break + return ""Process Completed"" + except ClientError as e: + print(e) + return ""Process failed"" + +if __name__ == ""__main__"": + status,msg = LLM_tuning_lemma7b(sys.argv[1],sys.argv[2]) + print(status, msg) + +def check_file_on_server(file_path, ip, pem_file): + is_wait = True + try: + host = ip + username = ""ubuntu"" + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect(host, username=username, key_filename=pem_file) + + sftp = client.open_sftp() + sftp.stat(file_path) + print(""Model File created On Server"") + is_wait = False + except IOError: + is_wait = True + print(""Model training is in progress.."") + return is_wait + + +def removeremotefolder_files(sftp, path): + try: + files = sftp.listdir(path=path) + for f in files: + filepath = path + ""/"" + f + print(filepath) + if isdir(sftp, filepath): + removeremotefolder_files(sftp, filepath) + else: + sftp.remove(filepath) + sftp.rmdir(path) + except IOError as e: + print(e) + + +def isdir(sftp, path): + try: + return S_ISDIR(sftp.stat(path).st_mode) + except IOError: + return False + + +def get_remote_folder(ip, remotedir, localdir, pem_file, preserve_mtime=False): + host = ip + username = ""ubuntu"" + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect(host, username=username, 
key_filename=pem_file) + sftp = client.open_sftp() + + for entry in sftp.listdir(remotedir): + remotepath = remotedir + ""/"" + entry + localpath = os.path.join(localdir, entry) + mode = sftp.stat(remotepath).st_mode + if S_ISDIR(mode): + try: + os.mkdir(localpath, mode=777) + except OSError: + pass + get_remote_folder(ip, remotepath, localpath, pem_file, preserve_mtime) + elif S_ISREG(mode): + sftp.get(remotepath, localpath) + print(""{} downloaded successfully"".format(entry)) + + import json +import os +import sys +import pandas as pd +import time +from stat import S_ISDIR, S_ISREG +from pathlib import Path +import logging +import re +import tarfile +from llm import llm_utils +#remote_data_dir = '/home/ubuntu/AION/data/storage' +remote_data_rawdata_dir = '/home/aion/data/storage/raw_data' +remote_data_processeddata_dir = '/home/aion/data/storage/processed_data' +remote_config_dir = '/home/aion/data/config' +sh_file_path = '/home/aion/llm/sbin/llm_model_finetuning.sh' +unstructured_script_path = '/home/aion/llm/sbin/llm_model_finetuning.sh' +def start_logging(deployFolder,modelName,version): + try: + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8') + log = logging.getLogger('log_llm') + log.propagate = False + for hdlr in log.handlers[:]: # remove the existing file handlers + if isinstance(hdlr,logging.FileHandler): + log.removeHandler(hdlr) + log.addHandler(filehandler) + log.setLevel(logging.INFO) + return log + except Exception as e: + print(str(e)) + +def update_sqllite_data(usecaseid,variable,variable_value): + try: + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + column_names = sqlite_obj.column_names('LLMTuning') + #print(column_names) + if 'region' not in column_names: + query = 'Alter Table LLMTuning ADD region TEXT' + sqlite_obj.execute_query(query) + if 'image' not in column_names: + query = 'Alter Table LLMTuning ADD image TEXT' + sqlite_obj.execute_query(query) + + data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid) + if (len(data) > 0): + sqlite_obj.update_data('""'+variable+'""=""'+variable_value+'""','""usecaseid""=""'+str(usecaseid)+'""','LLMTuning') + return('Success') + + data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='NA',status='NA',region='',image='') + data.update({variable:variable_value}) + df = pd.DataFrame(data, index=[0]) + sqlite_obj.write_data(df,'LLMTuning') + return('Success') + except Exception as e: + print(e) + return('Error') + +def save_output(deployFolder,modelName,version,outputstr,hypervisor,instance): + try: + deployLocation = Path(deployFolder)/modelName/str(version)/'etc' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'output.json' + dpath = Path(deployFolder)/modelName/str(version) + outputstr['data']['deployLocation'] = str(dpath) + outputstr['data']['vmDetails'] = str(hypervisor)+' Instance: '+str(instance) + outputstr['data']['LogFile'] = str(dpath/'log'/'model_training_logs.log') + with open(deployLocation/name, 'w',encoding='utf-8') as f: + json.dump(outputstr, f) + f.close() + + return (outputstr) + except Exception as e: + print(str(e)) + print(outputstr) + +def llm_logs(config,cloudconfig,instanceid,hypervisor,mlmodels): + try: + + with open(config, 'r') as 
config_f: + config_data = json.load(config_f) + config_f.close() + modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion'] + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + region = amiDetails['regionName'] + from llm.aws_instance_api import check_instance + status,ip = check_instance(aws_access_key_id, aws_secret_key, instanceid,region) + if status.lower() == 'running': + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + from llm.ssh_command import read_log_file + logs = read_log_file(ip,username,'',pem_file) + deployFolder = config_data['basic']['deployLocation'] + modelName = config_data['basic']['modelName'] + version = config_data['basic']['modelVersion'] + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + with open(deployLocation/name, 'r+',encoding='utf-8') as f: + lines = [line.rstrip('\\n') for line in f] + for log in logs: + if log not in lines: + # inserts on top, elsewise use lines.append(name) to append at the end of the file. + lines.insert(0, log) + f.seek(0) # move to first position in the file, to overwrite ! + f.write('\\n'.join(lines)) + else: + status = {'status':'Error','msg':'Instance not running'} + output = json.dumps(status) + deployFolder = config_data['basic']['deployLocation'] + modelName = config_data['basic']['modelName'] + version = config_data['basic']['modelVersion'] + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + with open(deployLocation/name, 'r+',encoding='utf-8') as f: + f.write('aion_learner_status:'+str(output)) + f.close() + + else: + credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials'] + projectID = cloud_infra['gcpCredentials']['projectID'] + amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid) + zone = amiDetails['regionName'] + username = username = amiDetails['ssh']['userName'] + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + password = '' + from llm.gcp_compute_api import check_instance + status,ip = check_instance(credentialsJson,projectID, zone, instanceid) + if status.lower() == 'running': + from llm.ssh_command import read_log_file + logs = read_log_file(ip,username,'',pem_file) + deployFolder = config_data['basic']['deployLocation'] + modelName = config_data['basic']['modelName'] + version = config_data['basic']['modelVersion'] + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + with open(deployLocation/name, 'r+',encoding='utf-8') as f: + lines = [line.rstrip('\\n') for line in f] + for log in logs: + if log not in lines: + lines.insert(0, log) + f.seek(0) # move to first position in the file, to overwrite ! 
+ f.write('\\n'.join(lines)) + else: + status = {'status':'Error','msg':'Instance not running'} + output = json.dumps(status) + deployFolder = config_data['basic']['deployLocation'] + modelName = config_data['basic']['modelName'] + version = config_data['basic']['modelVersion'] + deployLocation = Path(deployFolder)/modelName/str(version)/'log' + deployLocation.mkdir(parents=True, exist_ok=True) + name = 'model_training_logs.log' + with open(deployLocation/name, 'r+',encoding='utf-8') as f: + f.write('aion_learner_status:'+str(output)) + f.close() + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) +def tardirectory(path,tarfilepath,ext): + with tarfile.open(tarfilepath, 'w:tar') as tarhandle: + for root, dirs, files in os.walk(path): + for f in files: + if ext == 'doc': + if f.endswith('.' + 'doc') or f.endswith('.' + 'docx'): + tarhandle.add(os.path.join(root, f), arcname=f) + else: + if f.endswith('.'+ext): + tarhandle.add(os.path.join(root, f),arcname=f) + tarhandle.close() + +def getAMIDetails(config,selectedAMI): + y = {} + for x in config: + if x['id'] == selectedAMI: + return x + return y +def run(config): + with open(config, 'r') as config_f: + config_data = json.load(config_f) + config_f.close() + modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion'] + log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data" +"['basic']['modelVersion']) + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + filetimestamp = str(int(time.time())) + instance_name = config_data['basic']['modelName']+'-'+str(config_data['basic']['modelVersion'])+'-LLM-'+filetimestamp + instance_name = instance_name.lower() + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + if os.path.isdir(config_data['basic']['dataLocation']): + from appbe.dataPath import DATA_FILE_PATH + filetimestamp = str(int(time.time())) + tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar') + tardirectory(config_data['basic']['dataLocation'],tarfilepath,config_data['basic']['folderSettings']['fileExtension']) + config_data['basic']['dataLocationUnstructured'] = tarfilepath + with open(config, ""w"") as outfile: + json.dump(config_data, outfile) + outfile.close() + + if cloud_infra['computeInfrastructure'] == 'GCP': + log.info(""Status:-|... Compute Infrastructure GCP GCE"") + credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials'] + #credentialsJson = ""C:\\AION\\GCP-Instance-Utilityv2\\GCP-Instance-Utility\\ers-research.json"" + selectedID = cloud_infra['gcpCredentials']['selectedID'] + projectID = cloud_infra['gcpCredentials']['projectID'] + zone = cloud_infra['gcpCredentials']['regionName'] + selectMachineType = cloud_infra['gcpCredentials']['machineType'] + if selectMachineType.lower() == 'image': + amiDetails = getAMIDetails(cloud_infra['GCP']['machineImage'],selectedID) + machineImageName = amiDetails['id'] + + else: + amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], selectedID) + zone = amiDetails['regionName'] + + machineImageName = '' + instance_name = selectedID + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + if machineImageName != '': + log.info(""Status:-|... 
Create Instance Start"") + try: + server = llm_utils.gcp_server("""",machineImageName) + ip,msg = server.create(instance_name) + log.info(""Status:-|... Create Instance End"") + if ip == '': + if ""resources available"" in msg: + msg = ""The respective zone (or region) does not have enough resources available to fulfill the request. Please try after some time."" + output_json = {""status"": ""FAIL"", ""message"": str(msg), ""LogFile"": ''} + output = json.dumps(output_json) + log.info(""Status:-|... Failed to create the instance. ""+str(msg)) + print(f""\\naion_learner_status:{output}\\n"") + return output + except Exception as e: + print(str(e)) + output_json = {""status"":""FAIL"",""message"":'Failed to create the instance.',""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... Failed to create the instance."") + print(f""\\naion_learner_status:{output}\\n"") + return output + else: + server = llm_utils.gcp_server(instance_name, """") + server.start() + ip = server.ip + if ip != '': + time.sleep(20) + if selectMachineType.lower() == 'machineimage': + update_sqllite_data(modelid, 'image', machineImageName) + update_sqllite_data(modelid,'hypervisor','GCP') + update_sqllite_data(modelid, 'region', zone) + update_sqllite_data(modelid,'ip',ip) + update_sqllite_data(modelid,'instance',instance_name) + from llm.ssh_command import copy_files_to_server + + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + datafile = config_data['basic']['dataLocationUnstructured'] + else: + datafile = config_data['basic']['dataLocation'] + log.info(""Status:-|... Upload tuning data Start"") + try: + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir) + else: + copy_files_to_server(ip, pem_file, datafile, config, username,'', remote_data_processeddata_dir,remote_config_dir) + time.sleep(20) + log.info(""Status:-|... Upload tuning data End"") + log.info(""Status:-|... 
Start LLM Tuning"") + update_sqllite_data(modelid,'status','Tuning') + from llm.ssh_command import run_ssh_cmd + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + script_path = unstructured_script_path + else: + script_path = sh_file_path + print(script_path) + run_ssh_cmd(ip,pem_file, username,'',log,script_path) + from llm.ssh_command import read_log_file + log_data = read_log_file(ip,username,'',pem_file) + outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1) + + outputStr = outputStr.strip() + outputStr = json.loads(outputStr) + outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'GCP GCE',instance_name) + vmRunning = config_data['basic'].get('vmRunning','KeepRunning') + if vmRunning.lower() != 'keeprunning': + from llm.gcp_compute_api import stop_instance + server.stop() + if ""Tuning Completed Successfully"" in log_data: + update_sqllite_data(modelid,'status','Success') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + return output + else: + server.showndown() + update_sqllite_data(modelid,'status','Error') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + return output + except Exception as e: + print(e) + server.showndown() + output_json = {""status"": ""FAIL"", ""message"": str(e), ""LogFile"": ''} + output = json.dumps(output_json) + log.info(""Status:-|... "" + str(e)) + print(f""\\naion_learner_status:{output}\\n"") + return output + else: + output_json = {""status"":""FAIL"",""message"":'Failed to initialize the instance',""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... Failed to initialize the instance"") + print(f""\\naion_learner_status:{output}\\n"") + return output + elif cloud_infra['computeInfrastructure'] == 'AWS': + log.info(""Status:-|... Compute Infrastructure AWS EC2"") + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + selectMachineType = cloud_infra['awsCredentials']['machineType'] + selectedID = cloud_infra['awsCredentials']['selectedID'] + region = cloud_infra['awsCredentials']['regionName'] + if selectMachineType.lower() == 'ami': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'],selectedID) + instance_type = amiDetails['instanceSetting']['instanceType'] + security_group_id = cloud_infra['awsCredentials']['securitygroupid'] + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], selectedID) + region = amiDetails['regionName'] + #region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName'] + image_id = amiDetails['id'] + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + datafile = config_data['basic']['dataLocationUnstructured'] + else: + datafile = config_data['basic']['dataLocation'] + if selectMachineType.lower() == 'ami': + log.info(""Status:-|... Create Instance Start"") + server = llm_utils.aws_server('', image_id) + instance_id,msg = server.create(instance_name) + if instance_id == '': + output_json = {""status"":""FAIL"",""message"":'Failed to initialize the instance. '+str(msg),""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... 
Failed to initialize the instance"") + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + log.info(""Status:-|... Create Instance End"") + elif selectMachineType.lower() == 'instance': + instance_id = image_id + update_sqllite_data(modelid,'instance',instance_id) + server = llm_utils.aws_server( instance_id, '') + else: + output_json = {""status"":""FAIL"",""message"":'AMI is not configured',""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... AMI is not configured"") + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + # instance_id = '' + # ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP'] + try: + from appbe.models import get_published_models + already_published,published_usecase = get_published_models(instance_id) + if already_published: + Msg = f'Fine Tuned Model-{published_usecase} is already published at the same instance, Please unpublish the mentioned model to proceed.' + output_json = {""status"":""Error"",""message"":Msg,""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... A Model is already Published at the same instance."") + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + except Exception as e: + log.info(str(e)) + print(str(e)) + if instance_id != '': + log.info(""Status:-|... Start Instance"") + if selectMachineType.lower() == 'ami': + update_sqllite_data(modelid, 'image', image_id) + update_sqllite_data(modelid, 'region', region) + update_sqllite_data(modelid,'instance',instance_id) + update_sqllite_data(modelid,'hypervisor','AWS') + update_sqllite_data(modelid,'status','Initialize') + + status,msg = server.start() + ip = server.ip + time.sleep(20) + if status and ip != '': + update_sqllite_data(modelid,'ip',ip) + try: + log.info(""Status:-|... Copy Files"") + from llm.ssh_command import copy_files_to_server + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + print(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir) + copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir) + else: + print(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir, remote_config_dir) + copy_files_to_server(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir,remote_config_dir) + time.sleep(20) + log.info(""Status:-|... 
Start LLM Tuning"") + update_sqllite_data(modelid,'status','Tuning') + from llm.ssh_command import run_ssh_cmd + if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True': + script_path = unstructured_script_path + else: + script_path = sh_file_path + #print(script_path) + #sys.exit() + run_ssh_cmd(ip, pem_file, username,'',log,script_path) + from llm.ssh_command import read_log_file + log_data = read_log_file(ip,username,'',pem_file) + outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1) + + outputStr = outputStr.strip() + outputStr = json.loads(outputStr) + outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'AWS EC2',instance_id) + vmRunning = config_data['basic'].get('vmRunning','KeepRunning') + if vmRunning.lower() != 'keeprunning': + server.stop() + if ""Tuning Completed Successfully"" in log_data: + update_sqllite_data(modelid,'status','Success') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + else: + server.showndown() + update_sqllite_data(modelid,'status','Error') + output = json.dumps(outputStr) + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + except Exception as e: + print(e) + log.info(str(e)) + server.showndown() + output = {'status': 'FAIL', 'message': str(e), 'LogFile': ''} + output = json.dumps(output) + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status:{output}\\n"") + return output + else: + output = {'status':'FAIL','message':msg,'LogFile':''} + output = json.dumps(output) + print(f""\\naion_learner_status:{output}\\n"") + log.info(f""\\naion_learner_status" +":{output}\\n"") + return output + else: + output_json = {""status"":""FAIL"",""message"":'Failed to initialize the instance',""LogFile"":''} + output = json.dumps(output_json) + log.info(""Status:-|... 
Failed to initialize the instance"") + print(f""\\naion_learner_status:{output}\\n"") + return output import json +import os +import time +remote_data_dir = '/home/aion/data/storage/prompt' +remote_config_dir = '/home/aion/data/config' +prompt_command = '/home/aion/llm/sbin/llm_predict.sh' +command_prepare_model = '/home/aion/llm/sbin/llm_merge_weights.sh' +command_start_service = '/home/aion/llm/sbin/llm_publish_model.sh' +command_stop_service = 'publish.py' +from AION.llm import llm_utils +from pathlib import Path +def getAMIDetails(config,selectedAMI): + y = {} + for x in config: + print(x) + if x['id'] == selectedAMI: + return x + return y + +def get_ip(cloudconfig,instanceid,hypervisor,region,image): + try: + # with open(cloudconfig, 'r') as config_f: + # cloud_infra = json.load(config_f) + # config_f.close() + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + if image != '' and image != 'NA': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import get_instance_ip + return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region) + elif hypervisor == 'GCP': + #print(hypervisor,instanceid) + server = llm_utils.hypervisor( hypervisor,instanceid) + if server.is_machine_running(): + return server.ip + else: + return '' + + except Exception as e: + print(e) + raise Exception +def kill_inference_server(cloudconfig,instanceid,hypervisor,region,image): + # with open(cloudconfig, 'r') as config_f: + # cloud_infra = json.load(config_f) + # config_f.close() + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + if hypervisor == 'AWS': + ip = get_ip(cloudconfig,instanceid,hypervisor,region,image) + if ip == '': + print(""Machine is not running."") + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + command = 'pkill -f'+ ' '+command_stop_service + from llm.ssh_command import run_ssh_cmd + buf = run_ssh_cmd(ip, pem_file, username, '', '', command) + elif hypervisor == 'GCP': + server = llm_utils.hypervisor( hypervisor,instanceid) + if server.is_machine_running(): + ssh = server.ssh_details() + pem_file = str(Path(__file__).parent/ssh['keyFilePath']) + from llm.ssh_command import run_ssh_cmd + command = 'pkill -f'+ ' '+command_stop_service + buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command) + else: + raise Exception(""Error"") + + + +def LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image): + + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + # with open(cloudconfig, 'r') as config_f: + # cloud_infra = json.load(config_f) + # config_f.close() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + if image != '' and image != 'NA': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], 
instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import start_instance + status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) + print(status,msg,ip) + if status.lower() == 'success': + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + + command = command_prepare_model + ' ' + usecaseid + ' '+ str(model) + print(command) + from llm.ssh_command import run_ssh_cmd + buf = run_ssh_cmd(ip, pem_file, username, '', '', command) + + if ""Error"" in buf: + print(""Error in Merging model"") + raise Exception(""Error in Merging model"") + print(""merging finished"") + command = command_start_service+' '+ usecaseid + buf = run_ssh_cmd(ip, pem_file, username, '', '', command) + print(""inference server running"") + return buf + else: + print(msg) + return msg + elif hypervisor == 'GCP': + amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + server = llm_utils.hypervisor(hypervisor,instanceid) + if not server.is_machine_running(): + started, msg = server.start() + if not started: + raise ValueError( msg) + ssh = server.ssh_details() + pem_file = str(Path(__file__).parent/ssh['keyFilePath']) + from llm.ssh_command import run_ssh_cmd + #print(model) + #print(usecaseid) + command = command_prepare_model + ' ' + usecaseid + ' '+ str(model) + buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command) + if ""Error"" in buf: + print(""Error in Merging model"") + raise Exception(""Error in Merging model"") + #print(""merging finished"") + command = command_start_service+' '+ usecaseid + buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command) + #print(""inference server running"") + return buf + else: + print(""Not configured for gcp"") + raise Exception(""Eror"") + + +def LLM_predict(cloudconfig,instanceid,promptfile,hypervisor,model,usecaseid,region,image,temperature,maxtokens,modelType): + from appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + try: + temperature = float(temperature) + except: + temperature = 0.4 + try: + maxtokens = int(maxtokens) + except: + maxtokens = 2048 + + print(""===="") + print(float(temperature)) + print(""===="") + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + if image != '' and image != 'NA': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import start_instance + #print(aws_access_key_id, aws_secret_key, instanceid, region) + status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) + if status.lower() == 'success': + pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath']) + username = amiDetails['ssh']['userName'] + from llm.ssh_command import copy_files_to_server + #print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir) + copy_files_to_server(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir) + promptfile = os.path.basename(promptfile) 
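+            # Descriptive note (no behaviour change): the remote inference command built below
+            # follows the layout <llm_predict.sh> <BaseModel|usecaseid> <remote prompt file>
+            # <model> <temperature> <maxtokens>; base models use the literal tag BaseModel,
+            # while fine-tuned models are addressed by their usecaseid.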
+ if modelType == 'BaseModel': + command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str( + model) + ' ' + str(temperature) + ' ' + str(maxtokens) + else: + command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens) + print(command) + from llm.ssh_command import run_ssh_cmd + buf = run_ssh_cmd(ip, pem_file, username,'','',command) + return buf + else: + return msg + else: + server = llm_utils.hypervisor( hypervisor,instanceid) + if not server.is_machine_running(): + started, msg = server.start() + if not started: + raise ValueError( msg) + ssh = server.ssh_details() + pem_file = str(Path(__file__).parent/ssh['keyFilePath']) + from llm.ssh_command import copy_files_to_server + copy_files_to_server(server.ip,pem_file,promptfile,'',ssh['userName'],'',remote_data_dir,remote_config_dir) + promptfile = os.path.basename(promptfile) + if modelType == 'BaseModel': + command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str( + model) + ' ' + str(temperature) + ' ' + str(maxtokens) + else: + command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens) + #command = '/home/aion/llm/sbin/llm_model_finetuning.sh' + #print(command) + from llm.ssh_command import run_ssh_cmd + #print(ssh['userName'],pem_file) + buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command) + return buf import json +import os +import time +from pathlib import Path + +BASE_DIR = '/home/aion' +DATA_DIR = '/home/aion/data' +CONFIG_DIR = '/home/aion/data/config' +PROMPT_DATA_DIR = '/home/aion/data/prompt_data' +CODE_DIR = '/home/aion/llm/sbin' + +def remote_base_dir(as_str=False): + if as_str: + return BASE_DIR + return Path( BASE_DIR) + +def remote_data_dir(as_str=False): + if as_str: + return DATA_DIR + return Path( DATA_DIR) + +def remote_config_dir(as_str=False): + if as_str: + return CONFIG_DIR + return Path( CONFIG_DIR) + +def remote_code_dir(as_str=False): + if as_str: + return CODE_DIR + return Path( CODE_DIR) + +def remote_prompt_data_dir(as_str=False): + if as_str: + return DATA_DIR + return Path( DATA_DIR) + +def get_ami_details(config,selectedAMI): + y = {} + for x in config: + if x['id'] == selectedAMI: + return x + return y + +def get_ip(cloudconfig,instanceid,hypervisor,region,image): + from AION.appbe.compute import readComputeConfig + cloud_infra = readComputeConfig() + if hypervisor == 'AWS': + aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] + aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] + if image != '' and image != 'NA': + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) + else: + amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) + if region == '' or region == 'NA': + region = amiDetails['regionName'] + from llm.aws_instance_api import get_instance_ip + return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region) + elif hypervisor == 'GCP': + credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials'] + amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid) + zone = amiDetails['regionName'] + projectID = cloud_infra['gcpCredentials']['projectID'] + from llm.gcp_compute_api import check_instance + status,ip = check_instance(credentialsJson,projectID, zone, instanceid) + return ip + else: + raise ValueError(f""Given hypervisor '{hypervisor}' is not supported"") + 
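+# Minimal usage sketch for the hypervisor() factory and server wrappers defined below
+# (comments only; the instance id is hypothetical and assumes the compute config and SSH
+# key files referenced by readComputeConfig() are already in place):
+#
+#   server = llm_utils.hypervisor('aws', instanceid='i-0123456789abcdef0')
+#   ok, msg = server.start()              # aws_server.start() returns (ok, msg)
+#   if ok:
+#       ssh = server.ssh_details()        # dict holding userName and keyFilePath
+#       ...                               # copy files / run remote commands over SSH
+#       server.showndown()                # terminates a VM this wrapper created, else stops it
+#
+# aws_server and gcp_server expose the same start/stop/showndown/terminate/ssh_details
+# surface so callers can stay hypervisor-agnostic.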
+def hypervisor( hypervisor,instanceid='', image=''): + if not hypervisor: + raise ValueError('No hypervisor provided') + if hypervisor.lower() == 'aws': + return aws_server(instanceid, image) + elif hypervisor.lower() == 'gcp': + return gcp_server(instanceid, image) + else: + raise ValueError(f""Hyperviser '{hypervisor}' is not supported"") + +class gcp_server(): + def __init__( self, instanceid='', image=''): + self.hypervisor_name = 'gcp' + from AION.appbe.compute import readComputeConfig + self.cloud_infra = readComputeConfig() + if image and image.lower() != 'na': + self.ami_details = get_ami_details(self.cloud_infra['GCP']['machineImage'], image) + self.instanceid = '' + elif instanceid and instanceid.lower() != 'na': + self.ami_details = get_ami_details( self.cloud_infra['GCP']['instances'], instanceid) + self." +"instanceid = instanceid + else: + raise ValueError(""Either provide 'image name' or 'instance id'"") + self.credentialsJson = self.cloud_infra['gcpCredentials']['gcpCredentials'] + self.projectID = self.cloud_infra['gcpCredentials']['projectID'] + self.zone = self.ami_details['regionName'] + self.stopped = False + self.ip = '' + self.created = False + + def is_machine_running(self): + from llm.gcp_compute_api import check_instance + status,self.ip = check_instance(self.credentialsJson,self.projectID,self.zone,self.instanceid) + return 'running' == status.lower() + + def start(self): + from AION.llm.gcp_compute_api import is_running + from AION.llm.gcp_compute_api import check_instance + from AION.llm.gcp_compute_api import start_instance + status = is_running(self.credentialsJson, self.projectID, self.zone, self.instanceid).lower() + if 'running' == status: + stratus, self.ip = check_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid) + self.already_running = True + return True + else: + status, self.ip = start_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid) + return status == 'Success' + + def create(self,key_name = None): + from AION.llm.gcp_compute_api import create_instance + machineImageName = self.ami_details['id'] + machineImageProjectID = self.ami_details['machineImageProjectID'] + self.ip,msg = create_instance(self.credentialsJson,self.projectID,self.zone,key_name,machineImageName,machineImageProjectID) + if self.ip != '': + self.created = True + return self.ip,msg + + def stop(self): + if self.already_running or self.stopped: + return True + from AION.llm.gcp_compute_api import stop_server_instance + status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid) + if status == 'Process Completed': + self.stopped = True + return True + return False + + def showndown(self): + if self.created: + self.terminate() + else: + if self.already_running or self.stopped: + return True + from AION.llm.gcp_compute_api import stop_server_instance + status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid) + if status == 'Process Completed': + self.stopped = True + return True + return False + + def terminate(self): + from AION.llm.gcp_compute_api import terminate_instance + msg, status = terminate_instance(self.projectID, self.zone, self.instanceid) + print(msg) + return status == 'success' + + def ssh_details(self): + return self.ami_details['ssh'] + +class aws_server(): + + def __init__(self, instanceid='', image='', boot_up_time=0): + self.hypervisor_name = 'aws' + from AION.appbe.compute import readComputeConfig + self.cloud_infra = readComputeConfig() + if 
image and image.lower() != 'na': + self.ami_details = get_ami_details(self.cloud_infra['AWS_EC2']['amis'], image) + self.instanceid = '' + self.machine_type = 'ami' + elif instanceid and instanceid.lower() != 'na': + self.ami_details = get_ami_details( self.cloud_infra['AWS_EC2']['instances'], instanceid) + self.instanceid = instanceid + self.machine_type = 'instance' + else: + raise ValueError(""Either provide 'image name' or 'instance id'"") + self.access_key = self.cloud_infra['awsCredentials']['accessKey'] + self.secret_key = self.cloud_infra['awsCredentials']['secretAccessKey'] + self.securitygroupid = self.cloud_infra['awsCredentials']['securitygroupid'] + self.region = self.ami_details['regionName'] + self.already_running = False + self.boot_up_time = boot_up_time + self.stopped = False + self.created = False + + def is_already_running(self): + return self.already_running + def is_machine_running(self): + from AION.llm.aws_instance_api import is_running + status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower() + return 'running' == status.lower() + def start(self): + from AION.llm.aws_instance_api import is_running + from AION.llm.aws_instance_api import get_instance_ip + from AION.llm.aws_instance_api import start_instance + status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower() + if 'running' == status: + self.ip = get_instance_ip(self.access_key, self.secret_key, self.instanceid, self.region) + self.already_running = True + return True, 'already running' + else: + status, msg, self.ip = start_instance(self.access_key, self.secret_key, self.instanceid, self.region) + return status == 'Success', msg + + + def create(self, key_name=None): + from AION.llm.aws_instance_api import create_instance + image_id = self.ami_details['id'] + security_group_ids = self.ami_details['instanceSetting']['securityGroupIds'] + if not key_name: + key_name = self.ami_details['instanceSetting']['keyName'] + instance_type = self.ami_details['instanceSetting']['instanceType'] + self.instanceid,msg = create_instance(image_id, instance_type, self.securitygroupid, self.region, key_name, + self.access_key, self.secret_key) + if self.instanceid != '': + self.created = True + return self.instanceid,msg + def showndown(self): + from AION.llm.aws_instance_api import stop_server_instance + if self.created: + return self.terminate() + else: + if self.already_running or self.stopped: + return True + status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region) + if status == 'Process Completed': + self.stopped = True + return True + return False + + def stop(self): + from AION.llm.aws_instance_api import stop_server_instance + if self.already_running or self.stopped: + return True + status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region) + if status == 'Process Completed': + self.stopped = True + return True + return False + + def terminate(self): + from AION.llm.aws_instance_api import terminate_instance + msg, status = terminate_instance(self.instanceid, self.region, self.access_key, self.secret_key) + print(msg) + return status == 'success' + + def ssh_details(self): + return self.ami_details['ssh'] + from google.cloud import compute_v1 +import os + +PROJECT_ID = 'ers-research' +ZONE = 'us-west1-b' +INSTANCE_NAME = 'aion-llm-a100-vm1' +MACHINE_IMAGE_NAME = 'aion-40gb-a100-image' +MACHINE_IMAGE_PROJECT_ID = 'ers-research' + +def create_instance(credentialsJson,project_id, 
zone, instance_name, machine_image_name, machine_image_project_id): + try: + os.environ[""GOOGLE_APPLICATION_CREDENTIALS""] = credentialsJson + compute = compute_v1.InstancesClient() + + machine_image = compute_v1.MachineImagesClient().get(project=machine_image_project_id, machine_image=machine_image_name) + instance = compute_v1.Instance() + instance.name = instance_name + instance.machine_type = f""zones/{zone}/machineTypes/a2-ultragpu-1g"" + instance.source_machine_image = machine_image.self_link + + boot_disk = compute_v1.AttachedDisk() + boot_disk.auto_delete = True + boot_disk.boot = True + instance.disks = [boot_disk] + + network_interface = compute_v1.NetworkInterface() + access_config = compute_v1.AccessConfig() + access_config.type = ""ONE_TO_ONE_NAT"" + network_interface.access_configs = [access_config] + instance.network_interfaces = [network_interface] + operation = compute.insert(project=project_id, zone=zone, instance_resource=instance) + operation.result() + + instance = compute.get(project=project_id, zone=zone, instance=instance_name) + print(""--->instace created "") + print(instance.network_interfaces[0]) + return instance.network_interfaces[0].access_configs[0].nat_i_p,'' + except Exception as e: + print(e) + return '',str(e) + +def is_running(credentialsJson,project_id, zone, instance_name): + os.environ[""GOOGLE_APPLICATION_CREDENTIALS""] = credentialsJson + compute = compute_v1.InstancesClient() + instance = compute.get(project=project_id, zone=zone, instance=instance_name) + status = instance.status + return status +def check_instance(credentialsJson,project_id, zone, instance_name): + os.environ[""GOOGLE_APPLICATION_CREDENTIALS""] = credentialsJson + compute = compute_v1.InstancesClient() + instance = compute.get(project=project_id, zone=zone, instance=instance_name) + status = instance.status + + if status.lower() == 'running': + print(instance.network_interfaces[0].access_configs[0].nat_i_p) + ip = instance.network_interfaces[0].access_configs[0].nat_i_p + else: + ip = '' + return status,ip +def start_instance(credentialsJson,project_id, zone, instance_name): + try: + os.environ[""GOOGLE_APPLICATION_CREDENTIALS""] = credentialsJson + compute = compute_v1.InstancesClient() + operation = compute.start(project=project_id, zone=zone, instance=instance_name) + operation.result() + instance = compute.get(project=project_id, zone=zone, instance=instance_name) + status = instance.status + if status.lower() == 'running': + print(instance.network_interfaces[0].access_configs[0]) + ip = instance.network_interfaces[0].access_configs[0].nat_i_p + else: + ip = '' + except Exception as e: + print(e) + status = 'Error' + ip = '' + return status,ip + +def stop_instance(credentialsJson,project_id, zone, instance_name): + os.environ[""GOOGLE_APPLICATION_CREDENTIALS""] = credentialsJson + compute = compute_v1.InstancesClient() + operation = compute.stop(project=project_id, zone=zone, instance=instance_name) + operation.result() + + +def terminate_instance(project_id, zone, instance_name): + try: + compute = compute_v1.InstancesClient() + operation = compute.delete(project=project_id, zone=zone, instance=instance_name) + operation.result() + return """",""suceess"" + except Exception as e: + return str(e),""error"" + + +# if __name__ == '__main__': +# ip_address = create_instance(PROJECT_ID, ZONE, INSTANCE_NAME, MACHINE_IMAGE_NAME, MACHINE_IMAGE_PROJECT_ID) +# print(f""IP address of the new VM: {ip_address}"") +# #start_instance(PROJECT_ID, ZONE, INSTANCE_NAME) +# # 
stop_instance(PROJECT_ID, ZONE, INSTANCE_NAME) +# # terminate_instance(PROJECT_ID, ZONE, INSTANCE_NAME) + +class CachePrompt: + tableName = 'cachePrompts' + def __init__(self, conn): + self.conn = conn + + def selectFromCache(self,prompt ,usecaseId ,modelType,temperature=None ,max_token=None): + searchFromLLMFlag = False + try: + query = f'''SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= ""{usecaseId}"" AND prompt = ""{prompt}"" AND modelType = ""{modelType}""''' + if temperature: + query += f''' AND temperature= ""{temperature}""''' + if max_token: + query += f''' AND max_token= ""{max_token}""''' + cursor = self.conn.execute(query) + results = [x for x in cursor.fetchone()] + column_names = list(map(lambda x:x[0],cursor.description)) + response = results[column_names.index('response')] + return searchFromLLMFlag,response + except Exception as e: + print(e) + searchFromLLMFlag = True + return searchFromLLMFlag,'' + + + + + def deleteRecord(self ,usecaseId,max_records=5): + q_exitingRecords = f'''SELECT count(*) FROM {CachePrompt.tableName} WHERE usecaseId= ""{usecaseId}"" ''' + cursor = self.conn.execute(q_exitingRecords) + numberOfRecords = cursor.fetchone()[0] + if numberOfRecords >= max_records: + idForDeletion = f'SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= ""{usecaseId}"" ORDER BY created_at ASC LIMIT 1;' + cursor = self.conn.execute(idForDeletion) + id2del =[x[0] for x in cursor][0] + sql_delete_query = f""""""DELETE from {CachePrompt.tableName} WHERE id = {id2del};"""""" + self.conn.execute(sql_delete_query) + self.conn.commit() + + def insertRecord(self,prompt,response,usecaseId ,modelType,temperature=None ,max_token=None, max_records=5): + self.conn.execute('''CREATE TABLE IF NOT EXISTS cachePrompts + (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + prompt TEXT NOT NULL, + context TEXT , + usecaseId text NOT NULL, + modelType text NOT NULL, + temperature float NOT NULL, + max_token INT, + response TEXT NOT NULL, + created_at TEXT DEFAULT CURRENT_TIMESTAMP );''') + cur = self.conn.execute(f""select * from {CachePrompt.tableName}"").fetchall() + print(cur) + self.deleteRecord(usecaseId,max_records=5) + self.conn.execute(f""INSERT INTO {CachePrompt.tableName} (prompt, usecaseId,modelType,temperature,max_token,response) \\ + VALUES (?, ?, ?,?, ?, ?)"", (prompt, usecaseId,model" +"Type,temperature, max_token, response)); + self.conn.commit() + + + + + import paramiko +from pathlib import Path +import logging +import json +import os +import sys +import pandas as pd +import time +import timeit +import re +running_state_code = 16 +stopped_state_code = 80 +#prompt_command = '/home/aion/AION/llm/sbin/run_inference.sh' +log_file_path = '/home/aion/data/log/fine_tuning_log.log' +def read_file_from_server(ip,username,password,pem_file,remote_file_name,localfilepath): + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if pem_file != '': + client.connect(host, username=username, key_filename=pem_file) + else: + client.connect(host, username=username, password=password) + sftp = client.open_sftp() + sftp.get(remote_file_name,localfilepath) + sftp.close() + client.close() + +def read_log_file(ip,username,password,pem_file): + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if pem_file != '': + client.connect(host, username=username, key_filename=pem_file) + else: + client.connect(host, username=username, password=password) + #log_file_path = 
'/home/ubuntu/AION/data/log/fine_tuning_log.log' + stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}') + time.sleep(5) + client.close() + return stdout.read().decode() + +def run_ssh_cmd(ip,pem_file,username,password,log,command): + try: + buf = '' + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + startTime = timeit.default_timer() + while True: + try: + if pem_file != '': + client.connect(host, username=username, key_filename=pem_file) + else: + client.connect(host, username=username, password=password) + break + except Exception as e: + print(e) + dataLoadTime = timeit.default_timer() - startTime + if dataLoadTime >= 600: + break + time.sleep(10) + + stdin, stdout, stderr =client.exec_command(command) + + for line in stdout: + if log != '': + log.info(line) + else: + # if buf != '': + # buf= buf+'\\n' + buf = buf+line + client.close() + return buf + except Exception as e: + print(str(e)) + raise Exception(str(e)) + +def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username,password,remote_data_dir,remote_config_dir): + try: + host = ip + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + startTime = timeit.default_timer() + while True: + try: + if pem_file != '': + client.connect(host, username=username, key_filename=pem_file) + elif password != '': + client.connect(host, username=username, password=password) + sftp = client.open_sftp() + break + except Exception as e: + print(e) + time.sleep(10) + dataLoadTime = timeit.default_timer() - startTime + if dataLoadTime >= 600: + break + try: + sftp.stat(remote_data_dir) + print(f""Path: '{remote_data_dir}' already exist"") + except FileNotFoundError: + sftp.mkdir(remote_data_dir) + try: + sftp.stat(remote_config_dir) + print(f""Path: '{remote_config_dir}' already exist"") + except FileNotFoundError: + sftp.mkdir(remote_config_dir) + Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir) + for traindata_files in Train_Data_InRemoteArtifacts: + + if not traindata_files.endswith('.ipynb_checkpoints'): + sftp.remove(remote_data_dir +'/'+ traindata_files) + if os.path.isdir(local_data_file_path): + list_pdf_json_files = os.listdir(local_data_file_path) + for file_pdf_json in list_pdf_json_files: + sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json) + print(file_pdf_json + "" data copied successfully"") + else: + filename = os.path.basename(local_data_file_path) + directoryname= os.path.dirname(local_data_file_path) + sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename) + print(filename + "" data copied successfully"") + + if local_config_file_path != '': + config_InRemoteArtifacts = sftp.listdir(remote_config_dir) + for config_file in config_InRemoteArtifacts: + print(config_file) + if not config_file.endswith('.ipynb_checkpoints'): + sftp.remove(remote_config_dir +'/'+ config_file) + + + if os.path.isdir(local_config_file_path): + list_pdf_json_files = os.listdir(local_config_file_path) + for file_pdf_json in list_pdf_json_files: + sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json) + print(file_pdf_json + "" config copied successfully"") + else: + # updated_config_file_path = create_config(local_config_file_path) + filename = os.path.basename(local_config_file_path) + directoryname= os.path.dirname(local_config_file_path) + sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json') 
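+                # note: a single local config file is always uploaded under the fixed name
+                # config.json so the remote tuning scripts can find it regardless of its
+                # local file name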
+ print(filename + "" config copied successfully"") + + + remote_files = sftp.listdir(remote_config_dir) + print(""List of uploaded files"",remote_files) + sftp.close() + client.close() + except Exception as e: + print(e) + import json +import os +import sys +import pandas as pd +import time +from stat import S_ISDIR, S_ISREG +from pathlib import Path +import logging +import re +remote_data_dir = '/home/ubuntu/AION/data/storage' +remote_config_dir = '/home/ubuntu/AION/data/config' +sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh' +import os +import tarfile +def tardirectory(path,tarfilepath): + with tarfile.open(tarfilepath, 'w:tar') as tarhandle: + for root, dirs, files in os.walk(path): + for f in files: + tarhandle.add(os.path.join(root, f),arcname=f) + tarhandle.close() + +def createCodeSummary(codedir,cloudconfig,filetype): + try: + from appbe.dataPath import DATA_FILE_PATH + filetimestamp = str(int(time.time())) + tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar') + tardirectory(codedir,tarfilepath) + with open(cloudconfig, 'r') as config_f: + cloud_infra = json.load(config_f) + config_f.close() + aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID'] + aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey'] + instance_type = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['InstanceType'] + security_group_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['SecurityGroupId'] + region = cloud_infra['AWS_EC2']['CodeSummarization']['RegionName'] + image_id = cloud_infra['AWS_EC2']['CodeSummarization']['amiId'] + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['keyFilePath']) + username = cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['userName'] + + instance_id = '' + if image_id != '': + from llm.aws_instance_api import create_instance + instance_id = create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id, aws_secret_key) + if instance_id == '': + return 'Failed','Instance Creation Failed' + if instance_id == '': + if cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId'] != '': + instance_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId'] + else: + return 'Failed','Instance Creation Failed.' 
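+        # at this point instance_id refers either to a freshly created EC2 instance or to
+        # the pre-configured InstanceId from the cloud config; continue with the code upload
+        # and the summarization run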
+ if instance_id != '': + from llm.aws_instance_api import start_instance + ip = start_instance(aws_access_key_id, aws_secret_key, instance_id,region) + if ip != '': + from llm.ssh_command import copy_files_to_server + copy_files_to_server(ip,pem_file,tarfilepath,'',username,'',remote_data_dir,remote_config_dir) + from llm.ssh_command import run_ssh_cmd + command = 'rm -r /home/ubuntu/AION/data/code' + buf = run_ssh_cmd(ip, pem_file, username,'','',command) + command = 'mkdir -p /home/ubuntu/AION/data/code' + buf = run_ssh_cmd(ip, pem_file, username,'','',command) + command = 'tar -xvf '+remote_data_dir+'/'+filetimestamp+'.tar -C /home/ubuntu/AION/data/code' + print(command) + buf = run_ssh_cmd(ip, pem_file, username,'','',command) + command = sh_file_path+' '+'/home/ubuntu/AION/data/code'+' '+filetype + print(command) + buf = run_ssh_cmd(ip, pem_file, username,'','',command) + from llm.ssh_command import read_file_from_server + filetimestamp = str(int(time.time())) + codesummar = os.path.join(DATA_FILE_PATH,filetimestamp+'.csv') + read_file_from_server(ip,username,'',pem_file,'/home/ubuntu/AION/data/storage/code_summararies.csv',codesummar) + return 'Success',codesummar + else: + return 'Failed','Instance Initialization Failed.' + else: + return 'Failed','Instance Initialization Failed . AMI/Instance is not configured. Please check with ERS Research' + except Exception as e: + print(e) + return 'Failed','Code Summarization Failed' from .genetic_optimization import GeneticOptimizationCV from sklearn.linear_model import LogisticRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import SGDClassifier +from sklearn.tree import DecisionTreeClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.ensemble import GradientBoostingClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.linear_model import LinearRegression +from sklearn.linear_model import Lasso +from sklearn.linear_model import Ridge +from sklearn.tree import DecisionTreeRegressor +from sklearn.ensemble import RandomForestRegressor +import warnings +warnings.filterwarnings('ignore') +from genetic_selection import GeneticSelectionCV +def GeneticOptimizationCV(model,genetic_params,n_iter,scoring,cv): + n_generations = n_iter + estimator = model + selector = GeneticSelectionCV(estimator,cv=cv,**genetic_params,n_generations=n_generations,scoring=scoring) + return selector + import pandas as pd + +tab = ' ' +VALID_AGGREGATION_METHODS = ['mean','sum'] +VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year'] +VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}} +VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys()) + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +def get_source_delta( data: pd.DataFrame): + MAX_SAMPLE_TRY = 20 + if len( data) <= 1: + return None + time_delta = data.index[-1] - data.index[-2] + count = {} + for i in range(len(data)): + if i == MAX_SAMPLE_TRY or i == data.index[-1]: + break + delta = data.index[i+1] - data.index[i] + if delta not in count.keys(): + count[delta] = 1 + else: + count[delta] += 1 + if count: + return max(count, 
key=count.get) + else: + return None + + +class timeSeries(): + + def __init__( self, config, datetime, log=None): + self.datetime = datetime + self.validate_config(config) + self.log = log + + def validate_config( self, config): + + if not self.datetime or self.datetime.lower() == 'na': + raise ValueError('date time feature is not provided') + self.config = {} + method = get_one_true_option(config.get('interpolation',None)) + self.config['interpolate'] = {} + self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS + self.config['interpolate']['method'] = method + self.config['rolling'] = {} + self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False)) + self.config['rolling']['size'] = int( config.get('rollingWindowSize',1)) + if self.config['rolling']['size'] < 1: + raise ValueError('Rolling window size should be greater than 0.') + self.config['aggregation'] = {} + aggregation = config.get('aggregation',{" +"}) + agg_method = get_one_true_option(aggregation['type']) + self.config['aggregation'] = {} + self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS + self.config['aggregation']['method'] = agg_method + granularity = aggregation.get('granularity',{}) + granularity_unit = get_one_true_option( granularity.get('unit',None)) + if granularity_unit in VALID_GRANULARITY_UNITS: + granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'} + size = int(granularity.get('size',10)) + granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit) + self.config['aggregation']['granularity'] = {} + self.config['aggregation']['granularity']['unit'] = granularity_unit + self.config['aggregation']['granularity']['size'] = size + + def log_info(self, msg, type='info'): + if self.log: + if type == 'error': + self.log.error( msg) + else: + self.log.info( msg) + else: + print( msg) + + def is_down_sampling(self, data, size, granularity_unit): + down_sampling = False + if granularity_unit in ['M', 'Y']: + return True + else: + target_delta = pd.Timedelta(size , granularity_unit) + source_delta = get_source_delta(data) + if not source_delta: + raise ValueError('Could not find the data frame time frequency') + return source_delta < target_delta + + def run( self, data): + if self.datetime not in data.columns: + raise ValueError(f""Date time feature '{self.datetime}' is not present in data"") + try: + # data[self.datetime] = pd.to_datetime( data[self.datetime]) + ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle. 
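+            # clarification: naive (non-UTC) timestamps are parsed first; if that raises,
+            # the except branch re-parses with utc=True for timezone-aware values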
+ try: + #for non utc timestamp + data[self.datetime] = pd.to_datetime( data[self.datetime]) + except: + #for utc timestamp + data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True) + data.set_index( self.datetime, inplace=True) + except: + raise ValueError(f""can not convert '{self.datetime}' to dateTime"") + if self.config.get('interpolate',{}).get('enabled',False): + method = self.config['interpolate']['method'] + self.log_info(f""Applying Interpolation using {method}"") + methods_mapping = {'timebased': 'time'} + self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method) + data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method]) + if self.config.get('rolling',{}).get('enabled',False): + if self.config['rolling']['size'] > len( data): + raise ValueError('Rolling window size is greater than dataset size') + self.log_info(f""Applying rolling window( moving avg) with size {self.config['rolling']['size']}"") + data = data.rolling( self.config['rolling']['size']).mean() + data = data.iloc[self.config['rolling']['size'] - 1:] + aggregation = self.config.get('aggregation',{}) + if aggregation.get('enabled',False): + method = aggregation.get('method','mean') + self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit'] + if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']): + self.log_info(f""Applying down sampling( {self.rule})"") + if method == 'mean': + data = data.resample( self.rule).mean() + elif method == 'sum': + data = data.resample( self.rule).sum() + else: + self.log_info(f""Applying up sampling using forward fill method( {self.rule})"") + data = data.resample( self.rule).ffill() + data.reset_index( inplace=True, names=self.datetime) + return data + + def get_code(self, indent=0): + tab = ' ' + code = '' + code += f"""""" + +def preprocess( data): + try: + #for non utc timestamp + data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}']) + except: + data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True) + data.set_index( '{self.datetime}', inplace=True) +"""""" + if self.config.get('interpolate',{}).get('enabled',False): + code += tab + f""data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\\n"" + if self.config.get('rolling',{}).get('enabled',False): + code += tab + f""data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\\n"" + if self.config.get('aggregation',{}).get('enabled',False): + code += tab + f""data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\\n"" + code += tab + f""data.reset_index( inplace=True, names='{self.datetime}')\\n"" + code += tab + ""return data\\n"" + return code + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import numpy as np +import pandas as pd +import sys +import os +import warnings +import logging +from pathlib import Path +import random +from sklearn.model_selection import train_test_split +import operator +import re +import pdfplumber + +class dataReader(): + + def __init__(self): + self.dataDf =None + self.log = logging.getLogger('eion') + + + def readCsv(self,dataPath,featureList,targetColumn): + data=pd.read_csv(dataPath) + dataDf=data[featureList] + predictDf=data[targetColumn] + return dataDf,predictDf + + def rowsfilter(self,filters,dataframe): + self.log.info('\\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479 + filterexpression='' + firstexpressiondone = False + for x in filters: + if firstexpressiondone: + filterexpression += ' ' + if x['combineOperator'].lower() == 'and': + filterexpression += '&' + elif x['combineOperator'].lower() == 'or': + filterexpression += '|' + filterexpression += ' ' + firstexpressiondone = True + filterexpression += x['feature'] + filterexpression += ' ' + if x['condition'].lower() == 'equals': + filterexpression += '==' + elif x['condition'].lower() == 'notequals': + filterexpression += '!=' + elif x['condition'].lower() == 'lessthan': + filterexpression += '<' + elif x['condition'].lower() == 'lessthanequalto': + filterexpression += '<=' + elif x['condition'].lower() == 'greaterthan': + filterexpression += '>' + elif x['condition'].lower() == 'greaterthanequalto': + filterexpression += '>=' + filterexpression += ' ' + if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + filterexpression += x['value'] + else: + filterexpression += '\\''+x['value']+'\\'' + dataframe = dataframe.query(filterexpression) + self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479 + self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0])) + return dataframe,filterexpression + + def grouping(self,grouper,dataframe): + grouperbyjson= {} + groupbyfeatures = grouper['groupby'] + dataframe = dataframe.reset_index() + features = dataframe.columns.tolist() + aggjson = {} + for feature, featureType in zip(features,dataframe.dtypes): + if feature == groupbyfeatures or feature == 'index': + continue + if dataframe[feature].empty == True: + continue + if dataframe[feature].isnull().all() == True: + continue + if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + temp = {} + temp[feature+'_size'] = 'size' + temp[feature+'_sum'] = 'sum' + temp[feature+'_max'] = 'max' + temp[feature+'_min'] = 'min' + temp[feature+'_mean'] = 'mean' + aggjson[feature] = temp + else: + temp = {} + temp[feature+'_size'] = 'size' + temp[feature+'_unique'] = 'nunique' + aggjson[feature] = temp + groupbystring = 'groupby([\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')' + grouperbyjson['groupbystring'] = groupbystring + dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson) + dataframe.columns = dataframe.columns.droplevel(0) + dataframe = dataframe.reset_index() + ''' + if operation.lower() == 'size': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size() + elif operation.lower() == 'mean': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean() + elif operation.lower() == 'max': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max() + elif operation.lower() == 'min': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min() + + dataframe 
= dataframe.rename(""groupby_value"") + dataframe = dataframe.to_frame() + dataframe = dataframe.reset_index() + ''' + return dataframe,grouperbyjson + + + def timeGrouping(self,timegrouper,dataframe): + grouperbyjson= {} + dateTime = timegrouper['dateTime'] + frequency = timegrouper['freq'] + groupbyfeatures = timegrouper['groupby'] + grouperbyjson['datetime'] = dateTime + if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + dtlenth = dataframe[dateTime].iloc[0] + dtlenth = np.int64(dtlenth) + dtlenth = len(str(dtlenth)) + if dtlenth == 13: + dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms') + grouperbyjson['unit'] = 'ms' + elif dtlenth == 10: + dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s') + grouperbyjson['unit'] = 's' + else: + dataframe['date'] = pd.to_datetime(dataframe[dateTime]) + grouperbyjson['unit'] = '' + else: + dataframe['date'] = pd.to_datetime(dataframe[dateTime]) + grouperbyjson['unit'] = '' + dataframe = dataframe.reset_index() + dataframe.set_index('date',inplace=True) + features = dataframe.columns.tolist() + aggjson = {} + for feature, featureType in zip(features,dataframe.dtypes): + if feature == groupbyfeatures or feature == dateTime or feature == 'index': + continue + if dataframe[feature].empty == True: + continue + if dataframe[feature].isnull().all() == True: + continue + if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: + temp = {'size','sum','max','min','mean'} + aggjson[feature] = temp + else: + temp = {'size','nunique'} + aggjson[feature] = temp + + if groupbyfeatures == '': + groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\')]).agg('+str(aggjson)+')' + else: + groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\'),\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')' + + grouperbyjson['groupbystring'] = groupbystring + print(grouperbyjson) + if groupbyfeatures == '': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson) + else: + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson) + + dataframe.columns = ['_'.join(col) for col in dataframe.columns] + dataframe = dataframe.reset_index() + self.log.info(dataframe.head(10)) + ''' + if operation.lower() == 'size': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size() + elif operation.lower() == 'mean': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean() + elif operation.lower() == 'max': + dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max() + elif operation.lower() == 'min': + data" +"frame = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min() + + dataframe = dataframe.rename(""groupby_value"") + dataframe = dataframe.to_frame() + dataframe = dataframe.reset_index() + ''' + return dataframe,grouperbyjson + + + def readDf(self,dataF,featureList,targetColumn): + dataDf = dataF[featureList] + predictDf =dataF[targetColumn] + return dataDf,predictDf + + def csvTodf(self,dataPath,delimiter,textqualifier): + ''' + if os.path.splitext(dataPath)[1] == "".tsv"": + dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\\t') + else: + dataFrame=pd.read_csv(dataPath,encoding='latin1') + ''' + if os.path.splitext(dataPath)[1] == "".py"": + f = open(dataPath, ""r"") + pythoncode = f.read() + f.close() + ldict = {} + exec(pythoncode, globals(), ldict) + dataFrame = ldict['dfpy'] + else: + 
dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') + dataFrame.rename(columns=lambda x: x.strip(), inplace=True) + return dataFrame + + def read_file(self, fileName): + fileName = Path(fileName) + if fileName.suffix == '.pdf': + pdf = pdfplumber.open(fileName) + text = '' + for index, page in enumerate(pdf.pages): + if index: + text += ' ' + text += page.extract_text() + else: + with open(fileName, ""r"",encoding=""utf-8"") as f: + text = f.read() + return text + + def documentsTodf(self,folderlocation,labelFilePath): + dataDf = pd.DataFrame() + error_message = """" + dataset_csv_file = os.path.join(folderlocation,labelFilePath) + labels = pd.read_csv(dataset_csv_file) + dataDict = {} + keys = [""File"",""Label""] + for key in keys: + dataDict[key] = [] + for i in range(len(labels)): + filename = os.path.join(folderlocation,labels.loc[i,""File""]) + dataDict[""File""].append(self.read_file(filename)) + dataDict[""Label""].append(labels.loc[i,""Label""]) + dataDf = pd.DataFrame.from_dict(dataDict) + error_message = """" + return dataDf, error_message + + def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature): + self.log.info(""\\n---------- Prepare Features ----------"") + if(str(datetimeFeature).lower() != 'na'): + datetimeFeature = datetimeFeature.split("","") + datetimeFeature = list(map(str.strip, datetimeFeature)) + for dtfeature in datetimeFeature: + if dtfeature in df.columns: + self.log.info(""-------> Remove Date Time Feature: ""+dtfeature) + df = df.drop(columns=dtfeature) + if(str(indexFeature).lower() != 'na'): + indexFeature = indexFeature.split("","") + indexFeature = list(map(str.strip, indexFeature)) + for ifeature in indexFeature: + if ifeature in df.columns: + self.log.info(""-------> Remove Index Feature: ""+ifeature) + df = df.drop(columns=ifeature) + if(str(modelFeatures).lower() != 'na'): + self.log.info(""-------> Model Features: ""+str(modelFeatures)) + modelFeatures = modelFeatures.split("","") + modelFeatures = list(map(str.strip, modelFeatures)) + if(targetFeature != '' and str(targetFeature).lower() != 'na'): + targetFeature = targetFeature.split("","") + targetFeature = list(map(str.strip, targetFeature)) + for ifeature in targetFeature: + if ifeature not in modelFeatures: + modelFeatures.append(ifeature) + if(str(indexFeature).lower() != 'na'): + for ifeature in indexFeature: + if ifeature in modelFeatures: + modelFeatures.remove(ifeature) + if(str(datetimeFeature).lower() != 'na'): + for dtfeature in datetimeFeature: + if dtfeature in modelFeatures: + modelFeatures.remove(dtfeature) + df = df[modelFeatures] + self.log.info(""---------- Prepare Features End ----------"") + return(df) + + def splitImageDataset(self, df, ratio, modelType): + if modelType.lower() == ""objectdetection"": + images = df['File'].unique().tolist() + trainImages = random.sample(images, int(len(images) * ratio)) + mask = [0] * len(df) + for i in range(len(df)): + mask[i] = df.iloc[i]['File'] in trainImages + trainDf = df.iloc[mask] + testDf = df.iloc[[not elem for elem in mask]] + return trainDf, testDf + else: + return train_test_split(df, test_size=(1 - ratio)) + def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}): + from transformations import generate_tfrecord + from transformations.imageAug import ImageAugmentation + if isinstance(csv_file, pd.DataFrame): + df = 
csv_file + else: + df = pd.read_csv(os.path.join(train_image_dir,csv_file)) + labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir) + train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType) + if AugEnabled: + augFile = os.path.join(output_dir,""tempTrainDf.csv"") + train_df.to_csv(augFile) + ia = ImageAugmentation(train_image_dir, augFile) + augFile = ia.augment(modelType, operations,None,augConf) + train_df = pd.read_csv(augFile) + generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path) + if AugEnabled and not keepAugImages: + ia.removeAugmentedImages(train_df) + return train_df, num_classes + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#System imports +import os +import sys +import json +import datetime,time,timeit +import itertools + + +import numpy as np +import pandas as pd +import math +from sklearn.preprocessing import MinMaxScaler,StandardScaler +from sklearn.preprocessing import PowerTransformer +import logging +class dataTransformer(): + + def __init__(self): + self.log = logging.getLogger('eion') + + def startTransformer(self,df,features,target,transType): + scaler ='None' + if target in features: + features.remove(target) + + transFeatures=features + transDfColumns=[] + dataframe=df[transFeatures] + #targetArray=np.array(df[target]) + #targetArray.shape = (len(targetArray), 1) + self.log.info(""Data Normalization has started"") + if transType.lower() =='standardscaler': + scaler = StandardScaler().fit(dataframe) + transDf = scaler.transform(dataframe) + + elif transType.lower() =='minmax': + scaler=MinMaxScaler().fit(dataframe) + transDf = scaler.transform(dataframe) + elif transType.lower() =='lognormal': + print(dataframe) + scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe) + transDf = scaler.transform(dataframe) + else: + self.log.info(""Need to implement"") + #features.append(target) + + #scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features) + return transDf,features,scaler ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import io +import json +import logging +import pandas as pd +import sys +import numpy as np +from pathlib import Path +from word2number import w2n +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OrdinalEncoder +from sklearn.preprocessing import OneHotEncoder +from sklearn.impute import SimpleImputer, KNNImputer +from sklearn.pipeline import Pipeline, FeatureUnion +from sklearn.preprocessing import FunctionTransformer +from sklearn.preprocessing import MinMaxScaler,StandardScaler +from sklearn.preprocessing import PowerTransformer +from sklearn.compose import ColumnTransformer +from sklearn.base import TransformerMixin +from sklearn.ensemble import IsolationForest +from category_encoders import TargetEncoder +try: + import transformations.data_profiler_functions as cs +except: + import data_profiler_functions as cs + +if 'AION' in sys.modules: + try: + from appbe.app_config import DEBUG_ENABLED + except: + DEBUG_ENABLED = False +else: + DEBUG_ENABLED = False +log_suffix = f'[{Path(__file__).stem}] ' + + +class profiler(): + + def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None): + if not isinstance(xtrain, pd.DataFrame): + raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type') + if xtrain.empty: + raise ValueError(f'{log_suffix}Data frame is empty') + if target and target in xtrain.columns: + self.target = xtrain[target] + xtrain.drop(target, axis=1, inplace=True) + self.target_name = target + elif ytrain: + self.target = ytrain + self.target_name = 'target' + else: + self.target = pd.Series() + self.target_name = None + self.data_path = data_path + self.encode_target = encode_target + self.label_encoder = None + self.data = xtrain + self.keep_unprocessed = keep_unprocessed + self.colm_type = {} + for colm, infer_type in zip(self.data.columns, self.data.dtypes): + self.colm_type[colm] = infer_type + self.numeric_feature = [] + self.cat_feature = [] + self.text_feature = [] + self.wordToNumericFeatures = [] + self.added_features = [] + self.pipeline = [] + self.dropped_features = {} + self.train_features_type={} + self.__update_type() + self.config = config + self.featureDict = config.get('featureDict', []) + self.output_columns = [] + self.feature_expender = [] + self.text_to_num = {} + self.force_numeric_conv = [] + if log: + self.log = log + else: + self.log = logging.getLogger('eion') + self.type_conversion = {} + self.log_input_feat_info() + + def log_input_feat_info(self): + if self.featureDict: + feature_df = pd.DataFrame(self.featureDict) + log_text = '\\nPreprocessing options:' + log_text += '\\n\\t'+str(feature_df.head( len(self.featureDict))).replace('\\n','\\n\\t') + self.log.info(log_text) + + def log_dataframe(self, msg=None): + buffer = io.StringIO() + self.data.info(buf=buffer) + if msg: + log_text = f'Data frame after {msg}:' + else: + log_text = 'Data frame:' + log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t') + log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t')) + self.log.info(log_text) + + def transform(self): + if self.is_target_available(): + if self.target_name: + self.log.info(f""Target feature name: '{self.target_name}'"") + self.log.info(f""Target feature size: {len(self.target)}"") + 
else: + self.log.info(f""Target feature not present"") + self.log_dataframe() + print(self.data.info()) + try: + self.process() + except Exception as e: + self.log.error(e, exc_info=True) + raise + pipe = FeatureUnion(self.pipeline) + try: + if self.text_feature:" +" + from text.textProfiler import set_pretrained_model + set_pretrained_model(pipe) + conversion_method = self.get_conversion_method() + process_data = pipe.fit_transform(self.data, y=self.target) + # save for testing + if DEBUG_ENABLED: + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + df = pd.DataFrame(process_data) + df.to_csv('debug_preprocessed.csv', index=False) + if self.text_feature and conversion_method == 'latentsemanticanalysis': + n_size = self.get_tf_idf_output_size( pipe) + dimensions = self.get_tf_idf_dimensions() + if n_size < dimensions or n_size > dimensions: + dimensions = n_size + from sklearn.decomposition import TruncatedSVD + reducer = TruncatedSVD( n_components = dimensions) + reduced_data = reducer.fit_transform( process_data[:,-n_size:]) + text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process') + pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer)) + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1) + last_step = self.feature_expender.pop() + self.feature_expender.append({'feature_reducer':list(last_step.values())[0]}) + + except EOFError as e: + if ""Compressed file ended before the end-of-stream marker was reached"" in str(e): + raise EOFError('Pretrained model is not downloaded properly') + + self.update_output_features_names(pipe) + if not isinstance(process_data, np.ndarray): + process_data = process_data.toarray() + df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns) + + if self.is_target_available() and self.target_name: + df[self.target_name] = self.target + if self.keep_unprocessed: + df[self.keep_unprocessed] = self.data[self.keep_unprocessed] + self.log_numerical_fill() + self.log_categorical_fill() + self.log_normalization() + return df, pipe, self.label_encoder + + def log_type_conversion(self): + if self.log: + self.log.info('----------- Inspecting Features -----------') + self.log.info('----------- Type Conversion -----------') + count = 0 + for k, v in self.type_conversion.items(): + if v[0] != v[1]: + self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}') + self.log.info('Status:- |... 
Feature inspection done') + + def check_config(self): + removeDuplicate = self.config.get('removeDuplicate', False) + self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate) + self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio'])) + self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio'])) + self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel'])) + featureDict = self.config.get('featureDict', []) + if isinstance(featureDict, dict): + self.config['featureDict'] = [] + if isinstance(featureDict, str): + self.config['featureDict'] = [] + + def process(self): + #remove duplicate not required at the time of prediction + self.check_config() + self.remove_constant_feature() + self.remove_empty_feature(self.config['misValueRatio']) + self.remove_index_features() + self.dropna() + if self.config['removeDuplicate']: + self.drop_duplicate() + #self.check_categorical_features() + #self.string_to_numeric() + self.process_target() + self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)} + self.parse_process_step_config() + self.process_drop_fillna() + self.log_type_conversion() + self.update_num_fill_dict() + if DEBUG_ENABLED: + print(self.num_fill_method_dict) + self.update_cat_fill_dict() + self.create_pipeline() + self.text_pipeline(self.config) + self.apply_outlier() + if DEBUG_ENABLED: + self.log.info(self.process_method) + self.log.info(self.pipeline) + + def is_target_available(self): + return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target) + + def process_target(self, operation='encode', arg=None): + if self.is_target_available(): + # drop null values + self.__update_index( self.target.notna(), 'target') + if self.encode_target: + self.label_encoder = LabelEncoder() + self.target = self.label_encoder.fit_transform(self.target) + return self.label_encoder + return None + + def is_target_column(self, column): + return column == self.target_name + + def fill_default_steps(self): + + num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{})) + normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none') + for colm in self.numeric_feature: + if num_fill_method: + self.fill_missing_value_method(colm, num_fill_method.lower()) + if normalization_method: + self.fill_normalizer_method(colm, normalization_method.lower()) + + cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{})) + cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{})) + for colm in self.cat_feature: + if cat_fill_method: + self.fill_missing_value_method(colm, cat_fill_method.lower()) + if cat_encode_method: + self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True) + + def parse_process_step_config(self): + self.process_method = {} + user_provided_data_type = {} + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + user_provided_data_type[colm] = feat_conf['type'] + if user_provided_data_type: + self.update_user_provided_type(user_provided_data_type) + + self.fill_default_steps() + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + if feat_conf.get('fillMethod', None): + 
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower()) + if feat_conf.get('categoryEncoding', None): + self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower()) + if feat_conf.get('normalization', None): + self.fill_normalizer_method(colm, feat_conf['normalization'].lower()) + if feat_conf.get('outlier', None): + self.fill_outlier_method(colm, feat_conf['outlier'].lower()) + if feat_conf.get('outlierOperation', None): + self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower()) + + + def get_tf_idf_dimensions(self): + dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default') + return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim] + + def get_tf_idf_output_size(self, pipe): + start_index = {} + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + if start_index: + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + return len(v) + return 0 + + def update_output_features_names(self, pipe): + columns = self.output_columns + start_index = {} + index_shifter = 0 + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + for key,value in start_index.items(): + for k,v in value.items(): + index_shifter += len(v) + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + #print(start_index) + if start_index: + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + v = [f'{x}_vect' for x in v] + self.output_columns[key:key] = v + self.added_features = [*self.added_features, *v] + + + def text_pipeline(self, conf_json): + + if self.text_feature: + from text.textProfiler import textProfiler + from text.textProfiler import textCombine + pipeList = [] + text_pipe = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", self.text_feature) + ], remainder=""drop"")), + (""text_fillNa"",SimpleImputer(strategy='constant', fill_value='')), + (""merge_text_feature"", textCombine())]) + obj = textProfiler() + pipeList = obj.cleaner(conf_json, pipeList, self.data_path) + pipeList = obj.embedding(conf_json, pipeList) + last_step = ""merge_text_feature"" + for pipe_elem in pipeList: + text_pipe.steps.append((pipe_elem[0], pipe_elem[1])) + last_step = pipe_elem[0] + text_transformer = ('text_process', text_pipe) + self.pipeline.append(text_transformer) + self.feature_expender.append({last_step:len(self.output_columns)}) + + def create_pipeline(self): + num_pipe = {} + for k,v in self.num_fill_method_dict.items(): + for k1,v1 in v.items(): + if k1 and k1 != 'none': + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)), + (k1, self.get_num_scaler(k1)) + ]) + else: + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)) 
+ ]) + self.output_columns.extend(v1) + cat_pipe = {} + for k,v in self.cat_fill_method_dict.items(): + for k1,v1 in v.items(): + cat_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_cat_imputer(k)), + (k1, self.get_cat_encoder(k1)) + ]) + if k1 not in ['onehotencoding']: + self.output_columns.extend(v1) + else: + self.feature_expender.append({k1:len(self.output_columns)}) + for key, pipe in num_pipe.items(): + self.pipeline.append((key, pipe)) + for key, pipe in cat_pipe.items(): + self.pipeline.append((key, pipe)) + + ""Drop: feature during training but replace with zero during prediction "" + def process_drop_fillna(self): + drop_column = [] + if 'numFill' in self.process_method.keys(): + for col, method in self.process_method['numFill'].items(): + if method == 'drop': + self.process_method['numFill'][col] = 'zero' + drop_column.append(col) + if 'catFill' in self.process_method.keys(): + for col, method in self.process_method['catFill'].items(): + if method == 'drop': + self.process_method['catFill'][col] = 'zero' + drop_column.append(col) + if drop_column: + self.data.dropna(subset=drop_column, inplace=True) + + def update_num_fill_dict(self): + self.num_fill_method_dict = {} + if 'numFill' in self.process_method.keys(): + for f in cs.supported_method['fillNa']['numeric']: + self.num_fill_method_dict[f] = {} + for en in cs.supported_method['normalization']: + self.num_fill_method_dict[f][en] = [] + for col in self.numeric_feature: + numFillDict = self.process_method.get('numFill',{}) + normalizationDict = self.process_method.get('normalization',{}) + if f" +"== numFillDict.get(col, '') and en == normalizationDict.get(col,''): + self.num_fill_method_dict[f][en].append(col) + if not self.num_fill_method_dict[f][en] : + del self.num_fill_method_di" +" + if colm in self.cat_feature: + if method.lower() in cs.supported_method['categoryEncoding']: + if 'catEncoder' not in self.process_method.keys(): + self.process_method['catEncoder'] = {} + if method == 'na' and self.process_method['catEncoder'].get(colm, None): + pass + else: + self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) + else: + self.log.info(f""-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}"") + + def fill_normalizer_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['normalization']: + if 'normalization' not in self.process_method.keys(): + self.process_method['normalization'] = {} + if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): + pass + else: + self.process_method['normalization'][colm] = method + else: + self.log.info(f""-------> Normalization method '{method}' is not supported. 
supported methods are {cs.supported_method['normalization']}"") + + def apply_outlier(self): + inlier_indice = np.array([True] * len(self.data)) + if self.process_method.get('outlier', None): + self.log.info('-------> Feature wise outlier detection:') + for k,v in self.process_method['outlier'].items(): + if k in self.numeric_feature: + if v == 'iqr': + index = cs.findiqrOutlier(self.data[k]) + elif v == 'zscore': + index = cs.findzscoreOutlier(self.data[k]) + elif v == 'disable': + index = None + if k in self.process_method['outlierOperation'].keys(): + if self.process_method['outlierOperation'][k] == 'dropdata': + inlier_indice = np.logical_and(inlier_indice, index) + elif self.process_method['outlierOperation'][k] == 'average': + mean = self.data[k].mean() + index = ~index + self.data.loc[index,[k]] = mean + self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') + elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': + self.log.info(f'-------> Total outliers in ""{k}"": {(~index).sum()}') + if self.config.get('outlierDetection',None): + if self.config['outlierDetection'].get('IsolationForest','False') == 'True': + if self.numeric_feature: + index = cs.findiforestOutlier(self.data[self.numeric_feature]) + inlier_indice = np.logical_and(inlier_indice, index) + self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') + if inlier_indice.sum() != len(self.data): + self.__update_index(inlier_indice, 'outlier') + + def fill_outlier_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlier_column_wise']: + if 'outlier' not in self.process_method.keys(): + self.process_method['outlier'] = {} + if method not in ['Disable', 'na']: + self.process_method['outlier'][colm] = method + else: + self.log.info(f""-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}"") + + def fill_outlier_process(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlierOperation']: + if 'outlierOperation' not in self.process_method.keys(): + self.process_method['outlierOperation'] = {} + self.process_method['outlierOperation'][colm] = method + else: + self.log.info(f""-------> outlier process method '{method}' is not supported for column wise. 
supported methods are {cs.supported_method['outlierOperation']}"") + + def get_cat_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_cat_encoder(self,method): + if method == 'labelencoding': + return OrdinalEncoder() + elif method == 'onehotencoding': + return OneHotEncoder(sparse=False,handle_unknown=""ignore"") + elif method == 'targetencoding': + if not self.is_target_available(): + raise ValueError('Can not apply Target Encoding when target feature is not present') + return TargetEncoder() + + def get_num_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'mean': + return SimpleImputer(strategy='mean') + elif method == 'median': + return SimpleImputer(strategy='median') + elif method == 'knnimputer': + return KNNImputer() + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_num_scaler(self,method): + if method == 'minmax': + return MinMaxScaler() + elif method == 'standardscaler': + return StandardScaler() + elif method == 'lognormal': + return PowerTransformer(method='yeo-johnson', standardize=False) + + def recommenderStartProfiler(self,modelFeatures): + return cs.recommenderStartProfiler(self,modelFeatures) + + def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): + return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation) + + def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2) + + def get_conversion_method(self): + return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower() + +def set_features(features,profiler=None): + return cs.set_features(features,profiler) + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import random +from matplotlib import pyplot as plt +import cv2 +import albumentations as A +import os +import pandas as pd +from pathlib import Path + + +class ImageAugmentation(): + + def __init__(self, dataLocation, csvFile): + self.AugmentationOptions = {""Flip"": {""operation"": A.HorizontalFlip, ""suffix"":""_flip""}, + ""Rotate"": {""operation"": A.Rotate, ""suffix"":""_rotate""}, + ""Shift"": {""operation"": A.RGBShift, ""suffix"":""_shift""}, + ""Crop"": {""operation"": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], ""suffix"":""_crop""}, + ""Contrast"": {""operation"": A.RandomContrast, ""suffix"":""_cont""}, + ""Brightness"": {""operation"": A.RandomBrightness, ""suffix"":""_bright""}, + ""Blur"": {""operation"": A.GaussianBlur, ""suffix"":""_blur""} + } + self.dataLocation = dataLocation + self.csvFile = csvFile + + def __applyAugmentationClass(self, image, augmentation,limit): + if augmentation in list(self.AugmentationOptions.keys()): + if augmentation == ""Crop"": + height, width, _ = image.shape + crop_percentage = random.uniform(0.6, 0.9) + transform = self.AugmentationOptions[augmentation][""operation""][0](height=int(height*crop_percentage), width=int(width*crop_percentage) ) + elif augmentation == ""Blur"": + transform = self.AugmentationOptions[augmentation][""operation""](blur_limit = limit) + elif augmentation in [""Contrast"",""Brightness""]: + transform = self.AugmentationOptions[augmentation][""operation""](limit = limit) + else: + transform = self.AugmentationOptions[augmentation][""operation""]() + return transform(image=image) + + def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7): + transformOptions = [] + if bboxes: + bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids']) + else: + bbox_params = None + if augmentation in list(self.AugmentationOptions.keys()): + if augmentation == ""Crop"": + height, width, _ = image.shape + crop_percentage = random.uniform(0.6, 0.9) + transformOptions.append(self.AugmentationOptions[augmentation][""operation""][1](height=int(height*crop_percentage), width=int(width*crop_percentage) )) + elif augmentation == ""Blur"": + transformOptions.append(self.AugmentationOptions[augmentation][""operation""](blur_limit = limit)) + elif augmentation in [""Contrast"",""Brightness""]: + transformOptions.append(self.AugmentationOptions[augmentation][""operation""](limit = limit)) + else: + transformOptions.append(self.AugmentationOptions[augmentation][""operation""]()) + transform = A.Compose( + transformOptions, + bbox_params=bbox_params, + ) + random.seed(seed) + return transform(image=image, bboxes=bboxes, category_ids=category_ids) + else: + return None + def getBBox(self, df, imageLoc, category_name_to_id): + + subDf = df[df['loc']==imageLoc] + boxes = [] + category = [] + for index, row in subDf.iterrows(): + boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']]) + category.append(category_name_to_id[row['Label']]) + return boxes, category + + def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op): + for x in range(numberofImages): + bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id) + image = cv2.imread(imageLoc) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids) + transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR) + count = 1 + row = df[df['loc']==imageLoc].iloc[0] + filename = 
(Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op][""suffix""] + Path(imageLoc).suffix) + newImage = str(Path(imageLoc).parent/filename) + for index,bbox in enumerate(transformed['bboxes']): + data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True} + count += 1 + df=df.append(data, ignore_index=True) + + cv2.imwrite(newImage, transformed['image']) + return df + + + def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}): + + category_id_to_name = {v+1:k for v,k in enumerate(classes_names)} + category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)} + for i, imageLoc in enumerate(images): + for key in optionDf.columns: + if optionDf.iloc[i][key]: + if key in augConf: + limit = eval(augConf[key].get('limit','0.2')) + numberofImages = int(augConf[key].get('noOfImages',1)) + else: + limit = 0.2 + numberofImages = 1 + + df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key) + return df + + def __augClassificationImage(self, imageLoc, df,limit,imageindex,op): + data = {} + image = cv2.imread(imageLoc) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + transformed = self.__applyAugmentationClass(image, op,limit) + transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR) + location = Path(imageLoc).parent + filename = (Path(imageLoc).stem +'_'+'str(imageindex)'+ self.AugmentationOptions[op][""suffix""] + Path(imageLoc).suffix) + cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image']) + data['File'] = filename + data['Label'] = df[df['File']==Path(imageLoc).name][""Label""].iloc[0] + data['AugmentedImage'] = True + data['loc'] = str(location/filename) + return data + + def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None): + for i, imageLoc in enumerate(images): + for key in optionDf.columns: + if optionDf.iloc[i][key]: + if key in augConf: + limit = eval(augConf[key].get('limit','0.2')) + numberofImages = int(augConf[key].get('noOfImages',1)) + else: + limit = 0.2 + numberofImages = 1 + for x in range(numberofImages): + rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key) + df=df.append(rows, ignore_index=True) + return df + + def removeA" +"ugmentedImages(self, df): + removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist() + #df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink()) + for file in removeDf: + if file: + Path(file).unlink() + + def augment(self, modelType=""imageclassification"",params=None,csvSavePath = None,augConf={}): + if isinstance(params, dict) and any(params.values()): + df = pd.read_csv(self.csvFile) + if not self.dataLocation.endswith('/'): + images = self.dataLocation+'/' + else: + images = self.dataLocation + if modelType == ""imageclassification"": + images = images + df['File'] + else: + images = images + df['File'] + df['loc'] = images + images = set(images.tolist()) + option = {} + for key in list(self.AugmentationOptions.keys()): + option[key] = params.get(key, False) + optionDf = pd.DataFrame(columns=list(option.keys())) + for i in range(len(images)): + optionDf = optionDf.append(option, ignore_index=True) + if modelType == ""imageclassification"": + df = self.__classification(images, df, optionDf,augConf) + 
else: + classes_names = sorted(df['Label'].unique().tolist()) + df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf) + df.to_csv(self.csvFile, index=False) + return self.csvFile ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +#System imports +import logging +from distutils.util import strtobool +import pandas as pd +from text import TextProcessing + + +def get_one_true_option(d, default_value): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + +class textProfiler(): + + def __init__(self): + self.log = logging.getLogger('eion') + + def textCleaning(self, textCorpus): + textProcessor = TextProcessing.TextProcessing() + textCorpus = textProcessor.transform(textCorpus) + return(textCorpus) + + def textProfiler(self, textCorpus, conf_json, pipeList, max_features): + cleaning_kwargs = {} + textCleaning = conf_json.get('textCleaning') + self.log.info(""Text Preprocessing config: "",textCleaning) + cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True')) + cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True')) + cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False')) + cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False')) + cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True')) + cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True')) + cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True')) + cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'), + 'lemmatization').capitalize() + + removeNoiseConfig = textCleaning.get('removeNoiseConfig') + if type(removeNoiseConfig) is dict: + cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True')) + cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True')) + cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True')) + cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True')) + cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace' + cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True')) + cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True')) + + acronymConfig = textCleaning.get('acronymConfig') + if type(acronymConfig) is dict: + cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None) + + stopWordsConfig = textCleaning.get('stopWordsConfig') + if type(stopWordsConfig) is dict: + cleaning_kwargs['stopwordsList'] = 
stopWordsConfig.get('stopwordsList', []) + cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace' + removeNumericConfig = textCleaning.get('removeNumericConfig') + if type(removeNumericConfig) is dict: + cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True')) + + removePunctuationConfig = textCleaning.get('removePunctuationConfig') + if type(removePunctuationConfig) is dict: + cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False')) + + cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False')) + if cleaning_kwargs['fExpandContractions']: + cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH + + libConfig = textCleaning.get('libConfig') + if type(libConfig) is dict: + cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk') + cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk') + cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk') + + textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs) + textCorpus = textProcessor.transform(textCorpus) + pipeList.append((""TextProcessing"",textProcessor)) + + textFeatureExtraction = conf_json.get('textFeatureExtraction') + if strtobool(textFeatureExtraction.get('pos_tags', 'False')): + pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk') + posTagger = TextProcessing.PosTagging( pos_tags_lib) + textCorpus = posTagger.transform(textCorpus) + pipeList.append((""posTagger"",posTagger)) + ngram_min = 1 + ngram_max = 1 + if strtobool(textFeatureExtraction.get('n_grams', 'False')): + n_grams_config = textFeatureExtraction.get(""n_grams_config"") + ngram_min = int(n_grams_config.get('min_n', 1)) + ngram_max = int(n_grams_config.get('max_n', 1)) + if (ngram_min < 1) or ngram_min > ngram_max: + ngram_min = 1 + ngram_max = 1 + invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max) + self.log.info(invalidNgramWarning) + ngram_range_tuple = (ngram_min, ngram_max) + textConversionMethod = conf_json.get('textConversionMethod') + conversion_method = get_one_true_option(textConversionMethod, None) + if conversion_method.lower() == ""countvectors"": + X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features) + pipeList.append((""vectorizer"",vectorizer)) + df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) + df1 = df1.add_suffix('_vect') + self.log.info('----------> Conversion Method: CountVectors') + elif conversion_method.lower() in [""word2vec"",""fasttext"",""glove""]: + embedding_method = conversion_method + wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method) + wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel() + X = wordEmbeddingVecotrizer.transform(textCorpus) + df1 = pd.DataFrame(X) + df1 = df1.add_suffix('_vect') + pipeList.append((""vectorizer"",wordEmbeddingVecotrizer)) + self.log.info('----------> Conversion Method: '+str(conversion_method)) + elif conversion_method.lower() == ""sentencetransformer"": + from sentence_transformers import SentenceTransformer + model = 
SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2') + X = model.encode(textCorpus) + df1 = pd.DataFrame(X) + df1 = df1.add_suffix('_vect') + pipeList.append((""vectorizer"",model)) + self.log.info('----------> Conversion Method: SentenceTransformer') + elif conversion_method.lower() == 'tf_idf': + X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features) + pipeList.append((""vectorizer"",vectorizer)) + df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) + df1 = df1.add_suffix('_vect') + self.log.info('----------> Conversion Method: TF_IDF') + else: + df1 = pd.DataFrame() + df1['tokenize'] = textCorpus + self.log.info('----------> Conversion Method: NA') + return df1, pipeList,conversion_method + import os +import sys +import numpy as np +import scipy +import pandas as pd +from pathlib import Path + +default_config = { + 'misValueRatio': '1.0', + 'numericFeatureRatio': '1.0', + 'categoryMaxLabel': '20', + 'str_to_cat_len_max': 10 +} + +target_encoding_method_change = {'targetencoding': 'labelencoding'} + +supported_method = { + 'fillNa': + { + 'categorical' : ['mode','zero','na'], + 'numeric' : ['median','mean','knnimputer','zero','drop','na'], + }, + 'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'], + 'normalization': ['standardscaler','minmax','lognormal', 'na','none'], + 'outlier_column_wise': ['iqr','zscore', 'disable', 'na'], + 'outlierOperation': ['dropdata', 'average', 'nochange'] + } + +def findiqrOutlier(df): + Q1 = df.quantile(0.25) + Q3 = df.quantile(0.75) + IQR = Q3 - Q1 + index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))) + return index + +def findzscoreOutlier(df): + z = np.abs(scipy.stats.zscore(df)) + index = (z < 3) + return index + +def findiforestOutlier(df): + from sklearn.ensemble import IsolationForest + isolation_forest = IsolationForest(n_estimators=100) + isolation_forest.fit(df) + y_pred_train = isolation_forest.predict(df) + return y_pred_train == 1 + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +def recommenderStartProfiler(self,modelFeatures): + try: + self.log.info('----------> FillNA:0') + self.data = self.data.fillna(value=0) + self.log.info('Status:- !... Missing value treatment done') + self.log.info('----------> Remove Empty Row') + self.data = self.data.dropna(axis=0,how='all') + self.log.info('Status:- !... 
Empty feature treatment done') + userId,itemId,rating = modelFeatures.split(',') + self.data[itemId] = self.data[itemId].astype(np.int32) + self.data[userId] = self.data[userId].astype(np.int32) + self.data[rating] = self.data[rating].astype(np.float32) + return self.data + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + return(self.data) + +def folderPreprocessing" +"(self,folderlocation,folderdetails,deployLocation): + try: + dataset_directory = Path(folderlocation) + dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name'] + tfrecord_directory = Path(deployLocation)/'Video_TFRecord' + from savp import PreprocessSAVP + import csv + csvfile = open(dataset_csv_file, newline='') + csv_reader = csv.DictReader(csvfile) + PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory) + dataColumns = list(self.data.columns) + VideoProcessing = True + return dataColumns,VideoProcessing,tfrecord_directory + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + +def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + import os + try: + features = [doc_col_1, doc_col_2] + pipe = None + dataColumns = list(self.data.columns) + self.numofCols = self.data.shape[1] + self.numOfRows = self.data.shape[0] + from transformations.textProfiler import textProfiler + + self.log.info('-------> Execute Fill NA With Empty String') + self.data = self.data.fillna(value="" "") + self.log.info('Status:- |... Missing value treatment done') + self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1]) + self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2]) + self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2) + self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + from tensorflow.keras.preprocessing.text import Tokenizer + pipe = Tokenizer() + pipe.fit_on_texts(self.data['text'].values) + self.log.info('-------> Tokenizer: Fit on Concatenate Field') + self.log.info('Status:- |... Tokenizer the text') + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + return (self.data, pipe, self.target_name, features) + except Exception as inst: + self.log.info(""StartProfiler failed "" + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + +def set_features(features,profiler=None): + if profiler: + features = [x for x in features if x not in profiler.added_features] + return features + profiler.text_feature + return features ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import sys +from pathlib import Path +import urllib.request +import tarfile +import json +import subprocess +import os +from os.path import expanduser +import platform + + +class ODpretrainedModels(): + + def __init__(self, location=None): + if location: + if isinstance(location, Path): + self.pretrained_models_location = location.as_posix() + else: + self.pretrained_models_location = location + else: + p = subprocess.run([sys.executable, ""-m"", ""pip"",""show"",""AION""],capture_output=True, text=True) + if p.returncode == 0: + Output = p.stdout.split('\\n') + for x in Output: + y = x.split(':',1) + if(y[0]=='Location'): + self.pretrained_models_location = y[1].strip()+""/AION/pretrained_models/object_detection"" + break + if Path(self.pretrained_models_location).is_dir(): + self.config_file_location = self.pretrained_models_location+'/supported_models.json' + with open(self.config_file_location) as json_data: + self.supportedModels = json.load(json_data) + + home = expanduser(""~"") + if platform.system() == 'Windows': + self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection') + else: + self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection') + + if os.path.isdir(self.modelsPath) == False: + os.makedirs(self.modelsPath) + + def __save_config(self): + with open(self.config_file_location, 'w') as json_file: + json.dump(self.supportedModels, json_file) + + + def __download(self, modelName): + try: + url = self.supportedModels[modelName][""url""] + file = self.supportedModels[modelName][""file""] + local_file_path = Path(self.modelsPath)/(file+"".tar.gz"") + urllib.request.urlretrieve(url, local_file_path) + except: + raise ValueError(""{} model download error, check your internet connection"".format(modelName)) + return local_file_path + + + def __extract(self, modelName, file_location, extract_dir): + try: + tarFile = tarfile.open(file_location) + tarFile.extractall(extract_dir) + tarFile.close() + Path.unlink(file_location) + return True + except: + return False + + + def download(self, modelName): + if modelName in list(self.supportedModels.keys()): + p = Path(self.modelsPath).glob('**/*') + modelsDownloaded = [x.name for x in p if x.is_dir()] + if self.supportedModels[modelName]['file'] not in modelsDownloaded: + file = self.__download(modelName) + self.supportedModels[modelName][""downloaded""] = True + if self.__extract(modelName, file, self.modelsPath): + self.supportedModels[modelName][""extracted""] = True + self.__save_config() + else: + self.__save_config() + raise ValueError(""{} model downloaded but extraction failed,please try again"".format(modelName)) + else: + raise ValueError(""{} is not supported for object detection"".format(modelName)) + return self.supportedModels[modelName] + + def get_info(self,modeltype): + models_info = {} + p = Path(self.pretrained_models_location) + downloaded_models = [x.name for x in p.iterdir() if x.is_dir()] + for model in list(self.supportedModels.keys()): + if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''): + models_info[model] = self.supportedModels[model]['extracted'] + return models_info + + def is_model_exist(self, model_name): + models = self.get_info('') + status = ""NOT_SUPPORTED"" + if model_name in models: + if self.supportedModels[model_name]['extracted']: + status = ""READY"" + else: + status = ""NOT_READY"" + return status + + def clear_config(self, model_name): + self.supportedModels[model_name]['extracted'] = 
False + self.supportedModels[model_name]['downloaded'] = False + self.__save_config() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import numpy as np +import os +import sys +import string +import spacy +#import en_core_web_sm +from spacy.lang.en.stop_words import STOP_WORDS +from spacy.lang.en import English +try: + from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS +except: + from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS +from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer +from sklearn.base import TransformerMixin +from nltk.stem import WordNetLemmatizer +import re +from collections import defaultdict +from nltk.corpus import wordnet as wn +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import LabelBinarizer +from nltk.tokenize import word_tokenize +from nltk import pos_tag +from nltk.corpus import stopwords + +class textDataProfiler(): + def __init__(self): + self.data=None + #self.nlp=en_core_web_sm.load() + self.punctuations = string.punctuation + self.stopwords = list(STOP_WORDS) + + def startTextProfiler(self,df,target): + try: + dataColumns = list(df.columns) + print(' \\n No of rows and columns in dataFrame',df.shape) + print('\\n features in dataFrame',dataColumns) + dataFDtypes=self.dataFramecolType(df) + print('\\n feature types in dataFrame',dataFDtypes) + trainX=df['text'] + trainY=df[target] + return trainX,trainY + except Exception as inst: + print('startTextProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def dataFramecolType(self,dataFrame): + dataFDtypes=[] + try: + dataColumns=list(dataFrame.columns) + for i in dataColumns: + dataType=dataFrame[i].dtypes + dataFDtypes.append(tuple([i,str(dataType)])) + return dataFDtypes + except Exception as e: + print(""error in dataFramecolyType"",e) + return dataFDtypes + + def textTokenizer(self,text): + try: + parser = English() + tokens = parser(text) + tokens = [ word.lemma_.lower().strip() if word.lemma_ != ""-PRON-"" else word.lower_ for word in tokens ] + tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ] + return tokens + except Exception as inst: + print('textDataProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + return {} + + def cleanText(self,text): + try: + text=str(text).strip().lower() + for punctuation in string.punctuation: + text = text.replace(punctuation, '') + return text + except Exception as inst: + print('cleanText code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def textTokenization(self,text): + try: + tokenizedText=word_tokenize(text) + return tokenizedText 
+ except Exception as inst: + print('textDataProfiler code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + return {} + + def textLemmitizer(self,text): + try: + tag_map = defaultdict(lambda : wn.NOUN) + tag_map['J'] = wn.ADJ + tag_map['V'] = wn.VERB + tag_map['R'] = wn.ADV + Final_words = [] + word_Lemmatized = WordNetLemmatizer() + for word, tag in pos_tag(text): + if word not in stopwords.words('english') and word.isalpha(): + word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]]) + Final_words.append(word_Final) + return str(Final_words) + except Exception as inst: + print('textLemmitizer code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + return {} + +class TextCleaner(TransformerMixin): + + def clean_text(self,text): + try: + text=str(text).strip().lower() + text = text.replace(""isn't"", ""is not"") + text = text.replace(""aren't"", ""are not"") + text = text.replace(""ain't"", ""am not"") + text = text.replace(""won't"", ""will not"") + text = text.replace(""didn't"", ""did not"") + text = text.replace(""shan't"", ""shall not"") + text = text.replace(""haven't"", ""have not"") + text = text.replace(""hadn't"", ""had not"") + text = text.replace(""hasn't"", ""has not"") + text = text.replace(""don't" +""", ""do not"") + text = text.replace(""wasn't"", ""was not"") + text = text.replace(""weren't"", ""were not"") + text = text.replace(""doesn't"", ""does not"") + text = text.replace(""'s"", "" is"") + text = text.replace(""'re"", "" are"") + text = text.replace(""'m"", "" am"") + text = text.replace(""'d"", "" would"") + text = text.replace(""'ll"", "" will"") + text = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE) + text = re.sub(r'[\\w\\.-]+@[\\w\\.-]+', ' ', text, flags=re.MULTILINE) + for punctuation in string.punctuation: + text = text.replace(punctuation,' ') + text = re.sub(r'[^A-Za-z0-9\\s]',r' ',text) + text = re.sub(r'\\n',r' ',text) + text = re.sub(r'[0-9]',r' ',text) + wordnet_lemmatizer = WordNetLemmatizer() + text = "" "".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()]) + return text + except Exception as inst: + print('TextCleaner clean_text code execution failed !....',inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(exc_type, fname, exc_tb.tb_lineno) + + def text_cleaner(self,text): + text = self.clean_text(text) + stop_words = set(stopwords.words('english')) + text_tokens = word_tokenize(text) + out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1)) + return(out) + + def transform(self, X, **transform_params): + # Cleaning Text + return [self.clean_text(text) for text in X] + + def fit(self, X, y=None, **fit_params): + return self + + def get_params(self, deep=True): + return {} ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import os +import glob +import pandas as pd +import io +import xml.etree.ElementTree as ET +import argparse + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) +import tensorflow as tf +from PIL import Image +from object_detection.utils import dataset_util, label_map_util +from collections import namedtuple +from pathlib import Path + + +def class_text_to_int(row_label, label_map_dict): + return label_map_dict[row_label] + + +def split(df, group): + data = namedtuple('data', ['File', 'object']) + gb = df.groupby(group) + return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)] + + +def create_tf_example(group, path, label_map_dict): + with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = Image.open(encoded_jpg_io) + width, height = image.size + + File = group.File.encode('utf8') + image_format = b'jpg' + xmins = [] + xmaxs = [] + ymins = [] + ymaxs = [] + classes_text = [] + classes = [] + + for index, row in group.object.iterrows(): + xmin_n = min(row['xmin'], row['xmax']) + xmax_n = max(row['xmin'], row['xmax']) + ymin_n = min(row['ymin'], row['ymax']) + ymax_n = max(row['ymin'], row['ymax']) + + xmin_new = min(xmin_n, width) + xmax_new = min(xmax_n, width) + ymin_new = min(ymin_n, height) + ymax_new = min(ymax_n, height) + + xmn = xmin_new / width + xmins.append(xmn) + + xmx = xmax_new / width + xmaxs.append(xmx) + + ymn = ymin_new / height + ymins.append(ymn) + + ymx = ymax_new / height + ymaxs.append(ymx) + classes_text.append(row['Label'].encode('utf8')) + classes.append(class_text_to_int(row['Label'], label_map_dict)) + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(File), + 'image/source_id': dataset_util.bytes_feature(File), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example + +def labelFile(classes_names, label_map_path): + pbtxt_content = """" + + for i, class_name in enumerate(classes_names): + pbtxt_content = ( + pbtxt_content + + ""item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n"".format(i + 1, class_name) + ) + pbtxt_content = pbtxt_content.strip() + with open(label_map_path, ""w"") as f: + f.write(pbtxt_content) + +def createLabelFile(train_df, save_path): + labelmap_path = str(Path(save_path)/ 'label_map.pbtxt') + classes_names = sorted(train_df['Label'].unique().tolist()) + labelFile(classes_names, labelmap_path) + return labelmap_path, len(classes_names) + + +def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path): + + outputPath = str(Path(output_dir)/ 'train.tfrecord') + writer = tf.io.TFRecordWriter( outputPath) + grouped = 
split(train_df, 'File') + label_map = label_map_util.load_labelmap(labelmap_path ) + label_map_dict = label_map_util.get_label_map_dict(label_map) + for group in grouped: + tf_example = create_tf_example(group, image_dir, label_map_dict) + writer.write(tf_example.SerializeToString()) + writer.close() + if len(test_df): + outputPath = str(Path(output_dir)/ 'test.tfrecord') + writer = tf.io.TFRecordWriter( outputPath) + grouped = split(test_df, 'File') + for group in grouped: + tf_example = create_tf_example(group, image_dir, label_map_dict) + writer.write(tf_example.SerializeToString()) + writer.close() + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' from kafka import KafkaConsumer +from json import loads +import pandas as pd +import json +import os,sys +import time +import multiprocessing +from os.path import expanduser +import platform +import datetime +modelDetails = {} +class Process(multiprocessing.Process): + def __init__(self, modelSignature,jsonData,predictedData,modelpath): + super(Process, self).__init__() + self.config = jsonData + self.modelSignature = modelSignature + self.data = predictedData + self.modelpath = modelpath + def run(self): + #data = pd.json_normalize(self.data) + minotoringService = self.config['minotoringService']['url'] + trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature] + #filetimestamp = 'AION_'+str(int(time.time()))+'.csv' + #data.to_csv(dataFile, index=False) + inputFieldsJson = {""trainingDataLocation"":trainingdatalocation,""currentDataLocation"":self.data} + inputFieldsJson = json.dumps(inputFieldsJson) + ser_url = minotoringService+self.modelSignature+'/monitoring' + driftTime = datetime.datetime.now() + import requests + try: + response = requests.post(ser_url, data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + print(decoded_data) + status = decoded_data['status'] + msg = decoded_data['data'] + except Exception as inst: + if 'Failed to establish a new connection' in str(inst): + status = 'Fail' + msg = 'AION Service needs to be started' + else: + status = 'Fail' + msg = 'Error during Drift Analysis' + statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv') + df = pd.DataFrame(columns = ['dateTime', 'status', 'msg']) + df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True) + + print(df) + if (os.path.exists(statusFile)): + df.to_csv(statusFile, mode='a', header=False,index=False) + else: + df.to_csv(statusFile, header=True,index=False) + + + + +def launch_kafka_consumer(): + from appbe.dataPath import DATA_DIR + configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf') + with open(configfile,'r',encoding='utf-8') as f: + jsonData = json.load(f) + f.close() + kafkaIP=jsonData['kafkaCluster']['ip'] + kafkaport = jsonData['kafkaCluster']['port'] + topic = jsonData['kafkaCluster']['topic'] 
+ kafkaurl = kafkaIP+':'+kafkaport + if jsonData['database']['csv'] == 'True': + database = 'csv' + elif jsonData['database']['mySql'] == 'True': + database = 'mySql' + else: + database = 'csv' + kafkaPath = os.path.join(DATA_DIR,'kafka') + if not (os.path.exists(kafkaPath)): + try: + os.makedirs(kafkaPath) + except OSError as e: + pass + consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8'))) + for message in consumer: + message = message.value + data = message['data'] + data = pd.json_normalize(data) + modelname = message['usecasename'] + version = message['version'] + modelSignature = modelname+'_'+str(version) + modelpath = os.path.join(kafkaPath,modelSignature) + try: + os.makedirs(modelpath) + except OSError as e: + pass + secondsSinceEpoch = time.time() + if modelSignature not in modelDetails: + modelDetails[modelSignature] = {} + modelDetails[modelSignature]['startTime'] = secondsSinceEpoch + if database == 'csv': + csvfile = os.path.join(modelpath,modelSignature+'.csv') + if (os.path.exists(csvfile)): + data.to_csv(csvfile, mode='a', header=False,index=False) + else: + data.to_csv(csvfile, header=True,index=False) + modelTimeFrame = jsonData['timeFrame'][modelSignature] + currentseconds = time.time() + print(currentseconds - modelDetails[modelSignature]['startTime']) + if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame): + csv_path = os.path.join(modelpath,modelSignature+'.csv') + #predictedData = pd.read_csv(csv_path) + ##predictedData = predictedData.to_json(orient=""records"") + index = Process(modelSignature,jsonData,csv_path,modelpath) + index.start() + modelDetails[modelSignature]['startTime'] = secondsSinceEpoch + + import os +import shutil +import sys +import subprocess +from os.path import expanduser +import platform +import json + +def createDockerImage(model_name,model_version,module,folderpath): + command = 'docker pull python:3.8-slim-buster' + os.system(command); + subprocess.check_call([""docker"", ""build"", ""-t"",module+'_'+model_name.lower()+"":""+model_version,"".""], cwd=folderpath) + +def local_docker_build(config): + print(config) + config = json.loads(config) + model_name = config['usecase'] + model_version = config['version'] + mlaac__code_path = config['mlacPath'] + docker_images = {} + docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version + dataset_addr = os.path.join(mlaac__code_path,'ModelMonitoring') + createDockerImage(model_name,model" +"_version,'modelmonitoring',dataset_addr) + docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version + dataset_addr = os.path.join(mlaac__code_path,'DataIngestion') + createDockerImage(model_name,model_version,'dataingestion',dataset_addr) + transformer_addr = os.path.join(mlaac__code_path,'DataTransformation') + docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version + createDockerImage(model_name,model_version,'datatransformation',transformer_addr) + featureengineering_addr = os.path.join(mlaac__code_path,'FeatureEngineering') + docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version + createDockerImage(model_name,model_version,'featureengineering',featureengineering_addr) + from os import listdir + arr = [filename for filename in os.listdir(mlaac__code_path) if 
filename.startswith(""ModelTraining"")] + docker_training_images = [] + for x in arr: + dockertraing={} + dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version + docker_training_images.append(dockertraing) + training_addri = os.path.join(mlaac__code_path,x) + createDockerImage(model_name,model_version,str(x).lower(),training_addri) + docker_images['ModelTraining'] = docker_training_images + docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version + deploy_addr = os.path.join(mlaac__code_path,'ModelRegistry') + createDockerImage(model_name,model_version,'modelregistry',deploy_addr) + docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version + deploy_addr = os.path.join(mlaac__code_path,'ModelServing') + createDockerImage(model_name,model_version,'modelserving',deploy_addr) + outputjsonFile = os.path.join(mlaac__code_path,'dockerlist.json') + with open(outputjsonFile, 'w') as f: + json.dump(docker_images, f) + f.close() + output = {'Status':'Success','Msg':outputjsonFile} + output = json.dumps(output) + print(""aion_build_container:"",output) import docker +import json +import logging +def read_json(file_path): + data = None + with open(file_path,'r') as f: + data = json.load(f) + return data + +def run_pipeline(inputconfig): + inputconfig = json.loads(inputconfig) + logfilepath = inputconfig['logfilepath'] + logging.basicConfig(level=logging.INFO,filename =logfilepath) + usecasename = inputconfig['usecase'] + logging.info(""UseCaseName :""+str(usecasename)) + version = inputconfig['version'] + logging.info(""version :""+str(version)) + config = inputconfig['dockerlist'] + persistancevolume = inputconfig['persistancevolume'] + logging.info(""PersistanceVolume :""+str(persistancevolume)) + datasetpath = inputconfig['datasetpath'] + logging.info(""DataSet Path :""+str(datasetpath)) + config = read_json(config) + client = docker.from_env() + inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath} + inputconfig = json.dumps(inputconfig) + inputconfig = inputconfig.replace('""', '\\\\""') + logging.info(""===== Model Monitoring Container Start ====="") + outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelMonitoring: '+str(outputStr)) + print('ModelMonitoring: '+str(outputStr)) + logging.info(""===== ModelMonitoring Stop ====="") + logging.info(""===== Data Ingestion Container Start ====="") + outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('DataIngestion: '+str(outputStr)) + print('DataIngestion: '+str(outputStr)) + logging.info(""===== Data Ingestion Container Stop ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + if status != 'Success': + output = {'Status':'Error','Msg':'Data Ingestion Fails'} + logging.info(""===== Transformation Container Start ====="") + outputStr = client.containers.run(config['DataTransformation'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('Data Transformations: '+str(outputStr)) + print('Data Transformations: '+str(outputStr)) + logging.info(""===== Transformation Container Done ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) 
+ status = decoded_data['Status'] + if status != 'Success': + output = {'Status':'Error','Msg':'Data Transformations Fails'} + logging.info(""===== Feature Engineering Container Start ====="") + outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('FeatureEngineering: '+str(outputStr)) + print('FeatureEngineering: '+str(outputStr)) + logging.info(""===== Feature Engineering Container Done ====="") + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + modeltraining = config['ModelTraining'] + for mt in modeltraining: + logging.info(""===== Training Container Start ====="") + outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelTraining: '+str(outputStr)) + print('ModelTraining: '+str(outputStr)) + logging.info(""===== Training Container Done ====="") + outputStr = outputStr.strip() + try: + decoded_data = json.loads(outputStr) + status = decoded_data['Status'] + except Exception as inst: + logging.info(inst) + logging.info(""===== Model Registry Start ====="") + outputStr = client.containers.run(config['ModelRegistry'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('ModelRegistry: '+str(outputStr)) + print('ModelRegistry: '+str(outputStr)) + logging.info(""===== ModelRegistry Done ====="") + logging.info(""===== ModelServing Start ====="") + outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion']) + outputStr = outputStr.decode('utf-8') + logging.info('Prediction: '+str(outputStr)) + print('Prediction: '+str(outputStr)) + logging.info(""===== ModelServing Done ====="") import os +import sys +import json +from pathlib import Path +import subprocess +import shutil +import argparse + +def create_and_save_yaml(git_storage_path, container_label,usecasepath): + file_name_prefix = 'gh-acr-' + yaml_file = f""""""\\ +name: gh-acr-{container_label} +on: + push: + branches: main + paths: {container_label}/** + workflow_dispatch: +jobs: + gh-acr-build-push: + runs-on: ubuntu-latest + steps: + - name: 'checkout action' + uses: actions/checkout@main + + - name: 'azure login' + uses: azure/login@v1 + with: + creds: ${{{{ secrets.AZURE_CREDENTIALS }}}} + + - name: 'build and push image' + uses: azure/docker-login@v1 + with: + login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}} + username: ${{{{ secrets.REGISTRY_USERNAME }}}} + password: ${{{{ secrets.REGISTRY_PASSWORD }}}} + - run: | + docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} + docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} + docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} + docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} + docker push ${{{{ 
secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} + docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} + docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} + docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} +"""""" + arr = [filename for filename in os.listdir(usecasepath) if filename.startswith(""ModelTraining"")] + for x in arr: + yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label + yaml_file+='\\n' + yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label + yaml_file+='\\n' + with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f: + f.write(yaml_file) + +def run_cmd(cmd): + try: + subprocess.check_output(cmd, stderr=subprocess.PIPE) + except subprocess.CalledProcessError as e: + if e.stderr: + if isinstance(e.stderr, bytes): + err_msg = e.stderr.decode(sys.getfilesystemencoding()) + else: + err_msg = e.stderr + elif e.output: + if isinstance(e.output, bytes): + err_msg = e.output.decode(sys.getfilesystemencoding()) + else: + err_msg = e.output + else: + err_msg = str(e) + return False, err_msg + return True, """" + +def validate_config(config): + non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName'] + missing_keys = [k for k in non_null_keys if k not in config.keys()] + if missing_keys: + raise ValueError(f""following fields are missing in config file: {missing_keys}"") + for k,v in config.items(): + if k in non_null_keys and not v: + raise ValueError(f""Please provide value for '{k}' in config file."") + +def upload(config): + + validate_config(config) + url_type = config.get('url_type','https') + if url_type == 'https': + https_str = ""https://"" + url = https_str + config['username'] + "":"" + config['token'] + ""@"" + config['url'][len(https_str):] + else: + url = config['url'] + model_location = Path(config['location']) + git_folder_location = Path(config['gitFolderLocation']) + git_folder_location.mkdir(parents=True, exist_ok=True) + (git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True) + if not model_location.exists(): + raise ValueError('Trained model data not found') + + os.chdir(str(git_folder_location)) + (git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True) + shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True) + create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location']) + if (Path(git_folder_location)/'.git').exists(): + first_upload = False + else: + first_upload = True + if first_upload: + cmd = ['git','init'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','config','user.name',config['username']] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','config','user.email',config['email']] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','add', '-A'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','commit','-m',f""commit {config['modelName']}""] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = 
['git','branch','-M','main'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + if first_upload: + cmd = ['git','remote','add','origin', url] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + cmd = ['git','push','-f','-u','origin', 'main'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + else: + cmd = ['git','push'] + status, msg = run_cmd(cmd) + if not status: + raise ValueError(msg) + + return json.dumps" +"({'Status':'SUCCESS'}) + +if __name__ == '__main__': + try: + if shutil.which('git') is None: + raise ValueError(""git is not installed on this system"") + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config', help='Config file location or as a string') + + args = parser.parse_args() + if Path(args.config).is_file() and Path(args.config).suffix == '.json': + with open(args.config,'r') as f: + config = json.load(f) + else: + config = json.loads(args.config) + print(upload(config)) + except Exception as e: + status = {'Status':'Failure','msg':str(e)} + print(json.dumps(status)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' + +import json +import logging +import os +import shutil +import time +from sys import platform +from distutils.util import strtobool +# Base class for EION configuration Manager which read the needed f params from eion.json, initialize the parameterlist, read the respective params, store in variables and return back to caller function or external modules. 
+ +class OTAionConfigManager: + +# eion configuration Constractor + def __init__(self): + self.log = logging.getLogger('eion') + self.data = '' + self.problemType = '' + self.basic = [] + self.advance=[] + +# To get parameter list of configuration module from json, this will be passed as dict{} + def getEionProfilerConfigurarion(self): + try: + if(self.advance['profiler']): + return self.advance['profiler'] + else: + return('NA') + except KeyError: + return('NA') + + def getAIONTestTrainPercentage(self): + try: + if(self.advance['testPercentage']): + return int(self.advance['testPercentage']) + else: + return(80) + except KeyError: + return(80) + + def getAIONDataBalancingMethod(self): + try: + if(self.advance['categoryBalancingMethod']): + return self.advance['categoryBalancingMethod'] + else: + return(""oversample"") + except KeyError: + return(""oversample"") + +# To get parameter list of selector module params + def getEionSelectorConfiguration(self): + try: + if(self.advance['selector']): + return self.advance['selector'] + else: + return('NA') + except KeyError: + return('NA') + + def createDeploymentFolders(self,deployFolder,iterName,iterVersion): + usecaseFolderLocation = os.path.join(deployFolder,iterName) + os.makedirs(usecaseFolderLocation,exist_ok = True) + deployLocation = os.path.join(usecaseFolderLocation,str(iterVersion)) + try: + os.makedirs(deployLocation) + except OSError as e: + shutil.rmtree(deployLocation) + time.sleep(2) + os.makedirs(deployLocation) + dataFolderLocation = os.path.join(deployLocation,'data') + try: + os.makedirs(dataFolderLocation) + except OSError as e: + print(""\\nDeployment Data Folder Already Exists"") + logFolderLocation = os.path.join(deployLocation,'log') + try: + os.makedirs(logFolderLocation) + except OSError as e: + print(""\\nLog Folder Already Exists"") + etcFolderLocation = os.path.join(deployLocation,'etc') + try: + os.makedirs(etcFolderLocation) + except OSError as e: + print(""\\ETC Folder Already Exists"") + prodFolderLocation = os.path.join(deployLocation,'production') + os.makedirs(prodFolderLocation) + profilerFolderLocation = os.path.join(prodFolderLocation, 'profiler') + os.makedirs(profilerFolderLocation) + modelFolderLocation = os.path.join(prodFolderLocation, 'model') + os.makedirs(modelFolderLocation) + original_data_file = os.path.join(dataFolderLocation,'preprocesseddata.csv') + profiled_data_file = os.path.join(dataFolderLocation,'postprocesseddata.csv') + trained_data_file = os.path.join(dataFolderLocation,'trainingdata.csv') + predicted_data_file = os.path.join(dataFolderLocation,'predicteddata.csv') + logFileName=os.path.join(logFolderLocation,'model_training_logs.log') + outputjsonFile=os.path.join(deployLocation,'etc','output.json') + return(deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile) + +# To get parameter list of learner module params + def getEionLearnerConfiguration(self): + try: + if(self.advance['onlinelearner_config']): + mllearner_config = self.advance['mllearner_config'] + if 'categoryBalancingMethod' not in mllearner_config: + mllearner_config['categoryBalancingMethod'] = 'oversample' + if 'testPercentage' not in mllearner_config: + mllearner_config['testPercentage'] = 20 + if 'missingTargetCategory' not in mllearner_config: + mllearner_config['missingTargetCategory'] = '' + return mllearner_config + else: + return('NA') + except KeyError: + return('NA') + except Exception as inst: + self.log.info( '\\n-----> 
getEionLearnerConfiguration failed!!!.'+str(inst)) + return('NA') + + + def gettimegrouper(self): + try: + if(self.basic['timegrouper']): + return self.basic['timegrouper'] + else: + return 'NA' + except: + return 'NA' + + def getgrouper(self): + try: + if(self.basic['group']): + return self.basic['group'] + else: + return 'NA' + except: + return 'NA' + + def getfilter(self): + try: + if(self.basic['filter']): + return self.basic['filter'] + else: + return 'NA' + except: + return 'NA' + + + def getModulesDetails(self): + problem_type = self.problemType + visualizationstatus = self.getEionVisualizationStatus() + profiler_status = self.getEionProfilerStatus() + selector_status = self.getEionSelectorStatus() + learner_status = self.mllearner + targetFeature = self.getTargetFeatures() + deploy_status = self.getEionDeploymentStatus() + + + if learner_status: + if(problem_type == 'NA'): + learner_status = True + elif(problem_type.lower() in ['classification','regression']): + learner_status = True + else: + learner_status = False + + + return problem_type,targetFeature,profiler_status,selector_status,learner_status,visualizationstatus,deploy_status + + def __get_true_option(self, d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + + def getAlgoName(self, problem_type=None): + if problem_type == None: + problem_type = self.__get_true_option(self.basic['algorithms']) + return self.__get_true_option(self.basic['algorithms'][problem_type]) + + def getScoringCreteria(self): + return (self.scoringCreteria) + + + def getEionDeployerConfiguration(self): + try: + if(self.advance['deployer']): + return self.advance['deployer'] + else: + return('NA') + except KeyError: + return('NA') + + + def getAIONRemoteTraining(self): + try: + if(self.advance['remoteTraining']): + self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False')) + return self.advance['remoteTraining'] + else: + remoteTraining = {} + remoteTraining['Enable'] = False + remoteTraining['server'] = None + remoteTraining['ssh'] = None + return(remoteTraining) + except KeyError: + remoteTraining = {} + remoteTraining['Enable'] = False + remoteTraining['server'] = None + remoteTraining['ssh'] = None + return(remoteTraining) + + + def getEionVisualizationStatus(self): + return(True) + + + def getEionVisualizationConfiguration(self): + try: + if(self.advance['visualization_settings']): + return(self.advance['visualization_settings']) + else: + return('NA') + except KeyError: + return('NA') + + def getEionBatchLearningStatus(self): + try: + if(self.basic['output']['batchLearning']): + return(self.basic['output']['batchLearning']) + else: + return('disable') + except KeyError: + return('disable') + + def getEionProblemType(self): + try: + analysis_type = self.basic['analysisType'] + self.problemType = '' + for key in analysis_type.keys(): + if analysis_type[key] == 'True': + self.problemType = key + break + if self.problemType: + return self.problemType + else: + return('NA') + except KeyError: + return('NA') + def getEionProfilerStatus(self): + try: + if(self.basic['output']['profilerStage']): + return(self.basic['output']['profilerStage']) + else: + return('false') + except KeyError: + return('false') + + +#To get eion selector module status (enable/disable/none) + def getEionSelectorStatus(self): + try: + if(self.basic['output']['selectorStage']): + 
return(self.basic['output']['selectorStage']) + else: + return('disable') + except KeyError: + return('disable') + + def getEionDeploymentStatus(self): + try: + if(self.basic['output']['deploymentStage']): + return(self.basic['output']['deploymentStage']) + else: + return(False) + except KeyError: + return(False) + + + + + + def getEionLearnerModelParams(self,modelType): + try: + numberofModels = 0 + ml_algorithm_filename = '' + if(modelType == 'classification'): + requiredalgo = 'classifierModelParams' + elif(modelType == 'regression'): + requiredalgo = 'regressorModelParams' + learnerconfig = self.advance['onlinelearner_config'] + selectedMLModel = self.mlmodels + modelParams = [] + modelList=[] + if 'modelParams' in learnerconfig: + modelParams = learnerconfig['modelParams'] + if(str(type(modelParams)) != """"): + modelParams = [] + elif(len(modelParams) == 0): + modelParams = [] + if(len(modelParams) == 0): + if 'modelparamsfile' in learnerconfig: + if(learnerconfig['modelparamsfile'] != """"): + ml_algorithm_filename = learnerconfig['modelparamsfile'] + if(ml_algorithm_filename == ''): + ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json' + modelParams = json.loads(open(ml_algorithm_filename).read()) + if requiredalgo in modelParams: + modelParams = modelParams[requiredalgo] + if selectedMLModel != '': + modelList = selectedMLModel.split("","") + modelList = list(map(str.strip, modelList)) + for mod in modelList: + if mod not in modelParams: + self.log.info(""'""+mod+""' Not Available for Particular Problem Type"") + modelList.remove(mod) + else: + modelList = list(modelParams.keys()) + #modelList = selectedMLModel.split("","") + if(len(modelList) ==0): + modelList = list(modelParams.keys()) + return(modelParams,modelList) + except KeyError: + modelParams = [] + modelList=[] + return(modelParams,modelList) + + def getTargetFeatures(self): + try: + if(self.basic['targetFeature']): + return(self.basic['targetFeature']) + else: + return('') + except KeyError: + return('') + + def getModel" +"Features(self): + try: + if(self.basic['trainingFeatures']): + modFeatures = self.basic['trainingFeatures'] + modFeatures = modFeatures.split("","") + modFeatures = list(map(str.strip, modFeatures)) + modFeatures = "","".join([modf for modf in modFeatures]) + return(modFeatures) + else: + return('NA') + except KeyError: + return('NA') + + def getFolderSettings(self): + try: + if(self.basic['folderSettings']): + return(self.basic['folderSettings']) + else: + return('NA') + except KeyError: + return('NA') + + def getAIONLocationSettings(self): + self.iter_name = self.basic['modelName'] + self.iteration_version = self.basic['modelVersion'] + if(self.basic['dataLocation']): + dataLocation = self.basic['dataLocation'] + else: + dataLocation = 'NA' + if(self.basic['deployLocation']): + deployLocation = self.basic['deployLocation'] + else: + deployLocation = 'NA' + try: + if 'csv_settings' in self.basic: + csv_setting = self.basic['csv_settings'] + if 'delimiters' in csv_setting: + delimiter = csv_setting['delimiters'] + if delimiter.lower() == 'tab': + delimiter = '\\t' + elif delimiter.lower() == 'semicolon': + delimiter = ';' + elif delimiter.lower() == 'comma': + delimiter = ',' + elif delimiter.lower() == 'space': + delimiter = ' ' + elif delimiter.lower() == 'other': + if 'other' in csv_setting: + delimiter = csv_setting['other'] + else: + delimiter = ',' + else: + delimiter = ',' + else: + delimiter = ',' + if 'textqualifier' in csv_setting: + textqualifier = 
csv_setting['textqualifier'] + else: + textqualifier = '""' + else: + delimiter = ',' + textqualifier = '""' + except KeyError: + delimiter = ',' + textqualifier = '""' + return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier) + + def getFeatures(self): + try: + if(self.basic['dateTimeFeature']): + dtFeatures = self.basic['dateTimeFeature'] + dtFeatures = dtFeatures.split("","") + dtFeatures = list(map(str.strip, dtFeatures)) + dtFeatures = "","".join([dtf for dtf in dtFeatures]) + else: + dtFeatures = 'NA' + except KeyError: + dtFeatures = 'NA' + + try: + if(self.basic['indexFeature']): + iFeatures = self.basic['indexFeature'] + iFeatures = iFeatures.split("","") + iFeatures = list(map(str.strip, iFeatures)) + iFeatures = "","".join([dif for dif in iFeatures]) + else: + iFeatures = 'NA' + except KeyError: + iFeatures = 'NA' + + try: + if(self.basic['trainingFeatures']): + modFeatures = self.basic['trainingFeatures'] + modFeatures = modFeatures.split("","") + modFeatures = list(map(str.strip, modFeatures)) + modFeatures = "","".join([modf for modf in modFeatures]) + else: + modFeatures = 'NA' + except KeyError: + modFeatures = 'NA' + return(dtFeatures,iFeatures,modFeatures) + + def setModels(self): + try: + analysis_type = self.basic['analysisType'] + #print(analysis_type) + self.problemType = '' + for key in analysis_type.keys(): + if analysis_type[key] == 'True': + self.problemType = key + break + if self.problemType not in ['inputDrift','outputDrift']: + conf_algorithm = self.basic['algorithms'][self.problemType] + else: + conf_algorithm = {} + self.mlmodels='' + self.dlmodels='' + self.scoringCreteria = 'NA' + if self.problemType in ['classification','regression']: + scorCre = self.basic['scoringCriteria'][self.problemType] + for key in scorCre.keys(): + if scorCre[key] == 'True': + self.scoringCreteria = key + break + #print(self.problemType) + #print(self.scoringCreteria) + if self.scoringCreteria == 'Mean Squared Error': + self.scoringCreteria = 'MSE' + if self.scoringCreteria == 'Root Mean Squared Error': + self.scoringCreteria = 'RMSE' + if self.scoringCreteria == 'Mean Absolute Error': + self.scoringCreteria = 'MAE' + if self.scoringCreteria == 'R-Squared': + self.scoringCreteria = 'R2' + if self.problemType in ['classification','regression']: + for key in conf_algorithm.keys(): + if conf_algorithm[key] == 'True': + if self.mlmodels != '': + self.mlmodels += ',' + self.mlmodels += key + else: + for key in conf_algorithm.keys(): + if conf_algorithm[key] == 'True': + if self.mlmodels != '': + self.mlmodels += ',' + self.mlmodels += key + self.mllearner = False + if self.mlmodels != '': + self.mllearner = True + return('done') + except KeyError: + return('NA') + + def readConfigurationFile(self,path): + with open(path, 'rb') as data_file: + try: + self.data = json.load(data_file) #loading json object as python dictionary + #print(self.data) + self.basic = self.data['basic'] + self.advance = self.data['advance'] + problemType = self.setModels() + + + if(self.basic['output']['profilerStage']): + if(str(type(self.basic['output']['profilerStage'])) != """"): + msg = ""JSON Validation Fail: Profiling Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + if((self.basic['output']['profilerStage'].lower() == 'true') & ('profiler' not in self.advance)): + msg = ""JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"" + self.log.info(msg) + return False,msg + 
if(str(type(self.advance['profiler'])) != """"): + msg = ""JSON Validation Fail: Error: Profiler Configuration Syntax"" + self.log.info(msg) + return False,msg + if((self.basic['output']['profilerStage'].lower() != 'true') & (self.basic['output']['profilerStage'].lower() != 'false')): + msg = ""JSON Validation Fail: Profiling is Not defined Correctly, it should be either enable or disable"" + self.log.info(msg) + return False,msg + + if(self.basic['output']['selectorStage']): + + if(str(type(self.basic['output']['selectorStage'])) != """"): + msg = ""JSON Validation Fail: Selection Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + + if((self.basic['output']['selectorStage'].lower() == 'true') & ('selector' not in self.advance)): + msg = ""JSON Validation Fail: Selector Configuration Not Found"" + self.log.info(msg) + return False,msg + + if((self.basic['output']['selectorStage'].lower() != 'true') & (self.basic['output']['selectorStage'].lower() != 'false')): + msg = ""JSON Validation Fail:: Selection is Not defined Correctly, it should be either enable or disable"" + self.log.info(msg) + return False,msg + + if(str(type(self.advance['selector'])) != """"): + msg = ""JSON Validation Fail: Error: Syntax of Selector"" + self.log.info(msg) + return False,msg + + + if 'dataLocation' not in self.basic: + msg = ""JSON Validation Fail: Data Location Not Defined"" + self.log.info(msg) + return False,msg + + if 'deployLocation' not in self.basic: + msg = ""JSON Validation Fail: Deploy Location Not Defined"" + self.log.info(msg) + return False,msg + + if 'deployment' in self.basic: + if(str(type(self.basic['deployment'])) != """"): + msg = ""JSON Validation Fail: deployment Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + if(self.basic['deployment'] == 'enable'): + if 'deployer' in self.advance: + if(str(type(self.advance['deployer'])) != """"): + msg = ""JSON Validation Fail: deployer configuration should be nexted json object"" + self.log.info(msg) + return False,msg + else: + msg = ""JSON Validation Fail: deployer configuration is missing"" + self.log.info(msg) + return False,msg + + + except ValueError as e: + print(""Error""+str(e)) + return False,e + return True,'Good' + + + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json + + +def get_true_option(d, default_value=None): + if isinstance(d, dict): + for k, v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + +def get_true_options(d): + options = [] + if isinstance(d, dict): + for k, v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + options.append(k) + return options + +def check_datetime(config): + dateTime = config['basic']['dateTimeFeature'] + if dateTime == '' or dateTime.lower()=='na': + return False + return True + +def check_dtype(d): + flag= 1 + for item in d: + if item[""type""].lower() != ""text"" and item[""type""].lower() != ""index"": + flag = 0 + break + return flag + +def check_text(d): #task 12627 + flag= 0 + for item in d: + if item[""type""].lower() == ""text"": + flag = 1 + break + return flag + +def check_labelencoding(ftr_dict_list, target_ftr): + for ftr_dict in ftr_dict_list: + if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding" +"'].lower()!='labelencoding': + return False + return True + +class timeseries(): + + def __init__(self,config): + self.config=config + #task 11997 + if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true': + self.problemType = 'timeSeriesForecasting' + elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': + self.problemType = 'timeSeriesAnomalyDetection' + + def validate_basic_config(self,status='pass',msg=None): + #task 12627 + date_time_status = check_datetime(self.config) + text_status = check_text(self.config['advance']['profiler']['featureDict']) + if not date_time_status and text_status: + msg = 'For time series problem,\\\\n* One feature should be in datetime format\\\\n* Text feature not supported ' + return 'error', msg + elif not date_time_status: + msg = 'For time series problem, one feature should be in datetime format' + return 'error', msg + elif text_status: + msg = 'For time series problem, text feature not supported ' + return 'error', msg + selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997 + if isinstance(self.config['basic']['targetFeature'],str): + targetFeature = list(self.config['basic']['targetFeature'].split(',')) + if self.problemType=='timeSeriesForecasting': #task 11997 + if len(targetFeature) > 1: + if 'ARIMA' in selected_algos: + status = 'error' + msg = ""ARIMA is not supported for multilabel (target) feature"" + return status, msg + if ""FBPROPHET"" in selected_algos: + status = 'error' + msg = ""FBPROPHET is not supported for multiLabel (target) feature"" + return status, msg + if 'MLP' in selected_algos: + status = 'error' + msg = ""MLP is not supported for multiLabel (target) feature"" + return status, msg + if len(targetFeature) == 1 and 'VAR' in selected_algos: + status = 'error' + msg = ""VAR is not supported for singleLabel (target) feature"" + return status, msg + elif self.problemType=='timeSeriesAnomalyDetection': + anomChecker = anomaly(self.config) + status, msg = anomChecker.validate_basic_config() + return status, msg + + +class anomaly(): + + def __init__(self,config): + self.config = config + if self.config['basic']['analysisType']['anomalyDetection']=='': + self.problemType = 'anomalyDetection' + elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection']: #task 11997 + self.problemType = 'timeSeriesAnomalyDetection' + 
+ def validate_basic_config(self,status='pass',msg=None): + #task 12627 + date_time_status = check_datetime(self.config) + targetFeature = self.config['basic']['targetFeature'] + if self.problemType=='anomalyDetection' and date_time_status: + status = 'error' + msg = 'Date feature detected. For anomaly detection on time series change problem type to Time Series Anomaly Detection or drop Date feature' + return status, msg + if targetFeature.lower()!= 'na' and targetFeature!= """" and self.config['basic']['inlierLabels'] == '': + status = 'error' + msg = 'Please provide inlier label in case of supervised anomaly detection' + return status, msg + +class survival(): + + def __init__(self,config): + self.config = config + self.problemType= 'survivalAnalysis' + + def validate_basic_config(self): + dateTimeStatus = check_datetime(self.config) + labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature']) + if not dateTimeStatus and not labelencoding_status: + msg = 'For survival analysis problem,\\\\n* One feature should be in datetime format\\\\n* Encoding of categorical features should be of label encoding ' + return 'error', msg + elif not dateTimeStatus: + msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature' + return 'error', msg + elif not labelencoding_status: + msg = 'Categorical features are expected to be label encoded for survival analysis problem. Please select it from feature encoding' + return 'error', msg + else: + return 'pass', "" "" + +class associationrule(): + + def __init__(self,config): + self.config=config + + def validate_basic_config(self,status='pass', msg=None): + if self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == 'na' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == 'na': + return ""error"",""Make sure to configure invoice feature and item feature"" + elif self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] == self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']: + return ""error"",""Make sure to invoice feature and item feature is configure correctly"" + else: + return ""pass"", "" "" + +class documentsimilarity(): + + def __init__(self,config): + self.config=config + + + def validate_basic_config(self,status='pass', msg=None): + flag = check_dtype(self.config['advance']['profiler']['featureDict']) + if flag == 1: + return ""pass"", "" "" + else: + msg=""Make sure to change the feature type from Catgeory to Text and drop numerical features for document Similarity"" + return ""error"", msg + + + +def config_validate(path): + with open(path, 'rb') as data_file: + config = json.load(data_file) + data_file.close() + try: + problem_type = get_true_option(config['basic']['analysisType']) + status = 'pass' + msg = '' + if 'timeseries' in problem_type.lower(): #task 11997 + obj = timeseries(config) + elif problem_type.lower() == 'survivalanalysis': + obj = survival(config) + elif problem_type.lower() == 'anomalydetection': + obj = anomaly(config) + elif problem_type.lower() in 
['similarityidentification','contextualsearch']: + obj = documentsimilarity(config) + elif problem_type.lower() == 'recommendersystem': + if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true': + obj = associationrule(config) + else: + return 'pass',"""" + else: + return 'pass',"""" + status,msg= obj.validate_basic_config() + return(status,msg) + except Exception as e: + print(e) + + + +def start_check(config): + return config_validate(config) + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import json +from pathlib import Path +from distutils.util import strtobool + +class code_configure(): + + def __init__(self): + self.code_config = {} + self.unsupported_algo = [] + self.supported_model = {""classification"":{""Logistic Regression"": ""LogisticRegression"", ""Naive Bayes"": ""GaussianNB"", ""Decision Tree"": ""DecisionTreeClassifier"", ""Random Forest"": ""RandomForestClassifier"", ""Support Vector Machine"": ""SVC"", ""K Nearest Neighbors"": ""KNeighborsClassifier"", ""Gradient Boosting"": ""GradientBoostingClassifier"", ""Extreme Gradient Boosting (XGBoost)"":""XGBClassifier"", ""Light Gradient Boosting (LightGBM)"": ""LGBMClassifier"",""Categorical Boosting (CatBoost)"": ""CatBoostClassifier""}, + ""regression"":{""Linear Regression"": ""LinearRegression"", ""Lasso"": ""Lasso"", ""Ridge"": ""Ridge"", ""Decision Tree"": ""DecisionTreeRegressor"", ""Random Forest"": ""RandomForestRegressor"", ""Extreme Gradient Boosting (XGBoost)"": ""XGBRegressor"", ""Light Gradient Boosting (LightGBM)"": ""LGBMRegressor"",""Categorical Boosting (CatBoost)"": ""CatBoostRegressor""},""timeSeriesForecasting"":{""MLP"": ""MLP"",""LSTM"":""LSTM""}} #task 11997 + + def __get_true_option(self, d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + def __get_true_options(self, d): + options = [] + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + options.append(k) + return options + + def __get_scoring_criteria(self, criteria): + mapping = {'Mean Squared Error':'MSE', 'Root Mean Squared Error':'RMSE','Mean Absolute Error':'MAE','R-Squared':'R2'} + if criteria in mapping.keys(): + return mapping[criteria] + return criteria + + def __get_feature_selector(self, selector_config): + feature_selector = [] + if self.__get_true_option(selector_config['selectionMethod']) == 'featureSelection': + feature_selector = self.__get_true_options(selector_config['featureSelection']) + return feature_selector + + def __get_feature_reducer(self, selector_config): + feature_reducer = """" + if self.__get_true_option(selector_config['selectionMethod']) == 'featureEngineering': + feature_reducer = self.__get_true_option(selector_config['featureEngineering'],'pca').lower() + return feature_reducer + + def __getOptimizationParam(self, param_config): + 
param_dict = {} + param_dict['iterations'] = int(param_config['iterations']) + param_dict['trainTestCVSplit'] = int(param_config['trainTestCVSplit']) + param_dict['geneticparams'] = param_config['geneticparams'] + return param_dict + + def add_model(self, model_name, config): + if not self.unsupported_algo: + self.code_config[""algorithms""][model_name] = config.copy() + + def update_config(self, key, value): + self.code_config[key] = value + + def save_config(self, file_path): + if Path(file_path).is_dir(): + file_path = Path(file_path)/'etc/code_config.json' + with open(file_path, ""w"") as f: + if not self.unsupported_algo: + json.dump(self.code_config, f, indent=4) + else: + if 'ensemble' in self.unsupported_algo: + json.dump({""Status"":""Failure"",""msg"":""Ensemble is not supported"",""error"":""Ensemble is not supported""}, f) # keep error key + elif 'text_features' in self.unsupported_algo: + json.dump({""Status"":""Failure"",""msg"":""Text feature processing is not supported"",""error"":""Text feature processing is not supported""}, f) # keep error key + else: + json.dump({""Status"":""Failure"",""msg"":f""Unsupported model {self.unsupported_algo}"",""error"":f""Unsupported model {self.unsupported_algo}""}, f) # keep error key + + def __is_algo_supported(self, config): + problem_type = self.__get_true_option(config['basic']['analysisType']) + if problem_type not in self.supported_model.keys(): + self.unsupported_algo = [problem_type] + return False + algos = config['basic']['algorithms'][problem_type] + algos = self.__get_true_options(algos) + self.unsupported_algo = [x for x in algos if x not in self.supported_model[problem_type].keys()] + if self.unsupported_algo: + return False + return True + + def create_config(self, config): + if isinstance(config, str): + with open(config,'r') as f: + config = json.load(f) + problem_type = self.__get_true_option(config['basic']['analysisType']) + self.code_config[""problem_type""] = problem_type.lower() + if not self.__is_algo_supported(config): + return + if 'ensemble' in config['advance']['mllearner_config']: + if config['advance']['mllearner_config']['ensemble'] == 'enable': + self.unsupported_algo = ['ensemble'] + return + self.code_config[""modelName""] = config['basic']['modelName'] + self.code_config[""modelVersion""] = config['basic']['modelVersion'] + if config['basic']['folderSettings']['fileType'].lower() == 'url': + self.code_config[""dataLocation""] = config['basic']['folderSettings']['labelDataFile'] + else: + self.code_config[""dataLocation""] = config['basic']['dataLocation'] + self.code_config[""target_feature""] = config['basic']['targetFeature'] + trainingfeatures = config['basic']['trainingFeatures'].split(',') + datetimeFeature = list(map(str.strip, config['basic']['dateTimeFeature'].split(','))) + for dtfeature in datetimeFeature: + if dtfeature in trainingfeatures: + trainingfeatures.remove(dtfeature) + indexFeature = list(map(str.strip, config['basic']['indexFeature'].split(','))) + for dtfeature in indexFeature: + if dtfeature in trainingfeatures: + trainingfeatures.remove(dtfeature) + self.code_config[""selected_features""] = trainingfeatures + self.code_config[""dateTimeFeature""] = datetimeFeature + self.code_config[""pro" +"filer""] = config['advance']['profiler'] + self.code_config[""feature_selector""]= self.__get_feature_selector(config['advance']['selector']) + self.code_config[""feature_reducer""]= self.__get_feature_reducer(config['advance']['selector']) + self.code_config[""corr_threshold""]= 
float(config['advance']['selector']['statisticalConfig'].get('correlationThresholdTarget',0.85)) + self.code_config[""var_threshold""]= float(config['advance']['selector']['statisticalConfig'].get('varianceThreshold',0.01)) + self.code_config[""pValueThreshold""]= float(config['advance']['selector']['statisticalConfig'].get('pValueThresholdTarget',0.04)) + self.code_config[""n_components""]= int(config['advance']['selector']['featureEngineering']['numberofComponents']) + self.code_config[""balancingMethod""] = config['advance']['categoryBalancingMethod'] + self.code_config[""test_ratio""] = int(config['advance']['testPercentage'])/100 + #self.code_config[""scoring_criteria""] = ""accuracy"" + if self.code_config[""problem_type""] in ['classification','regression']: + self.code_config[""algorithms""] = {} + else: + algo = self.__get_true_option(config['basic']['algorithms'][problem_type]) + self.code_config[""algorithms""] = {algo: config['advance'][problem_type]['modelParams'][algo]} #task 11997 + self.code_config[""scoring_criteria""] = self.__get_scoring_criteria(self.__get_true_option(config['basic'][""scoringCriteria""][problem_type])) + if problem_type.lower() == 'timeseriesforecasting': #task 11997 + self.code_config[""lag_order""] = self.code_config[""algorithms""][algo][""lag_order""] + self.code_config[""noofforecasts""] = config[""basic""][""noofforecasts""] + self.code_config[""target_feature""] = config['basic']['targetFeature'].split(',') + self.code_config[""optimization""] = config['advance']['mllearner_config']['optimizationMethod'] + self.code_config[""optimization_param""] = self.__getOptimizationParam(config['advance']['mllearner_config']['optimizationHyperParameter']) + + +if __name__ == '__main__': + codeConfigure = code_configure() + codeConfigure.create_config(""C:\\\\Users\\\\vashistah\\\\AppData\\\\Local\\\\HCLT\\\\AION\\\\config\\\\AION_1668151242.json"") + codeConfigure.save_config(r""C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\target\\AION_57_ts_1"") ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import json + +def getDebiasingDetail(self): + try: + if(self.advance['profiler']['deBiasing']): + dlconfig = self.advance['profiler']['deBiasing'] + return dlconfig + else: + return('NA') + except KeyError: + return('NA') + +def getFirstDocumentFeature(self): + if 'firstDocFeature' in self.basic: + firstDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature'] + else: + firstDocFeature = '' + return(firstDocFeature) + +def getSecondDocumentFeature(self): + if 'secondDocFeature' in self.basic: + secondDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature'] + else: + secondDocFeature = '' + return(secondDocFeature) + +def getEionInliers(self): + if 'inlierLabels' in self.basic: + self.inlierLabels = self.basic['inlierLabels'] + else: + self.inlierLabels = 'NA' + return (self.inlierLabels) + +def getEionProfilerConfigurarion(self): + try: + if(self.advance['profiler']): + return self.advance['profiler'] + else: + return('NA') + except KeyError: + return('NA') + +def getAIONTestTrainPercentage(self): + try: + return (int(self.advance.get('testPercentage',20))) + except KeyError: + return(20) + +def getAIONDataBalancingMethod(self): + try: + if(self.advance['categoryBalancingMethod']): + return self.advance['categoryBalancingMethod'] + else: + return(""oversample"") + except KeyError: + return(""oversample"") + +def getEionSelectorConfiguration(self): + try: + if(self.advance['selector']): + return self.advance['selector'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionDeepLearnerConfiguration(self): + try: + if(self.advance['dllearner_config']): + dlconfig = self.advance['dllearner_config'] + if 'categoryBalancingMethod' not in dlconfig: + dlconfig['categoryBalancingMethod'] = '' + if 'testPercentage' not in dlconfig: #Unnati + dlconfig['testPercentage'] = 20 #Unnati + return dlconfig + else: + return('NA') + except KeyError: + return('NA') + +def gettimegrouper(self): + try: + if(self.basic['timegrouper']): + return self.basic['timegrouper'] + else: + return 'NA' + except: + return 'NA' + +def getgrouper(self): + try: + if(self.basic['group']): + return self.basic['group'] + else: + return 'NA' + except: + return 'NA' + +def getfilter(self): + try: + if(self.basic['filter']): + return self.basic['filter'] + else: + return 'NA' + except: + return 'NA' + +def getNumberofForecasts(self): + try: + if(self.basic['noofforecasts']): + return int(self.basic['noofforecasts']) + else: + return (-1) + except: + return (-1) + +##To get multivariate feature based anomaly detection status +def getMVFeaturebasedAD(self): + try: + dict_ae=self.basic['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder'] #task 11997 + if(dict_ae): + return (dict_ae) + else: + return (-1) + except: + return (-1) + +def getEionDeployerConfiguration(self): + try: + if(self.advance['deployer']): + return self.advance['deployer'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionAssociationRuleConfiguration(self): + try: + if(self.advance['associationrule']): + return self.advance['associationrule'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionObjectDetectionConfiguration(self): + try: + if(self.advance['objectDetection']): + return self.advance['objectDetection'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionTimeSeriesConfiguration(self): + try: + if(self.advance['timeSeriesForecasting']): #task 11997 + return 
self.advance['timeSeriesForecasting'] + else: + return('NA') + except KeyError: + return('NA') + +def getAIONAnomalyDetectionConfiguration(self): + try: + if(self.advance['anomalyDetection']): + return self.advance['anomalyDetection'] + else: + return('NA') + except KeyError: + return('NA') + +def getAIONTSAnomalyDetectionConfiguration(self): #task 11997 + try: + if(self.advance['timeSeriesAnomalyDetection']): + return self.advance['timeSeriesAnomalyDetection'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionVisualizationConfiguration(self): + try: + if(self.advance['visualization_settings']): + return(self.advance['visualization_settings']) + else: + return('NA') + except KeyError: + return('NA') + +def getEionRecommenderConfiguration(self): + try: + if(self.advance['recommenderparam']): + return self.advance['recommenderparam'] + else: + return('NA') + except KeyError: + return('NA') + +def getAionNASConfiguration(self): + try: + if(self.advance['neuralarchsearch']): + return self.advance['neuralarchsearch'] + else: + return('NA') + except KeyError: + return('NA') + +def getEionProfilerStatus(self): + try: + if(self.basic['output']['profilerStage']): + return(self.basic['output']['profilerStage']) + else: + return('false') + except KeyError: + return('false') + +def getEionSelectorStatus(self): + try: + if(self.basic['output']['selectorStage']): + return(self.basic['output']['selectorStage']) + else: + return('disable') + except KeyError: + return('disable') + +def getEionDeploymentStatus(self): + try: + if(self.basic['output']['deploymentStage']): + return(self.basic['output']['deploymentStage']) + else: + return(False) + except KeyError: + return(False) + +def __get_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def getNASStatus(self): + try: + if(self.dlmodels): + return(self.dlmodels) + else: + return('NA') + except KeyError: + return('NA') + +def getTargetFeatures(self): + try: + if(self.basic['targetFeature']): + return(self.basic['targetFeature']) + else: + return('') + except KeyError: + return('') + +def getFolderSettings(self): + try: + if(self.basic['folderSettings']): + return(self.basic['folderSettings']) + else: + return('NA') + except KeyError: + return('NA') + +def getFilterExpression(self): + try: + if(self.basic['filterExpression']): + return (self.basic['filterExpression']) + else: + return None + except KeyError: + return None + +def setModels(self): + try: + analysis_type = self.basic['analysisType'] + #print(analysis_type) + self.problemType = '' + for key in analysis_type.keys(): + if analysis_type[key] == 'True': + self.problemType = key + break + if self.problemType == 'summarization': + self.problemType = 'classification' + self.summarize = True + if self.problemType not in ['inputDrift','outputDrift']: + conf_algorithm = self.basic['algorithms'][self.problemType] + else: + conf_algorithm = {} + self.mlmodels='' + self.dlmodels='' + self.scoringCreteria = 'NA' + if self.problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997 + scorCre = self.basic['scoringCriteria'][self.problemType] + for key in scorCre.keys(): + if scorCre[key] == 'True': + self.scoringCreteria = key + break + if self.problemType.lower() == 'timeseriesforecasting': #task 11997 + self.mllearner=False #task 11997 removed initialising self.ml models as 
timeSeriesForecasting + + if self.scoringCreteria == 'Mean Squared Error': + self.scoringCreteria = 'MSE' + if self.scoringCreteria == 'Root Mean Squared Error': + self.scoringCreteria = 'RMSE' + if self.scoringCreteria == 'Mean Absolute Error': + self.scoringCreteria = 'MAE' + if self.scoringCreteria == 'R-Squared': + self.scoringCreteria = 'R2' + if self.problemType in ['similarityIdentification','contextualSearch']: + self.scoringCreteria = __get_true_option(self.basic['scoringCriteria'][self.problemType], ""Cosine Similarity"") + if self.problemType in ['classification','regression']: + for key in conf_algorithm.keys(): + if conf_algorithm[key] == 'True': + if key not in ['Recurrent Neural Network','Convolutional Neural Network (1D)','Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','GoogleModelSearch_DNN']: + if self.mlmodels != '': + self.mlmodels += ',' + self.mlmodels += key + else: + if self.dlmodels != '': + self.dlmodels += ',' + self.dlmodels += key + elif self.problemType in ['videoForecasting','imageClassification','objectDetection']: + for key in conf_algorithm.keys(): + if conf_algorithm[key] == 'True': + if self.dlmodels != '': + self.dlmodels += ',' + self.dlmodels += key + elif self.problemType == 'recommenderSystem': + problem_model = '' + for key in conf_algorithm.keys(): + if key not in ['itemRatingConfig','textSimilarityConfig']: + if conf_algorithm[key] == 'True': + problem_model = key + break + if problem_model == 'ItemRating': + self.mlmodels = 'SVD' + elif problem_model == 'AssociationRules-Apriori': + self.mlmodels = 'Apriori' + self.problemType = 'AssociationRules' + elif problem_model == 'TextSimilarity-Siamese': + self.mlmodels = 'Siamese' + self.problemType = 'TextSimilarity' + else: + for key in conf" +"_algorithm.keys(): + if conf_algorithm[key] == 'True': + if self.mlmodels != '': + self.mlmodels += ',' + self.mlmodels += key + self.mllearner = False + self.dllearner = False + if self.mlmodels != '': + self.mllearner = True + if self.advance['mllearner_config']['Stacking (Ensemble)'] == 'True': + self.mlmodels += ',' + self.mlmodels += 'Stacking (Ensemble)' + if self.advance['mllearner_config']['Voting (Ensemble)'] == 'True': + self.mlmodels += ',' + self.mlmodels += 'Voting (Ensemble)' + if self.dlmodels != '': + self.dllearner = True + return('done') + except KeyError: + return('NA') + +def readConfigurationFile(self, path): + if isinstance( path, dict): + self.data = path + else: + with open(path, 'r') as data_file: + self.data = json.load(data_file) #loading json object as python dictionary + self.basic = self.data['basic'] + self.advance = self.data['advance'] + problemType = self.setModels() + + if 'output' in self.basic: + if(self.basic['output']['profilerStage']): + if(str(type(self.basic['output']['profilerStage'])) != """"): + msg = ""JSON Validation Fail: Profiling Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + if((self.basic['output']['profilerStage'].lower() == 'true') & ('profiler' not in self.advance)): + msg = ""JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"" + self.log.info(msg) + return False,msg + if(str(type(self.advance['profiler'])) != """"): + msg = ""JSON Validation Fail: Error: Profiler Configuration Syntax"" + self.log.info(msg) + return False,msg + if((self.basic['output']['profilerStage'].lower() != 'true') & (self.basic['output']['profilerStage'].lower() != 'false')): + msg = ""JSON Validation Fail: Profiling 
is Not defined Correctly, it should be either enable or disable"" + self.log.info(msg) + return False,msg + + if(self.basic['output']['selectorStage']): + if(str(type(self.basic['output']['selectorStage'])) != """"): + msg = ""JSON Validation Fail: Selection Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + + if((self.basic['output']['selectorStage'].lower() == 'true') & ('selector' not in self.advance)): + msg = ""JSON Validation Fail: Selector Configuration Not Found"" + self.log.info(msg) + return False,msg + + if((self.basic['output']['selectorStage'].lower() != 'true') & (self.basic['output']['selectorStage'].lower() != 'false')): + msg = ""JSON Validation Fail:: Selection is Not defined Correctly, it should be either enable or disable"" + self.log.info(msg) + return False,msg + + if(str(type(self.advance['selector'])) != """"): + msg = ""JSON Validation Fail: Error: Syntax of Selector"" + self.log.info(msg) + return False,msg + + if 'dataLocation' not in self.basic: + msg = ""JSON Validation Fail: Data Location Not Defined"" + self.log.info(msg) + return False,msg + + if 'deployLocation' not in self.basic: + msg = ""JSON Validation Fail: Deploy Location Not Defined"" + self.log.info(msg) + return False,msg + + if 'deployment' in self.basic: + if(str(type(self.basic['deployment'])) != """"): + msg = ""JSON Validation Fail: deployment Should be String and value should be either enable or disable"" + self.log.info(msg) + return False,msg + if(self.basic['deployment'] == 'enable'): + if 'deployer' in self.advance: + if(str(type(self.advance['deployer'])) != """"): + msg = ""JSON Validation Fail: deployer configuration should be nexted json object"" + self.log.info(msg) + return False,msg + else: + msg = ""JSON Validation Fail: deployer configuration is missing"" + self.log.info(msg) + return False,msg + return True,'Good' + +def getSurvivalEventColumn(self): + try: + if(self.advance['survival_config']): + survival_config = self.advance['survival_config'] + if 'input' in survival_config: + inp = survival_config['input'] + if not isinstance(inp, dict): + return None + elif 'event_col' in inp: + e = inp['event_col'] + if not isinstance(e, str): + return None + return (e) + else: + return None + else: + return None + else: + return None + except KeyError: + return None + +def getSurvivalDurationColumn(self): + try: + if(self.advance['survival_config']): + survival_config = self.advance['survival_config'] + if 'input' in survival_config: + inp = survival_config['input'] + if not isinstance(inp, dict): + return None + elif 'duration_col' in inp: + t = inp['duration_col'] + if not isinstance(t, str): + return None + return (t) + else: + return None + else: + return None + else: + return None + except KeyError: + return None + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import json +import logging +import os +import shutil +import time +import importlib +from sys import platform +from pathlib import Path +from distutils.util import strtobool +import config_manager.pipeline_config_reader as cs +# Base class for EION configuration Manager which read the needed f params from eion.json, initialize the parameterlist, read the respective params, store in variables and return back to caller function or external modules. + +class AionConfigManager: + + def getDebiasingDetail(self): + return cs.getDebiasingDetail(self) + +# eion configuration Constractor + def __init__(self): + self.log = logging.getLogger('eion') + self.data = '' + self.problemType = '' + self.basic = [] + self.advance=[] + self.summarize = False + + #To get the inliner labels for eion anomaly detection + def get_text_feature(self): + self.text_features = [] + feat_dict = self.advance['profiler']['featureDict'] + for feat in feat_dict: + if feat.get('type') == 'text': + if feat.get('feature'): + self.text_features.append(feat['feature']) + return self.text_features + + def validate_config(self): + status = True + error_id = '' + msg = '' + conversion_method = self.__get_true_option(self.advance.get('profiler',{}).get('textConversionMethod',{})) + is_text_feature = self.get_text_feature() + if is_text_feature and conversion_method.lower() == 'fasttext': + status = importlib.util.find_spec('fasttext') + if not status: + error_id = 'fasttext' + msg = 'fastText is not installed. Please install fastText' + return status,error_id, msg + + def getTextlocation(self): + text_data = self.basic[""dataLocation""] + return text_data + + def getTextSummarize(self): + algo = self.basic['algorithms']['textSummarization'] + for key in algo: + if algo[key] == 'True': + algoname = key + method = self.advance['textSummarization']['summaryLength'] + for key in method: + if method[key] == 'True': + methodname = key + + return algoname,methodname + def getAssociationRuleFeatures(self): + if 'invoiceNoFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']: + invoiceNoFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] + else: + invoiceNoFeature ='' + if 'itemFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']: + itemFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'] + else: + itemFeature ='' + return invoiceNoFeature,itemFeature + + def getFirstDocumentFeature(self): + return cs.getFirstDocumentFeature(self) + + def getSecondDocumentFeature(self): + return cs.getSecondDocumentFeature(self) + + def getEionTextSimilarityConfig(self): + return self.advance['textSimilarityConfig'] + + + def getEionTextSummarizationConfig(self): + return self.basic['dataLocation'],self.basic['deployLocation'] ,self.basic['textSummarization']['KeyWords'],self.basic['textSummarization']['pathForKeywordFile'] + + def getEionInliers(self): + return cs.getEionInliers(self) + +#To get the selected models for eion anomaly detection + def getEionanomalyModels(self): + self.anomalyModels = self.mlmodels + return (self.anomalyModels) + +# To get parameter list of configuration module from json, this will be passed as dict{} + def getEionProfilerConfigurarion(self): + return cs.getEionProfilerConfigurarion(self) + + def getAIONTestTrainPercentage(self): + return cs.getAIONTestTrainPercentage(self) + + def getModelEvaluationConfig(self): + try: + return request.POST.get('mydata',{}) + 
except Exception as e: + return({}) + + def getAIONDataBalancingMethod(self): + return cs.getAIONDataBalancingMethod(self) + + def updateFeatureSelection(self, selectorConfig,codeConfigure,vectorizer=False): + if vectorizer: + selectorConfig['selectionMethod']['featureSelection'] = 'True' + selectorConfig['featureSelection']['allFeatures'] = 'True' + selectorConfig['featureSelection']['statisticalBased'] = 'False' + selectorConfig['featureSelection']['modelBased'] = 'False' + codeConfigure.update_config(""feature_selector"", ['allFeatures']) + +# To get parameter list of selector module params + def getEionSelectorConfiguration(self): + return cs.getEionSelectorConfiguration(self) + + def createDeploymentFolders(self,deployFolder,iterName,iterVersion): + usecase = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion) + folders = ['data','log','model','script','etc'] + skip_delete = ['log'] + deployLocation = Path(deployFolder)/iterName/iterVersion + deployLocation.mkdir(parents=True, exist_ok=True) + + # delete previous failed/trained use case outputs except log folder + # as logging is already enabled for current usecase + for x in deployLocation.iterdir(): + if x.is_file(): # bug 13315 delete existing files + x.unlink() + elif x.is_dir(): + if x.stem not in skip_delete: + shutil.rmtree( x) + for folder in folders: + (deployLocation/folder).mkdir( parents=True, exist_ok=True) + (deployLocation/'log'/'img').mkdir( parents=True, exist_ok=True) + data_location = deployLocation/'data' + paths = { + 'usecase': str(deployLocation.parent), + 'deploy': str(deployLocation), + 'data': str(deployLocation/'data'), + 'image': str(deployLocation/'log'/'img'), + } + files = { + 'original': str(data_location/'preprocesseddata.csv.gz'), + 'profiled': str(data_location/'postprocesseddata.csv.gz'), + 'reduction': str(data_location/'reductiondata.csv'), + 'trained': str(data_location/'trainingdata.csv'), + 'predicted': str(data_location/'predicteddata.csv.gz'), + 'logs': str(deployLocation/'log'/'model_training_logs.log'), + 'output': str(deployLocation/'etc'/'output.json'), + } + return( paths['usecase'],paths['deploy'],paths['data'],paths['image'],files['original'],files['profiled'],files['trained'],files['predicted'],files['logs'],files['output'],files['reduction']) + +# To get parameter list of learner module params + def getEionLearnerConfiguration(self): + try: + if(self.advance['mllearner_config']): + mllearner_config = self.advance['mllearner_config'] + if 'categoryBalancingMethod' not in mllearner_config: + mllearner_config['categoryBalancingMethod'] = 'oversample' + if 'testPercentage' not in mllearner_config: + mllearner_config['testPercentage'] = 20 + if 'missingTargetCategory' not in mllearner_config: + mllearner_config['missingTargetCategory'] = '' + mllearner_config['modelParams']['classifierModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network'] + mllearner_config['modelParams']['classifierModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search'] + " +" mllearner_config['modelParams']['classifierModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network'] + mllearner_config['modelParams']['regressorModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network'] + 
mllearner_config['modelParams']['regressorModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network'] + mllearner_config['modelParams']['regressorModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search'] + + return mllearner_config + else: + return('NA') + except KeyError: + return('NA') + except Exception as inst: + self.log.info( '\\n-----> getEionLearnerConfiguration failed!!!.'+str(inst)) + return('NA') + + def getEionDeepLearnerConfiguration(self): + return cs.getEionDeepLearnerConfiguration(self) + + def gettimegrouper(self): + return cs.gettimegrouper(self) + + def getgrouper(self): + return cs.getgrouper(self) + + def getfilter(self): + return cs.getfilter(self) + + def getNumberofForecasts(self): + return cs.getNumberofForecasts(self) + + ##To get multivariate feature based anomaly detection status + def getMVFeaturebasedAD(self): + return cs.getMVFeaturebasedAD(self) + + def getModulesDetails(self): + problem_type = self.problemType + visualizationstatus = self.getEionVisualizationStatus() + profiler_status = self.getEionProfilerStatus() + selector_status = self.getEionSelectorStatus() + learner_status = self.mllearner + deeplearner_status = self.dllearner + targetFeature = self.getTargetFeatures() + deploy_status = self.getEionDeploymentStatus() + VideoProcessing = False + similarityIdentificationStatus = False + contextualSearchStatus = False + anomalyDetectionStatus = False + if problem_type.lower() == 'survivalanalysis': + survival_analysis_status = True + selector_status = False + associationRuleStatus = 'disable' + timeseriesStatus = 'disable' + learner_status = False + deeplearner_status = False + else: + survival_analysis_status = False + + if problem_type.lower() == 'textsimilarity': + selector_status = False + learner_status = False + deeplearner_status = False + timeseriesStatus = 'disable' + associationRuleStatus = 'disable' + inputDriftStatus = 'disable' + textSimilarityStatus = True + else: + textSimilarityStatus = False + + if problem_type.lower() == 'inputdrift': + inputDriftStatus = True + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + timeseriesStatus = 'disable' + associationRuleStatus = 'disable' + deploy_status = False + visualizationstatus = False + else: + inputDriftStatus = False + + if problem_type.lower() == 'outputdrift': + outputDriftStatus = True + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + timeseriesStatus = 'disable' + associationRuleStatus = 'disable' + deploy_status = False + visualizationstatus = False + else: + outputDriftStatus = False + + if problem_type.lower() == 'recommendersystem': + recommenderStatus = True + #profiler_status = 'disable' + selector_status = False + learner_status = False + deeplearner_status = False + timeseriesStatus = 'disable' + associationRuleStatus = 'disable' + #Task 11190 + visualizationstatus = False + else: + recommenderStatus = False + ''' + if profiler_status.lower() == 'enable': + profiler_status = True + else: + profiler_status = False + + if selector_status.lower() == 'enable': + selector_status = True + else: + selector_status = False + + if visualizationstatus.lower() == 'enable': + visualizationstatus = True + else: + visualizationstatus = False + ''' + + if learner_status: + if(problem_type == 'NA'): + learner_status 
= True + elif(problem_type.lower() in ['classification','regression','clustering','anomalydetection', 'topicmodelling', 'objectdetection', 'timeseriesanomalydetection']): #task 11997 + learner_status = True + else: + learner_status = False + if problem_type.lower() == 'anomalydetection' or problem_type.lower() == 'timeseriesanomalydetection': #task 11997 + anomalyDetectionStatus = True + + if deeplearner_status: + if(problem_type.lower() == 'na'): + deeplearner_status = True + elif(problem_type.lower() in ['classification','regression']): + deeplearner_status = True + else: + deeplearner_status = False + + if(targetFeature == ''): + deeplearner_status = False + + + if problem_type.lower() == 'timeseriesforecasting': #task 11997 + timeseriesStatus = True + profiler_status = True #task 12627 + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = 'disable' + else: + timeseriesStatus = False + + if problem_type.lower() == 'videoforecasting': + forecastingStatus = True + timeseriesStatus = False + profiler_status = True + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = 'disable' + else: + forecastingStatus = False + + if problem_type.lower() == 'imageclassification': + imageClassificationStatus = True + timeseriesStatus = False + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = 'disable' + else: + imageClassificationStatus = False + + if problem_type.lower() == 'associationrules': + associationRuleStatus = True + timeseriesStatus = False + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + visualizationstatus = False + else: + associationRuleStatus = False + + if problem_type.lower() == 'statetransition': + stateTransitionStatus = True + objectDetectionStatus = False + imageClassificationStatus = False + timeseriesStatus = False + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = False + visualizationstatus = False + else: + stateTransitionStatus = False + + if problem_type.lower() == 'objectdetection': + objectDetectionStatus = True + imageClassificationStatus = False + timeseriesStatus = False + profiler_status = False + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = False + visualizationstatus = False + else: + objectDetectionStatus = False + if problem_type.lower() == 'similarityidentification': + similarityIdentificationStatus = True + objectDetectionStatus = False + imageClassificationStatus = False + timeseriesStatus = False + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = False + visualizationstatus = False + self.updateEmbeddingForDocSimilarity() + else: + similarityIdentificationStatus = False + + if problem_type.lower() == 'contextualsearch': + contextualSearchStatus = True + objectDetectionStatus = False + imageClassificationStatus = False + timeseriesStatus = False + selector_status = False + learner_status = False + deeplearner_status = False + associationRuleStatus = False + visualizationstatus = False + self.updateEmbeddingForContextualsearch() + else: + contextualSearchStatus = False + + + if problem_type.lower() == 'textsummarization': + textSummarization = True + profiler_status = False + selector_status = False + else: + textSummarization = False 
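+        # Note on the return value below: getModulesDetails() resolves the selected
+        # problem type into per-stage enable flags (profiler, selector, ML/DL learner,
+        # time series, visualization, deployment, etc.) and returns them together with
+        # problem_type and targetFeature as a 23-element tuple; callers unpack it
+        # positionally, so the element order should not change.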
+ ''' + if deploy_status.lower() == 'enable': + deploy_status = True + else: + deploy_status = False + ''' + #print(inputDriftStatus) + return problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textSummarization,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus,objectDetectionStatus,stateTransitionStatus,similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus + + def __get_true_option(self, d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + + def updateEmbeddingForDocSimilarity(self): + method = self.__get_true_option(self.basic['algorithms']['similarityIdentification']) + textConversionMethods = self.advance['profiler']['textConversionMethod'] + print(""------------""+method+'---------------') + for key in textConversionMethods: + if key == method: + self.advance['profiler']['textConversionMethod'][key] = ""True"" + else: + self.advance['profiler']['textConversionMethod'][key] = ""False"" + if method.lower() == 'bm25': + self.advance['profiler']['textConversionMethod']['bm25'] = ""True"" + + def updateEmbeddingForContextualsearch(self): + method = self.__get_true_option(self.basic['algorithms']['contextualSearch']) + textConversionMethods = self.advance['profiler']['textConversionMethod'] + print(""------------""+method+'---------------') + for key in textConversionMethods: + if key == method: + self.advance['profiler']['textConversionMethod'][key] = ""True"" + else: + self.advance['profiler']['textConversionMethod'][key] = ""False"" + if method.lower() == 'bm25': + self.advance['profiler']['textConversionMethod']['bm25'] = ""True"" + + def get_conversion_method(self): + return self.__get_true_option( self.advance['profiler']['textConversionMethod']) + + def getAlgoName(self, problem_type=None): + if problem_type == None: + problem_type = self.__get_true_option(self.basic['algorithms']) + return self.__get_true_option(self.basic['algorithms'][problem_type]) + + def getScoringCreteria(self): + return self.scoringCreteria + + def getVectorDBCosSearchStatus(self,problemType): + if self.basic['preprocessing'][problemType]['VectorDB'] == 'True': + return True + else: + return False + + def getVectorDBFeatureDelimitInDoc(self): + return ' ~&~ ' + + def getEionDeployerConfiguration(self): + return cs.getEionDeployerConfiguration(self) + + def getEionAssociationRuleConfiguration(self): + return cs.getEionAssociationRuleConfiguration(self) + + def getEionAssociationRuleModelParams(self): + try: + associationConfg = self.advance['associationrule'] + if 'modelParams' in associationConfg: + modelParams = associationConfg['modelParams'] + if(str(type(modelParams)) != """"): + modelParams = [] + elif(len(modelParams) == 0): + modelParams = [] + if(len(modelParams) == 0): + if 'modelparamsfile' in associationConfg: + ml_algorithm_filename = associationConfg['modelparamsfile'] + if(ml_algorithm_filename == ''): + ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/AssciationRules_Defaults.json' + modelParams = json.loads(open(ml_algorithm_filename).read()) + modelList = [] + modelList = list(modelParams.keys()) + return(modelParams,modelList) + except KeyError: + modelParams = [] + modelList=[] + 
return(modelParams,modelList) + + def getEionImageAugmentationConfiguration(self): + try: + enable = self.advance['ImageAug" +"mentation'].get('Enable', ""False"") + keepAugImages = self.advance['ImageAugmentation'].get('KeepAugmentedImages', ""False"") + if enable == ""True"": + operations = {} + operations.update(self.advance['ImageAugmentation'].get('Noise', {})) + operations.update(self.advance['ImageAugmentation'].get('Transformation', {})) + if keepAugImages == 'True': + keepAugImages = True + if keepAugImages == 'False': + keepAugImages = False + return True,keepAugImages,{key: True if value.lower() == ""true"" else False for key, value in operations.items()},self.advance['ImageAugmentation'].get('configuration',{}) + else: + return False,False, {},{} + except KeyError: + return False,False, {},{} + def getAIONRemoteTraining(self): + try: + if(self.advance['remoteTraining']): + self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False')) + return self.advance['remoteTraining'] + else: + remoteTraining = {} + remoteTraining['Enable'] = False + remoteTraining['server'] = None + remoteTraining['ssh'] = None + return(remoteTraining) + except KeyError: + remoteTraining = {} + remoteTraining['Enable'] = False + remoteTraining['server'] = None + remoteTraining['ssh'] = None + return(remoteTraining) + + def getEionObjectDetectionConfiguration(self): + return cs.getEionObjectDetectionConfiguration(self) + + def getEionTimeSeriesConfiguration(self): + return cs.getEionTimeSeriesConfiguration(self) + + def getAIONAnomalyDetectionConfiguration(self): + return cs.getAIONAnomalyDetectionConfiguration(self) + + def getAIONTSAnomalyDetectionConfiguration(self): + return cs.getAIONTSAnomalyDetectionConfiguration(self) + + def getEionVisualizationStatus(self): + return(True) + + def getEionVisualizationConfiguration(self): + return cs.getEionVisualizationConfiguration(self) + + def getEionRecommenderConfiguration(self): + return cs.getEionRecommenderConfiguration(self) + + def getAionNASConfiguration(self): + return cs.getAionNASConfiguration(self) + + def getEionProblemType(self): + try: + analysis_type = self.basic['analysisType'] + self.problemType = '' + for key in analysis_type.keys(): + if analysis_type[key] == 'True': + self.problemType = key + break + if self.problemType: + return self.problemType + else: + return('NA') + except KeyError: + return('NA') + def getEionProfilerStatus(self): + return cs.getEionProfilerStatus(self) + + def getEionSelectorStatus(self): + return cs.getEionSelectorStatus(self) + + def getEionDeploymentStatus(self): + return cs.getEionDeploymentStatus(self) + + def getEionTimeSeriesModelParams(self): + try: + selectedMLModel = self.mlmodels + tsconfig = self.advance['timeSeriesForecasting'] #task 11997 + if 'modelParams' in tsconfig: + modelParams = tsconfig['modelParams'] + if(str(type(modelParams)) != """"): + modelParams = [] + elif(len(modelParams) == 0): + modelParams = [] + if(len(modelParams) == 0): + if 'modelparamsfile' in tsconfig: + ml_algorithm_filename = tsconfig['modelparamsfile'] + if(ml_algorithm_filename == ''): + ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/TS_Defaults.json' + modelParams = json.loads(open(ml_algorithm_filename).read()) + + #Modified getting modelParams as small letters + modelParams = {k.lower(): v for k, v in modelParams.items()} + #print(""\\n modelParams: type \\n"",modelParams,type(modelParams)) + + if selectedMLModel != '': + #if selectedMLModel.lower() != 
'var': + if ('var' not in selectedMLModel.lower()): + modelList = selectedMLModel.split("","") + modelList = list(map(str.strip, modelList)) + #Modified getting modelList as small letters + modelList = [strMP.lower() for strMP in modelList] + for mod in modelList: + if mod not in modelParams: + self.log.info(""'""+mod+""' Not Available for Particular Problem Type"") + modelList.remove(mod) + else: + modelList = selectedMLModel.split("","") + #Modified + modelList = [strMP.lower() for strMP in modelList] + modelList = list(map(str.strip, modelList)) + else: + #Modified + modelParams = [strMP.lower() for strMP in modelParams] + modelList = list(modelParams.keys()) + return(modelParams,modelList) + except KeyError: + modelParams = [] + modelList=[] + return(modelParams,modelList) + #NAS status + def getNASStatus(self): + return cs.getNASStatus(self) + + def getEionImageLearnerModelParams(self): + try: + selectedDLModel = self.dlmodels + learnerconfig = self.advance['image_config'] + modelList = selectedDLModel.split("","") + return(learnerconfig,modelList) + except KeyError: + learnerconfig = [] + modelList=[] + return(learnerconfig,modelList) + + def getAionObjectDetectionModelParams(self): + try: + selectedDLModel = self.dlmodels + modelList = selectedDLModel.split("","") + return(modelList) + except KeyError: + modelList=[] + return(modelList) + + def getEionVideoLearnerModelParams(self): + try: + selectedDLModel = self.basic['selected_DL_Models'] + learnerconfig = self.advance['video_config'] + modelList = selectedDLModel.split("","") + return(learnerconfig,modelList) + except KeyError: + learnerconfig = [] + modelList=[] + return(learnerconfig,modelList) + + def getEionDeepLearnerModelParams(self,modelType): + try: + numberofModels = 0 + dl_algorithm_filename = '' + if(modelType == 'classification'): + requiredalgo = 'classifierModelParams' + elif(modelType == 'regression'): + requiredalgo = 'regressorModelParams' + selectedmodels = 'regression' + elif(modelType == 'TextClassification'): + requiredalgo = 'classifierModelParams' + elif(modelType == 'clustering'): + requiredalgo = 'clusteringModelParams' + learnerconfig = self.advance['dllearner_config'] + selectedDLModel = self.dlmodels + modelParams = [] + modelList=[] + if 'modelParams' in learnerconfig: + modelParams = learnerconfig['modelParams'] + if(str(type(modelParams)) != """"): + modelParams = [] + elif(len(modelParams) == 0): + modelParams = [] + if(len(modelParams) == 0): + if 'modelparamsfile' in learnerconfig: + if(learnerconfig['modelparamsfile'] != """"): + dl_algorithm_filename = learnerconfig['modelparamsfile'] + if(dl_algorithm_filename == ''): + dl_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/DL_Defaults.json' + modelParams = json.loads(open(dl_algorithm_filename).read()) + if requiredalgo in modelParams: + modelParams = modelParams[requiredalgo] + + if selectedDLModel != '': + modelList = selectedDLModel.split("","") + modelList = list(map(str.strip, modelList)) + for mod in modelList: + if mod not in modelParams: + self.log.info(""'""+mod+""' Not Available for Particular Problem Type"") + modelList.remove(mod) + else: + modelList = list(modelParams.keys()) + #modelParams = dict((k.lower(), v) for k, v in modelParams .items()) + #modelList = selectedMLModel.split("","") + if(len(modelList) == 0): + modelList = list(modelParams.keys()) + return(modelParams,modelList) + except KeyError: + modelParams = [] + modelList=[] + return(modelParams,modelList) + + def 
getEionLearnerModelParams(self,modelType): + try: + numberofModels = 0 + ml_algorithm_filename = '' + if(modelType == 'classification'): + requiredalgo = 'classifierModelParams' + elif(modelType == 'regression'): + requiredalgo = 'regressorModelParams' + elif(modelType == 'TextClassification'): + requiredalgo = 'classifierModelParams' + elif(modelType == 'clustering'): + requiredalgo = 'clusteringModelParams' + elif(modelType == 'topicmodelling'): + requiredalgo = 'topicModellingParams' + learnerconfig = self.advance['mllearner_config'] + selectedMLModel = self.mlmodels + modelParams = [] + modelList=[] + if 'modelParams' in learnerconfig: + modelParams = learnerconfig['modelParams'] + if(str(type(modelParams)) != """"): + modelParams = [] + elif(len(modelParams) == 0): + modelParams = [] + if(len(modelParams) == 0): + if 'modelparamsfile' in learnerconfig: + if(learnerconfig['modelparamsfile'] != """"): + ml_algorithm_filename = learnerconfig['modelparamsfile'] + if(ml_algorithm_filename == ''): + ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json' + modelParams = json.loads(open(ml_algorithm_filename).read()) + if requiredalgo in modelParams: + modelParams = modelParams[requiredalgo] + #modelParams = dict((k.lower(), v) for k, v in modelParams .items()) + #print(modelParams) + #modelList = list(modelParams.keys()) + #print(""SelectedModels"") + #self.log.info(selectedmodels) + #if selectedmodels in selectedMLModel: + if selectedMLModel != '': + modelList = selectedMLModel.split("","") + modelList = list(map(str.strip, modelList)) + for mod in modelList: + if mod not in modelParams: + self.log.info(""'""+mod+""' Not Available for Particular Problem Type"") + modelList.remove(mod) + else: + modelList = list(modelParams.keys()) + #modelList = selectedMLModel.split("","") + if(len(modelList) ==0): + modelList = list(modelParams.keys()) + return(modelParams,modelList) + except KeyError: + modelParams = [] + modelList=[] + return(modelParams,modelList) + + def getTargetFeatures(self): + return cs.getTargetFeatures(self) + + def getModelFeatures(self): + try: + if(self.basic['trainingFeatures']): + modFeatures = self.basic['trainingFeatures'] + modFeatures = modFeatures.split("","") + modFeatures = list(map(str.strip, modFeatures)) + modFeatures = "","".join([modf for modf in modFeatures]) + return(modFeatures) + else: + return('NA') + except KeyError: + return('NA') + + def getFolderSettings(self): + return cs.getFolderSettings(self) + + def getAIONLocationSettings(self): + self.iter_name = self.basic['modelName'] + self.iteration_version = self.basic['modelVersion'] + if(self.basic['dataLocation']): + dataLocation = self.basic['dataLocation'] + else: + dataLocation = 'NA' + if(self.basic['deployLocation']): + deployLocation = self.basic['deployLocation'] + else: + deployLocation = 'NA' + try: + if 'fileSettings' in self.basic: + csv_setting = self.basic['fileSettings'] + if 'delimiters' in csv_setting: + delimiter = csv_setting['delimiters'] + if delimiter.lower() == 'tab' or delimiter.lower() == '\\t': + delimiter = '\\t' + elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';': + delimiter = ';' + elif delimiter.lower() == 'comma' or delimiter.lower() == ',': + delimiter = ',' + " +" elif delimiter.lower() == 'space' or delimiter.lower() == ' ': + delimiter = ' ' + elif delimiter.lower() == 'other': + if 'other' in csv_setting: + delimiter = csv_setting['other'] + else: + delimiter = ',' + elif delimiter == '': + delimiter = ',' + else: + 
delimiter = ',' + if 'textqualifier' in csv_setting: + textqualifier = csv_setting['textqualifier'] + else: + textqualifier = '""' + else: + delimiter = ',' + textqualifier = '""' + except KeyError: + delimiter = ',' + textqualifier = '""' + return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier) + + def getFeatures(self): + try: + if(self.basic['dateTimeFeature']): + dtFeatures = self.basic['dateTimeFeature'] + dtFeatures = dtFeatures.split("","") + dtFeatures = list(map(str.strip, dtFeatures)) + dtFeatures = "","".join([dtf for dtf in dtFeatures]) + else: + dtFeatures = 'NA' + except KeyError: + dtFeatures = 'NA' + + try: + if(self.basic['indexFeature']): + iFeatures = self.basic['indexFeature'] + iFeatures = iFeatures.split("","") + iFeatures = list(map(str.strip, iFeatures)) + iFeatures = "","".join([dif for dif in iFeatures]) + else: + iFeatures = 'NA' + except KeyError: + iFeatures = 'NA' + + try: + if(self.basic['trainingFeatures']): + modFeatures = self.basic['trainingFeatures'] + modFeatures = modFeatures.split("","") + modFeatures = list(map(str.strip, modFeatures)) + modFeatures = "","".join([modf for modf in modFeatures]) + else: + modFeatures = 'NA' + except KeyError: + modFeatures = 'NA' + return(dtFeatures,iFeatures,modFeatures) + + def setModels(self): + return cs.setModels(self) + + def readConfigurationFile(self,path): + return cs.readConfigurationFile(self, path) + + def getFilterExpression(self): + return cs.getFilterExpression(self) + + def getSurvivalEventColumn(self): + return cs.getSurvivalEventColumn(self) + + def getSurvivalDurationColumn(self): + return cs.getSurvivalDurationColumn(self) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +'''# -*- coding: utf-8 -*- + +"""""" +@author: satish_k +"""""" + +import pandas as pd +import numpy as np +import matplotlib.pyplot as plt +import seaborn as sns +import statistics +from sklearn.impute import SimpleImputer +from sklearn.model_selection import train_test_split +from lifelines import KaplanMeierFitter, CoxPHFitter +from lifelines.statistics import logrank_test +from scipy import stats +import logging + + +class SurvivalAnalysis(object): + + def __init__(self, df, method, event_column, duration_column, fitter_param=None, df_negate=None ): + pd.options.display.width = 30 + self.df = df + self.fitter_param = fitter_param + self.method = method + self.event_column = event_column + self.duration_column = duration_column + self.models = [] + self.train = df.drop_duplicates().reset_index() + self.test = None + if isinstance(df_negate, pd.DataFrame): + self.df_n = df_negate.drop_duplicates().reset_index() + else: + self.df_n = None + self.log = logging.getLogger('eion') + self.plots = [] + + def learn(self): + self.log.info('\\n---------- SurvivalAnalysis learner has started ----------') + self.log.info('\\n---------- SurvivalAnalysis learner method is ""%s"" ----------'%self.method) + + lifelines_univariate_models = [""AalenJohansenFitter"", ""BreslowFlemingHarringtonFitter"", ""ExponentialFitter"", ""GeneralizedGammaFitter"", + ""KaplanMeierFitter"", ""LogLogisticFitter"", ""LogNormalFitter"", ""MixtureCureFitter"", ""NelsonAalenFitter"", ""PiecewiseExponentialFitter"", + ""SplineFitter"", ""WeibullFitter""] + + lifelines_regression_models = [""AalenAdditiveFitter"", ""CRCSplineFitter"", ""CoxPHFitter"", ""CoxTimeVaryingFitter"", ""GeneralizedGammaRegressionFitter"", + ""LogLogisticAFTFitter"", ""LogNormalAFTFitter"", ""PiecewiseExponentialRegressionFitter"", ""WeibullAFTFitter""] + + + if self.method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']: + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has started ----------'%self.method) + #from lifelines.utils import find_best_parametric_model + #m,s = find_best_parametric_model(event_times=self.df[self.duration_column]) + if not isinstance(self.df_n, pd.DataFrame): + kmf = KaplanMeierFitter() + self.log.info('\\n Shape of training data - %s'%str(self.train.shape)) + T = self.train[self.duration_column] + E = self.train[self.event_column] + self.log.info('\\n T : \\n%s'%str(T)) + self.log.info('\\n E : \\n%s'%str(E)) + K = kmf.fit(T, E) + ax = plt.subplot(111) + + kmf_sf = K.survival_function_ + ax = kmf_sf.plot(ax=ax) + + kmf_sf_json = self.survival_probability_to_json(kmf_sf) + self.models.append(K) + plt.title(""KM Survival Functions"") + self.plots.append(plt) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + return kmf_sf_json + else: + kmf1 = KaplanMeierFitter() + kmf2 = KaplanMeierFitter() + T1 = self.train[self.duration_column] + E1 = self.train[self.event_column] + #self.df_n = self.df_n.drop('fin', axis=1) + T2 = self.df_n[self.duration_column] + E2 = self.df_n[self.event_column] + + ax = plt.subplot(111) + plt.title(""KM Survival Functions - Filter vs Negation"") + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for filter expression has started----------'%self.method) + kmf1.fit(T1, E1) + ax = kmf1.plot(ax=ax, label='%s'%self.fitter_param) + self.log.info('\\n---------- SurvivalAnalysis learner 
""%s"" fitting for filter expression has ended----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for negation has started----------'%self.method) + kmf2.fit(T2, E2) + ax = kmf2.plot(ax=ax, label='~%s'%self.fitter_param) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for negation has ended----------'%self.method) + self.models.extend([kmf1,kmf2]) + + kmf1_sf = kmf1.survival_function_ + kmf2_sf = kmf2.survival_function_ + kmf1_sf_json = self.survival_probability_to_json(kmf1_sf) + self.plots.append(plt) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + return kmf1_sf_json + + elif self.method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']: + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has started ----------'%self.method) + #from lifelines.utils import k_fold_cross_validation + if not isinstance(self.df_n, pd.DataFrame): + cph = CoxPHFitter() + C = cph.fit(self.train, self.duration_column, self.event_column, show_progress=True) + self.models.append(C) + cph_sf = C.baseline_survival_ + ax = plt.subplot(111) + ax = C.plot(ax=ax) + cph_sf_json = self.survival_probability_to_json(cph_sf) + self.log.info('\\n Summary : \\n%s'%str(C.summary)) + plt.title(""COX hazard ratio"") + self.plots.append(plt) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + #plt.show() + return cph_sf_json + else: + cph1 = CoxPHFitter(penalizer=0.0001) + cph2 = CoxPHFitter(penalizer=0.0001) + ax = plt.subplot(211) + plt.title(""COX hazard ratio - [%s](Top) vs [~(%s)](Bottom)""%(self.fitter_param,self.fitter_param)) + #self.train = self.train.drop('fin',axis=1) + self.df_n = self.drop_constant_features(self.df_n) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for filter expression has started----------'%self.method) + cph1.fit(self.train, self.duration_column, self.event_column, show_progress=True, step_size=0.4) + ax = cph1.plot(ax=ax, label='%s'%self.fitter_param) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for filter expression has ended----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for negation has started----------'%self.method) + cph2.fit(self.df_n, self.duration_column, self.event_column, show_progress=True, step_size=0.4) + ax = plt.subplot(212) + ax = cph2.plot(ax=ax, label='~%s'%self.fitter_param) + self.log.info('\\n---------- SurvivalAnalysis learner ""%s"" fitting for negation has ended----------'%self.method) + self.models.extend([cph1,cph2]) + + cph1_sf = cph1.baseline_survival_ + cph2_sf = cph2.baseline_survival_ + cph1_sf_json = self.survival_probability_to_json(cph1_sf) + #plt.show() + plt.tight_layout() + self.plots.append(plt) + self.log.info('\\n---------- SurvivalAnalysis learner method ""%s"" has ended ----------'%self.method) + self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------') + return cph1_sf_json + + def survival_probability_to_json(self, sf): + ''' + sf = Survival function i.e. 
KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_ + returns json of survival probabilities + ''' + sf = sf[sf.columns[0]].apply(lambda x: ""%4.2f""%(x*100)) + self.log.info('\\n Survival probabilities : \\n%s'%str(sf)) + sf = sf.reset_index() + sf = sf.sort_values(sf.columns[0]) + sf_json = sf.to_json(orient='records') + self.log.info('\\n Survival prbability json : \\n%s'%str(sf_json)) + + return sf_json + + def drop_constant_features(self, df): + for col in df.columns: + if len(df[col].unique()) == 1: + df.drop(col,inplace=True,axis=1) + return df + + def predict(self): + if self.method == 'KaplanMeierFitter': + return self.model.predict(self.test[self.duration_column]) + #kmf.predict() + #kmf.median_survival_time_ + #from lifelines.utils import median_survival_times + #median_ci = median_survival_times(kmf.confidence_interval_) + + elif self.method == 'CoxPHFitter': + #print('train score',self.model.score(self.train)) + #print('test score',self.model.score(self.test)) + return self.model.predict_survival_function(self.test) + #cph.predict_cumulative" +"_hazard() + #cph.predict_expectation() + #cph.predict_log_partial_hazard() + #cph.predict_median() + #cph.predict_partial_hazard() + #cph.predict_percentile() + #cph.predict_survival_function() + #cph.predict_hazard() + #cph.score() + #cph.summary() + +#if __name__ == ""__main__"": +# data_file = r""C:\\Users\\satish_k\\Desktop\\Work\\input\\echocardiogram.csv"" +# #data_file = r""C:\\Users\\satish_k\\Desktop\\Work\\input\\lymphoma.csv"" +# method = ""CoxPHFitter"" +# event_column = ""alive"" +# duration_column = ""survival"" +# sa = SurvivalAnalysis(data_file, method, event_column, duration_column) +# sa.profiler() +# model = sa.learn() +# print(sa.predict()) + #print(model.survival_function_) + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from numpy import mean +from numpy import std +from pandas import read_csv +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OneHotEncoder +from sklearn.compose import ColumnTransformer +from learner.machinelearning import machinelearning +# from sklearn.dummy import DummyClassifier +# create histograms of numeric input variables +import sys +import os +import re +import pandas as pd +import numpy as np +from learner.aion_matrix import aion_matrix +import tensorflow as tf +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +import autokeras as ak +# load the sonar dataset +from sklearn.model_selection import train_test_split +# from sklearn.metrics import cohen_kappa_score +# from sklearn.metrics import roc_auc_score +# from sklearn.metrics import confusion_matrix +from sklearn.metrics import roc_curve +from math import sqrt +from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error +from sklearn import metrics + + +class aionNAS: + def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation): + try: + self.dfFeatures=None + self.nas_class=nas_class + self.nas_params=nas_params + self.targetFeature=None + self.log = logging.getLogger('eion') + self.n_models=int(self.nas_params['n_models']) + self.n_epochs=int(self.nas_params['n_epochs']) + self.optimizer=self.nas_params['optimizer'] + self.metrics=self.nas_params['metrics'] + self.tuner=self.nas_params['tuner'] + self.seed=int(self.nas_params['seed']) + self.xtrain = xtrain1 + self.xtest = xtest1 + self.ytrain = ytrain1 + self.ytest = ytest1 + #self.labelMaps = labelMaps + self.deployLocation=deployLocation + + except Exception as e: + self.log.info(' ') + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def paramCheck(self): + try: + if not (self.nas_class): + self.log.info(' ') + if not (self.nas_params): + self.log.info(' ') + if not (self.targetFeature): + self.log.info(' ') + if (self.n_models < 1): + self.n_models=1 + if not (self.dfFeatures): + self.log.info(' ') + if (self.n_epochs < 1): + self.n_models=1 + if not (self.optimizer): + self.optimizer=""adam"" + if not (self.tuner): + self.tuner=""greedy"" + if (self.seed < 1): + self.seed=0 + if not (self.metrics): + self.metrics=None + except ValueError: + self.log.info('<------------------ NAS config file error. 
--------------->') + + + + + + def recall_m(self,y_true, y_pred): + true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) + possible_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1))) + recall = true_positives / (possible_positives + tf.keras.backend.epsilon()) + return recall + + def precision_m(self,y_true, y_pred): + true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) + predicted_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1))) + precision = true_positives / (predicted_positives + tf.keras.backend.epsilon()) + return precision + + def f1_score(self,y_true, y_pred): + precision = self.precision_m(y_true, y_pred) + recall = self.recall_m(y_true, y_pred) + return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon())) + + def nasStructdataPreprocess(self): + df=self.data + self.paramCheck() + target=df[self.targetFeature].values + counter = Counter(target) + for k,v in counter.items(): + per = v / len(target) * 100 + self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per)) + # select columns with numerical data types + num_ix = df.select_dtypes(include=['int64', 'float64']).columns + subset = df[num_ix] + last_ix = len(df.columns) - 1 + y=df[self.targetFeature] + X = df.drop(self.targetFeature, axis=1) + #Using Pearson Correlation + # plt.figure(figsize=(12,10)) + # cor = df.corr() + # sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) + # plt.show() + # select categorical features + cat_ix = X.select_dtypes(include=['object', 'bool']).columns + # one hot encode cat features only + ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough') + X = X.reset_index() + X=X.replace(to_replace=""NULL"",value=0) + X = X.dropna(how='any',axis=0) + X = ct.fit_transform(X) + from sklearn.preprocessing import scale + X = scale(X) + # label encode the target variable to have the classes 0 and 1 + y = LabelEncoder().fit_transform(y) + # separate into train and test sets + X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1) + return X_train, X_test, y_train, y_test + + + def nasStructClassification(self,scoreParam): + try: + objClf = aion_matrix() + X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest + modelName=""nas_structdata_classifier"" + self.log.info(""Processing structured data block...\\n"") + s_in = ak.StructuredDataInput() + #s_in = Flatten()(s_in) + s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) + self.log.info(""Data pipe via autokeras Classification Dense layers ...\\n"") + s_out = ak.ClassificationHead()(s_out) + self.log.info(""applying autokeras automodel to run different neural models...\\n"") + try: + tuner = str(self.tuner).lower() + except UnicodeEncodeError: + tuner = (self.tuner.encode('utf8')).lower() + nasclf = ak.AutoModel( + inputs=s_in, + outputs=s_out, + overwrite=True, + tuner=tuner, + max_trials=self.n_models, + seed=self.seed) + # compile the model + #nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m]) + nasclf.fit(X_train, y_train, epochs=self.n_epochs) + best_model = nasclf.export_model() + mpredict=best_model.predict(X_test) + mtpredict=best_model.predict(X_train) + #loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0) + 
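+                # Added note on the scoring below (behaviour unchanged): ak.ClassificationHead()
+                # typically emits one probability column per class for multi-class targets, so
+                # np.argmax(..., axis=1) recovers integer labels before objClf.get_score() runs.
+                # If the exported head is a single-column sigmoid (binary target), argmax over
+                # that one column is always 0; a 0.5 threshold such as
+                # (mpredict > 0.5).astype(int) would be the appropriate conversion in that case.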
#from sklearn.metrics import classification_report + #Classification report + y_pred_bool = np.argmax(mpredict, axis=1) + y_train_pred_bool = np.argmax(mtpredict, axis=1) + score = objClf.get_score(scoreParam,y_test, y_pred_bool) + #best_model = nasclf.export_model() + best_model_summary=best_model.summary() + filename = os.path.join(self.deployLocation,'log','summary.txt') + with open(filename,'w') as f: + best_model.summary(print_fn=lambda x: f.write(x + '\\n')) + f.close() + #self.log.info(""=========="") + #self.log.info(best_model_summary) + self.log.info(""NAS struct data classification, best model summary: \\n""+str(best_model.summary(print_fn=self.log.info))) + #self.log.info(""=========="") + #Save and load model + # # #try: + # try: + # best_model.save(""model_class_autokeras"", save_format=""tf"") + # except Exception: + # best_model.save(""model_class_autokeras.h5"") + # loaded_model = load_model(""model_class_autokeras"", custom_objects=ak.CUSTOM_OBJECTS) + # loadedmodel_predict=loaded_model.predict(X_test) + loss,accuracy_m=nasclf.evaluate(X_test, y_test) + #mpredict_classes = mpredict.argmax(axis=-1) + #accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int)) + # precision tp / (tp + fp) + #precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro') + # recall: tp / (tp + fn) + #recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro') + #f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average=""macro"") + self.log.info(""Autokeras struct data classification metrics: \\n"") + except Exception as inst: + self.log.info(""Error: NAS failed ""+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(inst) + return modelName,nasclf,score + + def nasStructRegressor(self,scoreParam): + objClf = aion_matrix() + modelName=""nas_struct_regressor"" + #self.paramCheck() + X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest + # Autokeras alg + s_in = ak.StructuredDataInput() + #tf.keras.layers.GlobalMaxPooling2D()(s_in) + s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) + self.log.info(""Data pipe via autokeras Regression Dense layers ...\\n"") + s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out) + self.log.info(""applying autokeras automodel to evaluate different neural models...\\n"") + try: + tuner = str(self.tuner).lower() + except UnicodeEncodeError: + tuner = (self.tuner.encode('utf8')).lower() + nas_reg = ak.AutoModel( + inputs=s_in, + outputs=s_out, + overwrite=True, + tuner=tuner, + max_trials=self.n_models) + nas_reg.fit(X_train, y_train, epochs=self.n_epochs) + best_model = nas_reg.export_model() + self.log.info(""NAS struct data regression best model summary: \\n"") + best_model_summary=best_model.summary(print_fn=self.log.info) + self.log.info(best_model_summary) + predictm=best_model.predict(X_test) + mtpredict=best_" +"model.predict(X_train) + score = objClf.get_score(scoreParam,y_test, predictm) + self.log.info(""Autokeras struct data regression metrics: \\n"") + + return modelName,nas_reg,score + + def nasMain(self,scoreParam): + modelName = """" + nasclf=None + nas_reg=None + #text_reg_model=None + mse_value=0 + reg_rmse=0 + mape_reg=0 + huber_loss_reg=0 + accuracy=0 + precision=0 + recall=0 + #Dummy values to return main for classification problems + dummy_score_1=int(0) + #dummy_score_2=int(0) + try: + if 
((self.nas_class.lower() == ""classification"")): + modelName,nasclf,score=self.nasStructClassification(scoreParam) + self.log.info('NAS Struct Classification score: '+str(score)) + best_model_nas = nasclf.export_model() + scoredetails = '{""Model"":""NAS"",""Score"":'+str(round(score,2))+'}' + return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1 + elif (self.nas_class.lower() == ""regression""): + modelName,nas_reg,score =self.nasStructRegressor(scoreParam) + self.log.info('NAS Struct Regression score: '+str(score)) + best_model_nas = nas_reg.export_model() + ''' + filename = os.path.join(self.deployLocation,'model','autoKerasModel') + best_model_nas = nas_reg.export_model() + try: + best_model_nas.save(filename, save_format=""tf"") + modelName = 'autoKerasModel' + except Exception: + filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5') + best_model_nas.save(filename) + modelName = 'autoKerasModel.h5' + ''' + scoredetails = '{""Model"":""NAS"",""Score"":'+str(round(score,2))+'}' + ''' + error_matrix = '""MSE"":""'+str(round(mse_value,2))+'"",""RMSE"":""'+str(round(reg_rmse,2))+'"",""MAPE"":""'+str(round(mape_reg,2))+'"",""MSLE"":""'+str(round(msle_reg,2))+'""' + ''' + return best_model_nas,self.nas_params,score,'NAS' + else: + pass + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + output = {""status"":""FAIL"",""message"":str(inst).strip('""')} + output = json.dumps(output) + import itertools +import logging +from typing import Optional, Dict, Union + +from nltk import sent_tokenize + +import torch +from transformers import( + AutoModelForSeq2SeqLM, + AutoTokenizer, + PreTrainedModel, + PreTrainedTokenizer, +) + +logger = logging.getLogger(__name__) + +class QGPipeline: + """"""Poor man's QG pipeline"""""" + def __init__( + self, + model: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + ans_model: PreTrainedModel, + ans_tokenizer: PreTrainedTokenizer, + qg_format: str, + use_cuda: bool + ): + self.model = model + self.tokenizer = tokenizer + + self.ans_model = ans_model + self.ans_tokenizer = ans_tokenizer + + self.qg_format = qg_format + + self.device = ""cuda"" if torch.cuda.is_available() and use_cuda else ""cpu"" + self.model.to(self.device) + + if self.ans_model is not self.model: + self.ans_model.to(self.device) + + assert self.model.__class__.__name__ in [""T5ForConditionalGeneration"", ""BartForConditionalGeneration""] + + if ""T5ForConditionalGeneration"" in self.model.__class__.__name__: + self.model_type = ""t5"" + else: + self.model_type = ""bart"" + + def __call__(self, inputs: str): + inputs = "" "".join(inputs.split()) + sents, answers = self._extract_answers(inputs) + flat_answers = list(itertools.chain(*answers)) + + if len(flat_answers) == 0: + return [] + + if self.qg_format == ""prepend"": + qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers) + else: + qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers) + + qg_inputs = [example['source_text'] for example in qg_examples] + questions = self._generate_questions(qg_inputs) + output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)] + return output + + def _generate_questions(self, inputs): + inputs = self._tokenize(inputs, padding=True, truncation=True) + + outs = self.model.generate( + 
input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=32, + num_beams=4, + ) + + questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs] + return questions + + def _extract_answers(self, context): + sents, inputs = self._prepare_inputs_for_ans_extraction(context) + inputs = self._tokenize(inputs, padding=True, truncation=True) + + outs = self.ans_model.generate( + input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=32, + ) + + dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs] + answers = [item.split('') for item in dec] + answers = [i[:-1] for i in answers] + + return sents, answers + + def _tokenize(self, + inputs, + padding=True, + truncation=True, + add_special_tokens=True, + max_length=512 + ): + inputs = self.tokenizer.batch_encode_plus( + inputs, + max_length=max_length, + add_special_tokens=add_special_tokens, + truncation=truncation, + padding=""max_length"" if padding else False, + pad_to_max_length=padding, + return_tensors=""pt"" + ) + return inputs + + def _prepare_inputs_for_ans_extraction(self, text): + sents = sent_tokenize(text) + + inputs = [] + for i in range(len(sents)): + source_text = ""extract answers:"" + for j, sent in enumerate(sents): + if i == j: + sent = "" %s "" % sent + source_text = ""%s %s"" % (source_text, sent) + source_text = source_text.strip() + + if self.model_type == ""t5"": + source_text = source_text + "" "" + inputs.append(source_text) + + return sents, inputs + + def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers): + inputs = [] + for i, answer in enumerate(answers): + if len(answer) == 0: continue + for answer_text in answer: + sent = sents[i] + sents_copy = sents[:] + + answer_text = answer_text.strip() + + ans_start_idx = 0 + # ans_start_idx = sent.index(answer_text) + + # if answer_text in sent: + # ans_start_idx = sent.index(answer_text) + # else: + # continue + + sent = f""{sent[:ans_start_idx]} {answer_text} {sent[ans_start_idx + len(answer_text): ]}"" + sents_copy[i] = sent + + source_text = "" "".join(sents_copy) + source_text = f""generate question: {source_text}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + inputs.append({""answer"": answer_text, ""source_text"": source_text}) + + return inputs + + def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers): + flat_answers = list(itertools.chain(*answers)) + examples = [] + for answer in flat_answers: + source_text = f""answer: {answer} context: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + examples.append({""answer"": answer, ""source_text"": source_text}) + return examples + + +class MultiTaskQAQGPipeline(QGPipeline): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def __call__(self, inputs: Union[Dict, str]): + if type(inputs) is str: + # do qg + return super().__call__(inputs) + else: + # do qa + return self._extract_answer(inputs[""question""], inputs[""context""]) + + def _prepare_inputs_for_qa(self, question, context): + source_text = f""question: {question} context: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + return source_text + + def _extract_answer(self, question, context): + source_text = self._prepare_inputs_for_qa(question, context) + inputs = self._tokenize([source_text], padding=False) + + outs = self.model.generate( + 
input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + max_length=16, + ) + + answer = self.tokenizer.decode(outs[0], skip_special_tokens=True) + return answer + + +class E2EQGPipeline: + def __init__( + self, + model: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + use_cuda: bool + ) : + + self.model = model + self.tokenizer = tokenizer + + self.device = ""cuda"" if torch.cuda.is_available() and use_cuda else ""cpu"" + self.model.to(self.device) + + assert self.model.__class__.__name__ in [""T5ForConditionalGeneration"", ""BartForConditionalGeneration""] + + if ""T5ForConditionalGeneration"" in self.model.__class__.__name__: + self.model_type = ""t5"" + else: + self.model_type = ""bart"" + + self.default_generate_kwargs = { + ""max_length"": 256, + ""num_beams"": 4, + ""length_penalty"": 1.5, + ""no_repeat_ngram_size"": 3, + ""early_stopping"": True, + } + + def __call__(self, context: str, **generate_kwargs): + inputs = self._prepare_inputs_for_e2e_qg(context) + + # TODO: when overrding default_generate_kwargs all other arguments need to be passsed + # find a better way to do this + if not generate_kwargs: + generate_kwargs = self.default_generate_kwargs + + input_length = inputs[""input_ids""].shape[-1] + + # max_length = generate_kwargs.get(""max_length"", 256) + # if input_length < max_length: + # logger.warning( + # ""Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)"".format( + # max_length, input_length + # ) + # ) + + outs = self.model.generate( + input_ids=inputs['input_ids'].to(self.device), + attention_mask=inputs['attention_mask'].to(self.device), + **generate_kwargs + ) + + prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True) + questions = prediction.split("""") + questions = [question.strip() for question in questions[:-1]] + return questions + + def _prepare_inputs_for_e2e_qg(self, context): + source_text = f""generate questions: {context}"" + if self.model_type == ""t5"": + source_text = source_text + "" "" + + inputs = self._tokenize([source_text], padding=False) + return inputs + + def _tokenize( + self, + inputs, + padding=True, + truncation=True, + add_special_tokens=True, + max_length=512 + ): + inputs = self.tokenizer.batch_encode_plus( + inputs, + max_length=max_length, + add_special_tokens=add_special_tokens, + truncation=truncation, + padding=""max_length"" if padding else False, + pad_to_max_length=padding, + return_tensors=""pt"" + ) + return inputs + + +SUPPORTED_TASKS = { + ""question-generation"": { + ""impl"": QGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-qg-hl"", + ""ans_model"": ""valhalla/t5-small-qa-qg-hl"", + } + }, + ""multitask-qa-qg"": { + ""impl"": MultiTaskQAQGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-qa-qg-hl"", + } + }, + ""e2e-qg"": { + ""impl"": E2EQGPipeline, + ""default"": { + ""model"": ""valhalla/t5-small-e2e-qg"", + } + } +} + +def pipeline( + task: str, + model: Optional = None, + tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, + qg_format: Optional[str] = ""highlight"", + ans_model: Optional = None, + ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, + use_cuda: Optional[bool] = True, + **kwargs, +): + # Retrieve the task + if task not in SUPPORT" +"ED_TASKS: + raise KeyError(""Unknown task {}, available tasks are {}"".format(task, list(SUPPORTED_TASKS.keys()))) + + targeted_task = SUPPORTED_TASKS[task] 
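+    # Illustrative usage of this factory (model names are the defaults declared
+    # in SUPPORTED_TASKS above; nothing beyond those defaults is assumed):
+    #   nlp = pipeline(""question-generation"")   # valhalla/t5-small-qg-hl + answer-extraction model
+    #   nlp(""Python was created by Guido van Rossum."")
+    #   qa_qg = pipeline(""multitask-qa-qg"")     # valhalla/t5-small-qa-qg-hl
+    #   qa_qg({""question"": ""Who created Python?"", ""context"": ""Python was created by Guido van Rossum.""})
+    #   e2e = pipeline(""e2e-qg"")                # valhalla/t5-small-e2e-qg
+    #   e2e(""Python is a popular programming language. It was created by Guido van Rossum."")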
+ task_class = targeted_task[""impl""] + + # Use default model/config/tokenizer for the task if no model is provided + if model is None: + model = targeted_task[""default""][""model""] + + # Try to infer tokenizer from model or config name (if provided as str) + if tokenizer is None: + if isinstance(model, str): + tokenizer = model + else: + # Impossible to guest what is the right tokenizer here + raise Exception( + ""Impossible to guess which tokenizer to use. "" + ""Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."" + ) + + # Instantiate tokenizer if needed + if isinstance(tokenizer, (str, tuple)): + if isinstance(tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1]) + else: + tokenizer = AutoTokenizer.from_pretrained(tokenizer) + + # Instantiate model if needed + if isinstance(model, str): + model = AutoModelForSeq2SeqLM.from_pretrained(model) + + if task == ""question-generation"": + if ans_model is None: + # load default ans model + ans_model = targeted_task[""default""][""ans_model""] + ans_tokenizer = AutoTokenizer.from_pretrained(ans_model) + ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) + else: + # Try to infer tokenizer from model or config name (if provided as str) + if ans_tokenizer is None: + if isinstance(ans_model, str): + ans_tokenizer = ans_model + else: + # Impossible to guest what is the right tokenizer here + raise Exception( + ""Impossible to guess which tokenizer to use. "" + ""Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."" + ) + + # Instantiate tokenizer if needed + if isinstance(ans_tokenizer, (str, tuple)): + if isinstance(ans_tokenizer, tuple): + # For tuple we have (tokenizer name, {kwargs}) + ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1]) + else: + ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer) + + if isinstance(ans_model, str): + ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) + + if task == ""e2e-qg"": + return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda) + elif task == ""question-generation"": + return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda) + else: + return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import socket +import os +import rsa +from os.path import expanduser +from pathlib import Path +import requests +import platform +from appbe.dataPath import DATA_DIR +import socket +import getmac +import subprocess +import sys +import json +from datetime import datetime +import binascii +computername = socket.getfqdn() +global_key = ''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb +k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue +N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+ +NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit +NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b +fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB +-----END RSA PUBLIC KEY----- +''' + +quarter_key = ''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1 +08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh +NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ +WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9 +njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01 +gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB +-----END RSA PUBLIC KEY----- +''' + +halfYear_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv +wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF +tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh +R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp +7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn +wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB +-----END RSA PUBLIC KEY----- +''' +oneYear_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP +Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG +ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB +Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/ +fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0 +KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB +-----END RSA PUBLIC KEY----- +''' +full_key=''' +-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn +ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0 +elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN +4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+ +t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ +Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB +-----END RSA PUBLIC KEY----- +''' +def validate_key_Pair(privatepath,publickey): + with open(privatepath, 'rb') as privatefile: + keydata = privatefile.read() + privatefile.close() + try: + privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM') + data = 'Validate Global License' + signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1') + pubkey = rsa.PublicKey.load_pkcs1(publickey) + except: + return False + try: + rsa.verify(data.encode('utf-8'), signature, pubkey) + return True + except Exception as e: + return False + +def updateDRecord(licensepath): + domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic') + if(os.path.isfile(licensepath)): + with open(licensepath, 'rb') as f: + licensekey = f.read() + f.close() + with open(domain_license_path, 'wb') as f: + 
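+        # Added note: the uploaded file is copied verbatim to DATA_DIR/License/
+        # license_domain.lic and then checked by validate_key_Pair() above, which
+        # treats the licence file as a PKCS#1 private key and verifies a signature
+        # against the embedded global_key public key.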
f.write(licensekey) + f.close() + if(validate_key_Pair(domain_license_path,global_key)): + return True,'Valid Domain License' + else: + return False,'Invalid Domain License' + else: + return False,'File Not Exists' + +def generateLicenseKey(userKey): + record = {'UserKey':userKey} + record = json.dumps(record) + status = 'Error' + url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license' + try: + response = requests.post(url, data=record,headers={""x-api-key"":""3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM"",""Content-Type"":""application/json"",}) + if response.status_code == 200: + outputStr=response.content + outputStr = outputStr.decode('utf-8','ignore') + outputStr = outputStr.strip() + license_dict = json.loads(str(outputStr)) + if license_dict['status'] == 'success': + status = 'Success' + licenseKey = license_dict['msg'] + else: + status = 'Error' + licenseKey = '' + else: + status = 'Error' + licenseKey = '' + except Exception as inst: + print(inst) + status = 'Error' + licenseKey = '' + msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''} + return msg +def updateRecord(licensepath): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + license_path = os.path.join(currentDirectory,'..','lic','license.lic') + if(os.path.isfile(licensepath)): + with open(licensepath, 'rb') as f: + licensekey = f.read() + f.close() + with open(license_path, 'wb') as f: + f.write(licensekey) + f.close() + status,msg = check_domain_license() + if status: + status,msg = getdaysfromstartdate() + if status: + status,msg = check_days_license(int(msg)) + return status,msg + else: + return False,'File Not Exists' + + +def check_domain_license(): + if 'CORP.HCL.IN' in computername: + return True,'HCL Domain' + else: + return True,'HCL Domain' + +def diff_month(d1, d2): + return (d1.year - d2.year) * 12 + d1.month - d2.month + + +def getdaysfromstartdate(): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt') + + if(os.path.isfile(startdatePath)): + with open(startdatePath, ""rb"") as fl: + encrypted_message = fl.read() + fl.close() + + privkey = '''-----BEGIN RSA PRIVATE KEY----- +MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1" +"Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+ +GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr +38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp +WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP +ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL +xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH +2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet +XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK +AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4 +Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY +vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs +dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl +MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+ +gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1 +uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw +3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ +msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc +sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps +hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu 
+V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk +EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4 +UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy +Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo +LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu ++edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ= +-----END RSA PRIVATE KEY----- + ''' + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + decrypted_message = rsa.decrypt(encrypted_message, privkey) + decrypted_message = decrypted_message.decode() + import datetime + start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d') + + current_date = datetime.datetime.today().strftime('%Y-%m-%d') + current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d') + + Months = diff_month(current_date,start_time) + return True,Months + else: + return False,'Start Date Not Exists' +def check_days_license(months): + currentDirectory = os.path.dirname(os.path.abspath(__file__)) + license_path = os.path.join(currentDirectory,'..','lic','license.lic') + if(os.path.isfile(license_path)): + if(validate_key_Pair(license_path,full_key)): + return True,'Valid License' + elif(validate_key_Pair(license_path,oneYear_key)): + if months <= 12: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + elif(validate_key_Pair(license_path,halfYear_key)): + if months <= 6: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + elif(validate_key_Pair(license_path,quarter_key)): + if months <= 3: + return True,'Valid License' + else: + return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' + else: + return False,'Invalid License' + else: + return False,'License Not exists.Please contact ERS Research for renewal.' 
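+
+# A minimal illustrative sketch (not part of the product licence tooling): it
+# shows how a licence file that satisfies validate_key_Pair() above could be
+# produced with the same `rsa` package. The helper name is hypothetical and is
+# not referenced anywhere else in this module.
+def _example_issue_tier_license(out_path):
+    '''
+    Generate a fresh key pair, write the private half to out_path and return
+    the PEM public half. The licence file is simply a PKCS#1 private key:
+    validate_key_Pair() signs a fixed string with it and verifies the signature
+    against the embedded public key (quarter_key / halfYear_key / oneYear_key /
+    full_key decide the tier in check_days_license above).
+    '''
+    pubkey, privkey = rsa.newkeys(2048)
+    with open(out_path, 'wb') as f:
+        f.write(privkey.save_pkcs1('PEM'))
+    return pubkey.save_pkcs1('PEM').decode()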
+ +def checklicense(): + import binascii + license_path = os.path.join(DATA_DIR,'License','license.lic') + if(os.path.isfile(license_path)): + try: + with open(license_path, 'r') as privatefile: + license_key = privatefile.read() + privatefile.close() + encrypted_message = binascii.unhexlify(license_key.encode()) + privkey = '''-----BEGIN RSA PRIVATE KEY----- + MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY + m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3 + anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh + ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc + kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf + AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO + CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC + cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83 + rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa + A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7 + NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV + F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn + FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07 + AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh + vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p + aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k + 6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe + fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw + Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k + MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy + vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2 + 3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a + IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA + lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+ + 9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2 + -----END RSA PRIVATE KEY----- + ''' + privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') + decrypted_message = rsa.decrypt(encrypted_message, privkey) + msg = decrypted_message.decode().split('####') + product = msg[0] + computernameLicense = msg[1] + computername = socket.getfqdn() + licenseValid = False + if product.lower() == 'aion': + if computernameLicense == computername: + uuidlicense = msg[3] + uuid = guid() + if uuidlicense == uuid: + current_date = datetime.now() + license_expiry_date = msg[5] + license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S') + if current_date > license_expiry_date: + return False,'License Expire' + else: + return True,'' + return False,'License Error' + except Exception as e: + print(e) + return False,'License Error' + else: + return False,'Generate License' +def generate_record_key(product,version): + computername = socket.getfqdn() + macaddress = getmac.get_mac_address() + license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S') + try: + user = os.getlogin() + except: + user = 'NA' + uuid = guid() + msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date + pkeydata='''-----BEGIN RSA PUBLIC KEY----- +MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1 +kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq +ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME +oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG 
+T9omCH90Dkm1oMMQ0Y2JBLe" +"zgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN +RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB +-----END RSA PUBLIC KEY----- +''' + pubkey = rsa.PublicKey.load_pkcs1(pkeydata) + encrypted_message = rsa.encrypt(msg.encode(), pubkey) + encrypted_message = binascii.hexlify(encrypted_message).decode() + return(encrypted_message) + +def run(cmd): + try: + return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding=""utf-8"").stdout.strip() + except Exception as e: + print(e) + return None + +def guid(): + if sys.platform == 'darwin': + return run( + ""ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\\\\"" '/IOPlatformUUID/{print $(NF-1)}'"", + ) + + if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys': + return run('wmic csproduct get uuid').split('\\n')[2].strip() + + if sys.platform.startswith('linux'): + return run('cat /var/lib/dbus/machine-id') or \\ + run('cat /etc/machine-id') + + if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'): + return run('cat /etc/hostid') or \\ + run('kenv -q smbios.system.uuid') + + +def updateLicense(licensekey): + license_folder = os.path.join(DATA_DIR,'License') + license_folder = Path(license_folder) + license_folder.mkdir(parents=True, exist_ok=True) + license_file = license_folder/'license.lic' + with open(license_file, ""w"") as fl: + fl.write(licensekey) + fl.close() + + +def enterRecord(version): + validLicense,msg = checklicense() + if not validLicense: + key = generate_record_key('AION',version) + msg = {'status':msg,'key':key,'licenseKey':'','link':''} + return validLicense,msg + #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer +from http.server import BaseHTTPRequestHandler,HTTPServer +#from SocketServer import ThreadingMixIn +from socketserver import ThreadingMixIn +''' +from augustus.core.ModelLoader import ModelLoader +from augustus.strict import modelLoader +''' +import pandas as pd +import os,sys +from os.path import expanduser +import platform +import numpy as np +import configparser +import threading +import subprocess +import argparse +import re +import cgi +from datetime import datetime +import json +import sys +from datetime import datetime +user_records = {} +class LocalModelData(object): + models = {} + +class HTTPRequestHandler(BaseHTTPRequestHandler): + + def do_POST(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + model = self.path.split('/')[-2] + operation = self.path.split('/')[-1] + data = json.loads(data) + dataStr = json.dumps(data) + if operation.lower() == 'predict': + predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_predict.py') + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resp = outputStr + elif operation.lower() == 'explain': + predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_xai.py') + outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() 
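+                    # Added note: both branches above run the generated script out of
+                    # process and recover its JSON payload by scanning the child's stdout
+                    # for a known prefix ('predictions:' for predict,
+                    # 'aion_ai_explanation:' for explain); whatever follows that prefix is
+                    # returned unchanged as the HTTP response body below.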
+ resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/pattern_anomaly_predict', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + model = self.path.split('/')[-1] + data = self.rfile.read(length) + + data = json.loads(data) + anomaly = False + remarks = '' + clusterid = -1 + home = expanduser(""~"") + if platform.system() == 'Windows': + configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv') + else: + configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv') + + + dfclus = pd.read_csv(clusterfilename) + dfprod = pd.read_csv(probfilename) + f = open(configfilename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + activity = configSettingsJson['activity'] + sessionid = configSettingsJson['sessionid'] + f = open(filename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + groupswitching = configSettingsJson['groupswitching'] + page_threshold = configSettingsJson['transitionprobability'] + chain_count = configSettingsJson['transitionsequence'] + chain_probability = configSettingsJson['sequencethreshold'] + currentactivity = data[activity] + if bool(user_records): + sessionid = data[sessionid] + print(sessionid,user_records['SessionID']) + if sessionid != user_records['SessionID']: + user_records['SessionID'] = sessionid + prevactivity = '' + user_records['probarry'] = [] + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + else: + prevactivity = user_records['Activity'] + user_records['Activity'] = currentactivity + pageswitch = True + if prevactivity == currentactivity or prevactivity == '': + probability = 0 + pageswitch = False + remarks = '' + else: + user_records['pageclicks'] += 1 + df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] + if df1.empty: + remarks = 'Anomaly Detected - User in unusual state' + anomaly = True + clusterid = -1 + probability = 0 + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + avg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + else: + probability = df1['Probability'].iloc[0] + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + davg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if 
currentactivity in clusterlist: + clusterid = row[""clusterid""] + remarks = '' + if user_records['prevclusterid'] != -1: + if probability == 0 and user_records['prevclusterid'] != clusterid: + user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 + if user_records['pageclicks'] == 1: + remarks = 'Anomaly Detected - Frequent Cluster Hopping' + anomaly = True + else: + remarks = 'Cluster Hopping Detected' + user_records['pageclicks'] = 0 + if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: + remarks = 'Anomaly Detected - Multiple Cluster Hopping' + anomaly = True + elif probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + #print(pageswitch) + #print(probability) + if pageswitch == True: + if probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + remarks = '' + if davg < float(chain_probability): + if anomaly == False: + remarks = 'Anomaly Detected - In-frequent Pattern Detected' + anomaly = True + else: + user_records['SessionID'] = data[sessionid] + user_records['Activity'] = data[activity] + user_records['probability'] = 0 + user_records['probarry'] = [] + user_records['chainprobability'] = 0 + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + user_records['prevclusterid'] = clusterid + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""status"":""SUCCESS"",""data"":{""Anomaly"":""'+str(anomaly)+'"",""Remarks"":""'+str(remarks)+'""}}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp =" +"'{""Anomaly"":""Error"",""Remarks"":""'+str(Int)+'""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + elif None != re.search('/AION/pattern_anomaly_settings', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + #print(data) + #keyList = list(data.keys()) + #print(keyList[0]) + model = self.path.split('/')[-1] + #print(model) + data = json.loads(data) + #dataStr = json.dumps(data) + groupswitching = data['groupswitching'] + transitionprobability = data['transitionprobability'] + transitionsequence = data['transitionsequence'] + sequencethreshold = data['sequencethreshold'] + home = expanduser(""~"") + if platform.system() == 'Windows': + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + else: + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + + + #print(filename) + data = {} + data['groupswitching'] = groupswitching + data['transitionprobability'] = transitionprobability + data['transitionsequence'] = transitionsequence + data['sequencethreshold'] = sequencethreshold + updatedConfig = json.dumps(data) + with open(filename, ""w"") as fpWrite: + 
fpWrite.write(updatedConfig) + fpWrite.close() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""Status"":""SUCCESS""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + else: + print(""python ==> else1"") + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print(""PYTHON ######## REQUEST ####### ENDED"") + return + + def do_GET(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/predict', self.path): + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + model = self.path.split('/')[-1] + display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json') + displaymsg = 'Data in JSON Format' + if(os.path.isfile(display_path)): + with open(display_path) as file: + config = json.load(file) + file.close() + features = config['modelFeatures'] + datajson={} + for feature in features: + datajson[feature] = 'Value' + displaymsg = json.dumps(datajson) + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + """""".format(url=self.path,displaymsg=displaymsg) + self.wfile.write(msg.encode()) + else: + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + return + +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class SimpleHttpServer(): + def __init__(self, ip, port): + self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + + def waitForThread(self): + self.server_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +if __name__=='__main__': + parser = argparse.ArgumentParser(description='HTTP Server') + parser.add_argument('port', type=int, help='Listening port for HTTP Server') + parser.add_argument('ip', help='HTTP Server IP') + args = parser.parse_args() + + server = SimpleHttpServer(args.ip, args.port) + #delete file + #create file + #write just msg as ""started"" + print('HTTP Server Running...........') + #file close + server.start() + server.waitForThread() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer +from http.server import BaseHTTPRequestHandler,HTTPServer +#from SocketServer import ThreadingMixIn +from socketserver import ThreadingMixIn +''' +from augustus.core.ModelLoader import ModelLoader +from augustus.strict import modelLoader +''' +import pandas as pd +from datetime import datetime +import os,sys +from os.path import expanduser +import platform +import numpy as np +import configparser +import threading +import subprocess +import argparse +import re +import cgi +import time +from datetime import datetime +import json +import sys +from datetime import datetime +import sqlite3 +from os.path import expanduser +from pathlib import Path +from io import BytesIO +DEPLOY_DATABASE_PATH = os.path.join(os.path.join(os.path.dirname(__file__)),'database') +targetPath = Path(DEPLOY_DATABASE_PATH) +targetPath.mkdir(parents=True, exist_ok=True) +modelVersion = 'run_1' +version = 1 +class sqlite_db(): + + def __init__(self, location, database_file=None): + if not isinstance(location, Path): + location = Path(location) + if database_file: + self.database_name = database_file + else: + self.database_name = location.stem + '.db' + db_file = str(location/self.database_name) + self.conn = sqlite3.connect(db_file) + self.cursor = self.conn.cursor() + self.tables = [] + + def table_exists(self, name): + if name in self.tables: + return True + elif name: + query = f""SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"" + listOfTables = self.cursor.execute(query).fetchall() + if len(listOfTables) > 0 : + self.tables.append(name) + return True + return False + + def read(self, table_name,condition=''): + if condition == '': + return pd.read_sql_query(f""SELECT * FROM {table_name}"", self.conn) + else: + return pd.read_sql_query(f""SELECT * FROM {table_name} WHERE {condition}"", self.conn) + + def create_table(self,name, columns, dtypes): + query = f'CREATE TABLE IF NOT EXISTS {name} (' + + for column, data_type in zip(columns, dtypes): + query += f""'{column}' TEXT,"" + query = query[:-1] + query += ');' + self.conn.execute(query) + return True + def update(self,table_name,updates,condition): + update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' + self.cursor.execute(update_query) + self.conn.commit() + return True + def write(self,data, table_name): + if not self.table_exists(table_name): + self.create_table(table_name, data.columns, data.dtypes) + tuple_data = list(data.itertuples(index=False, name=None)) + insert_query = f'INSERT INTO {table_name} VALUES(' + for i in range(len(data.columns)): + insert_query += '?,' + insert_query = insert_query[:-1] + ')' + self.cursor.executemany(insert_query, tuple_data) + self.conn.commit() + return True + + def delete(self, name): + pass + + def close(self): + self.conn.close() + +user_records = {} +class LocalModelData(object): + models = {} + +class HTTPRequestHandler(BaseHTTPRequestHandler): + + def do_POST(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) + data = self.rfile.read(length) + model = self.path.split('/')[-2] + operation = self.path.split('/')[-1] + #data = json.loads(data) + #dataStr = json.dumps(data) + home = expanduser(""~"") + dataStr = data + sqlite_dbObj = 
sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + model_path = os.path.join(os.path.dirname(__file__),modelVersion) + DATA_FILE_PATH = os.path.join(os.path.dirname(__file__),'temp') + Path(DATA_FILE_PATH).mkdir(parents=True, exist_ok=True) + isdir = os.path.isdir(model_path) + if isdir: + if operation.lower() == 'predict': + if not sqlite_dbObj.table_exists('servingDetails'): + data = {'usecase':model,'noOfPredictCalls':0,'noOfDriftCalls':0} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('servingDetails',data.columns, data.dtypes) + df2 = pd.read_json(BytesIO(dataStr), orient ='records') + if not sqlite_dbObj.table_exists('prodData'): + sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) + sqlite_dbObj.write(df2,'prodData') + data = sqlite_dbObj.read('servingDetails',""usecase = '""+model+""'"") + if len(data) == 0: + data = {'usecase':model,'noOfPredictCalls':1,'noOfDriftCalls':0} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.write(data,'servingDetails') + else: + noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 + sqlite_dbObj.update('servingDetails',""noOfPredictCalls = '""+str(noofPredictCalls)+""'"",""usecase = '""+model+""'"") + predict_path = os.path.join(model_path,'aion_predict.py') + outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resp = outputStr + elif operation.lower() == 'monitoring': + if not sqlite_dbObj.table_exists('monitoring'): + data = {'usecase':model,'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) + trainingDataPath = os.path.join(os.path.dirname(__file__),modelVersion,'data','preprocesseddata.csv') + data = sqlite_dbObj.read('prodData') + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + data.to_csv(dataFile, index=False) + predict_path = os.path.join(model_path,'aion_ipdrift.py') + inputJSON={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} + outputStr = subprocess.check_output([sys.executable,predict_path,json.dumps(inputJSON)]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + outputData = json.loads(outputStr) + status = outputData['status'] + if status == 'SUCCESS': + Msg = str(outputData['data']) + else: + Msg = 'Error during drift analysis' + " +" now = datetime.now() # current date and time + date_time = now.strftime(""%m/%d/%Y, %H:%M:%S"") + data = {'usecase':model,'status':status,'Msg':Msg,'RecordTime':date_time,'version':version} + data = pd.DataFrame(data, index=[0]) + sqlite_dbObj.write(data,'monitoring') + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/pattern_anomaly_predict', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + model = self.path.split('/')[-1] + data = self.rfile.read(length) + + data = json.loads(data) + anomaly = False + remarks = '' + clusterid = -1 + 
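+                    # Added note on the heuristics that follow: per-session state lives in
+                    # the module-level user_records dict. Thresholds are read from the
+                    # model's clickstream.json -- 'transitionprobability' flags an
+                    # infrequent state transition, 'transitionsequence' and
+                    # 'sequencethreshold' bound the rolling mean of the last N transition
+                    # probabilities, and 'groupswitching' caps how many times a session may
+                    # hop between the activity clusters in stateClustering.csv before the
+                    # request is reported as an anomaly.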
home = expanduser(""~"") + if platform.system() == 'Windows': + configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv') + else: + configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv') + + + dfclus = pd.read_csv(clusterfilename) + dfprod = pd.read_csv(probfilename) + f = open(configfilename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + activity = configSettingsJson['activity'] + sessionid = configSettingsJson['sessionid'] + f = open(filename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + groupswitching = configSettingsJson['groupswitching'] + page_threshold = configSettingsJson['transitionprobability'] + chain_count = configSettingsJson['transitionsequence'] + chain_probability = configSettingsJson['sequencethreshold'] + currentactivity = data[activity] + if bool(user_records): + sessionid = data[sessionid] + print(sessionid,user_records['SessionID']) + if sessionid != user_records['SessionID']: + user_records['SessionID'] = sessionid + prevactivity = '' + user_records['probarry'] = [] + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + else: + prevactivity = user_records['Activity'] + user_records['Activity'] = currentactivity + pageswitch = True + if prevactivity == currentactivity or prevactivity == '': + probability = 0 + pageswitch = False + remarks = '' + else: + user_records['pageclicks'] += 1 + df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] + if df1.empty: + remarks = 'Anomaly Detected - User in unusual state' + anomaly = True + clusterid = -1 + probability = 0 + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + avg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + else: + probability = df1['Probability'].iloc[0] + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + davg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + remarks = '' + if user_records['prevclusterid'] != -1: + if probability == 0 and user_records['prevclusterid'] != clusterid: + user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 + if user_records['pageclicks'] == 1: + remarks = 'Anomaly Detected - Frequent Cluster Hopping' + anomaly = True + else: + remarks = 'Cluster Hopping Detected' + user_records['pageclicks'] = 0 + if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: + remarks = 'Anomaly Detected - Multiple Cluster Hopping' + 
anomaly = True + elif probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + #print(pageswitch) + #print(probability) + if pageswitch == True: + if probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + remarks = '' + if davg < float(chain_probability): + if anomaly == False: + remarks = 'Anomaly Detected - In-frequent Pattern Detected' + anomaly = True + else: + user_records['SessionID'] = data[sessionid] + user_records['Activity'] = data[activity] + user_records['probability'] = 0 + user_records['probarry'] = [] + user_records['chainprobability'] = 0 + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + user_records['prevclusterid'] = clusterid + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""status"":""SUCCESS"",""data"":{""Anomaly"":""'+str(anomaly)+'"",""Remarks"":""'+str(remarks)+'""}}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""Anomaly"":""Error"",""Remarks"":""'+str(Int)+'""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + elif None != re.search('/AION/pattern_anomaly_settings', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + #print(data) + #keyList = list(data.keys()) + #print(keyList[0]) + model = self.path.split('/')[-1] + #print(model) + data = json.loads(data) + #dataStr = json.dumps(data) + groupswitching = data['groupswitching'] + transitionprobability = data['transitionprobability'] + transitionsequence = data['transitionsequence'] + sequencethreshold = data['sequencethreshold'] + home = expanduser(""~"") + if platform.system() == 'Windows': + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + else: + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + + + #print(filename) + data = {} + data['groupswitching'] = groupswitching + data['transitionprobability'] = transitionprobability + data['transitionsequence'] = transitionsequence + data['sequencethreshold'] = sequencethreshold + updatedConfig = json.dumps(data) + with open(filename, ""w"") as fpWrite: + fpWrite.write(updatedConfig) + fpWrite.close() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""Status"":""SUCCESS""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + else: + print(""python ==> else1"") + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print(""PYTHON ######## REQUEST ####### ENDED"") + return + + def do_GET(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != 
re.search('/AION/', self.path) or None != re.search('/aion/', self.path): + usecase = self.path.split('/')[-2] + operation = self.path.split('/')[-1] + if operation.lower() == 'metrices': + sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + if sqlite_dbObj.table_exists('servingDetails'): + df1 = sqlite_dbObj.read('servingDetails',""usecase = '""+usecase+""'"") + else: + df1 = pd.DataFrame() + if sqlite_dbObj.table_exists('monitoring'): + df2 = sqlite_dbObj.read('monitoring') + else: + df2 = pd.DataFrame() + if sqlite_dbObj.table_exists('modeldetails'): + df3 = sqlite_dbObj.read('modeldetails') + else: + df3 = pd.DataFrame() + msg='\\n' + msg+='\\n' + msg+='Model Metrices\\n' + msg+='\\n' + msg+="""""""""""" + msg+='\\n' + msg+='

Model Metrices - Deployed Version '+str(version)+'\\n'
+ msg+='<table>\\n'
+ msg+='<tr><th>Model</th><th>Version</th><th>ScoreType</th><th>Score</th></tr>\\n'
+ for idx in reversed(df3.index):
+ msg += '<tr>'
+ msg += '<td>'+str(df3.usecase[idx])+'</td>'
+ msg += '<td>'+str(df3.version[idx])+'</td>'
+ msg += '<td>'+str(df3.scoreType[idx])+'</td>'
+ msg += '<td>'+str(df3.score[idx])+'</td>'
+ msg += '</tr>\\n'
+ msg += '</table>\\n'
+ msg += '<table>\\n'
+ msg += '<tr><td>No of Predictions</td>'
+ if df1.shape[0] > 0:
+ msg += '<td>'+str(df1['noOfPredictCalls'].iloc[0])+'</td></tr>\\n'
+ else:
+ msg += '<td>0</td></tr>\\n'
+ msg += '<tr><td>No of Ground Truth</td><td>0</td></tr>\\n'
+ msg += '</table>\\n'
+ msg += '<table>\\n'
+ msg += '<tr><th>UseCase</th><th>Version</th><th>Status</th><th>Message</th><th>Time</th></tr>\\n'
+ for idx in reversed(df2.index):
+ msg += '<tr>'
+ msg += '<td>'+str(df2.usecase[idx])+'</td>'
+ msg += '<td>'+str(df2.version[idx])+'</td>'
+ msg += '<td>'+str(df2.status[idx])+'</td>'
+ msg += '<td>'+str(df2.Msg[idx])+'</td>'
+ msg += '<td>'+str(df2.RecordTime[idx])+'</td>'
+ msg += '</tr>\\n'
+ msg += '</table>\\n'
+ msg += '
\\n' + + msg += '\\n' + msg += '\\n' + self.send_response(200) + self.send_response(200) + self.send_header('Content-Type', 'text/html') + self.end_headers() + self.wfile.write(msg.encode()) + else: + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + return + +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class file_status(): + + def __init__(self,file): + self.files_status = {} + self.initializeFileStatus(file) + + def initializeFileStatus(self, file): + self.files_status = {'path': file, 'time':file.stat().st_mtime} + + def is_file_changed(self): + if self.files_status['path'].stat().st_mtime > self.files_status['time']: + self.files_status['time'] = self.files_status['path'].stat().st_mtime + return True + return False + + def run(self): + while( True): + time.sleep(30) + if self.is_file_changed(): + readRun() + +class SimpleHttpServer(): + def __init__(self, ip, port,model_file_path): + self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) + self.status_checker = file_status(model_file_path) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + self.status_thread = threading.Thread(target=self.status_checker.run) + self.status_thread.start() + + def waitForThread(self): + self.server_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +def readRun(boot=False): + filename = os.path.join(os.path.join(os.path.dirname(__file__)),'run') + f = open (filename, ""r"") + data = json.loads(f.read()) + global modelVersion + global version + modelVersion = 'run_'+str(data['version']) + version = str(data['version']) + sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') + dataa = {'usecase':data['usecase'],'version':data['version'],'scoreType':data['scoreType'],'score':data['score']} + data = pd.DataFrame(dataa, index=[0]) + if not sqlite_dbObj.table_exists('modeldetails'): + sqlite_dbObj.create_table('modeldetails',data.columns, data.dtypes) + rdata = sqlite_dbObj.read('modeldetails',""version = '""+dataa['version']+""'"") + if (rdata.shape[0]) == 0 or (not boot): + sqlite_dbObj.write(data,'modeldetails') +readRun(boot=True) + +if __name__=='__main__': + filename = os.path.join(os.path.join(os.path.dirname(__file__)),'run') + parser = argparse.ArgumentParser(description='HTTP Server') + parser.add_argument('port', type=int, help='Listening port for HTTP Server') + parser.add_argument('ip', help='HTTP Server IP') + args = parser.parse_args() + + server = SimpleHttpServer(args.ip, args.port,Path(filename)) + #delete file + #create file + #write just msg as ""started"" + print('HTTP Server Running...........') + #file close + server.start() + server.waitForThread() + import sys +import os +import pickle +import json +import traceback +import warnings +warnings.filterwarnings(""ignore"") +import numpy as np +import pandas as pd +import dask.dataframe as dd +import scipy +from pandas import json_normalize +import dask.distributed +from dask_ml.wrappers import ParallelPostFit + +class incBatchPredictor(): + + def __init__(self): + self.home = os.path.dirname(os.path.abspath(__file__)) + self.configPath = os.path.join(self.home, 'etc', 'config.json') + self.configDict = {} + self.incProfilerPath = '' + self.incSelectorPath = '' + self.modelPath = '' + self.incProfiler = None + 
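# --- Illustrative note (not part of the original class) ---
# readConfig() below reads etc/config.json and expects at least the keys
# listed here; only the key names are taken from that method, the sample
# values are assumptions for illustration (shown dict-style, the real file
# is plain JSON).
#
#   {'targetFeature': 'label', 'trainingFeatures': ['f1', 'f2'],
#    'modelName': 'usecase_1', 'modelVersion': '1',
#    'dataLocation': 'input.csv', 'deployLocation': 'deploy/usecase_1',
#    'profilerLocation': 'profiler.pkl', 'selectorLocation': '',
#    'analysisType': 'classification', 'algorithm': 'Distributed Light Gradient Boosting (LightGBM)',
#    'modelLocation': 'model.pkl', 'scoringCriteria': 'Accuracy',
#    'n_workers': 2, 'npartitions': 4, 'threads_per_worker': 2}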
self.incSelector = None + self.model = None + self.targetFeature = None + self.trainingFeatures = None + self.modelName = '' + self.problemType = '' + self.algorithm = '' + self.dataLocation = """" + self.nworkers = None + self.npartitions = None + self.threadsPerWorker = None + + def get_nworkers(self): + return self.nworkers + def get_npartitions(self): + return self.npartitions + def get_threadsPerWorker(self): + return self.threadsPerWorker + + def readData(self,data): + try: + if os.path.splitext(data)[1] in ["".tsv"","".csv"","".data""]: + df = dd.read_csv(data, # sep=r'\\s*,\\s*', + assume_missing=True, + parse_dates=True, infer_datetime_format=True, + sample=1000000, + # dtype={'caliper': 'object', + # 'timestamp': 'object'}, + # dtype='object', + na_values=['-','?'] + ) + df = df.repartition(self.npartitions) + else: + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + for key in jsonData: + if type(jsonData[key]) == str: + try: + x = eval(jsonData[key]) + if type(x) == int: + jsonData[key] = int(jsonData[key]) + print(""check inside ==int"") + if type(x) == float: + jsonData[key] = float(jsonData[key]) + except: + pass + + df = json_normalize(jsonData) + df = df.replace(r'^\\s*$', np.NaN, regex=True) + df = dd.from_pandas(df, npartitions=self.npartitions) + except KeyError as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(json.dumps(output)) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(json.dumps(output)) + return df + + def readConfig(self): + with open(self.configPath, 'r', encoding= 'utf8') as f: + self.configDict = json.load(f) + self.targetFeature = self.configDict['targetFeature'] + self.trainingFeatures = self.configDict['trainingFeatures'] + self.modelName = self.configDict[""modelName""] + self.modelVersion = self.configDict[""modelVersion""] + self.dataLocation = self.configDict[""dataLocation""] + self.deployLocation = self.configDict[""deployLocation""] + self.incProfilerPath = self.configDict[""profilerLocation""] + self.incSelectorPath = self.configDict[""selectorLocation""] + self.problemType = self.configDict['analysisType'] + self.algorithm = self.configDict[""algorithm""] + self.modelPath = self.configDict[""modelLocation""] + self.scoringCriteria = self.configDict['scoringCriteria'] + self.nworkers = int(self.configDict[""n_workers""]) + self.npartitions = int(self.configDict[""npartitions""]) + self.threadsPerWorker = int(self.configDict[""threads_per_worker""]) + + def pickleLoad(self, file): + if os.path.exists(file): + with open(file, 'rb') as f: + model = pickle.load(f) + return model + else: + return None + + def loadSavedModels(self): + self.incProfiler = self.pickleLoad(os.path.join(self.home, 'model',self.incProfilerPath)) + if self.incSelectorPath != '': + self.incSelector = self.pickleLoad(os.path.join(self.home, 'model',self.incSelectorPath)) + self.model = self.pickleLoad(os.path.join(self.home, 'model',self.modelPath)) + + def dataFramePreProcess(self, df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + df = df.replace('-', np.nan) + df = df.replace('?', np.nan) + return df + + def profiler(self, df): + X = self.dataFramePreProcess(df) + if 'self.targetFeature' in X: + X = X.drop(self.targetFeature, axis=1) + X = self.incProfiler.transform(X) + if self.incSelectorPath != '': + X = self.incSelector.transform(X.to_dask_array(lengths=True)) + # X = 
dd.from_dask_array(X) + return X + + def trainedModel(self,X): + ParallelPostFit(estimator=self.model) + # preds = self.model.predict(X) + if self.algorithm==""Distributed Light Gradient Boosting (LightGBM)"": + X = X.to_dask_array(lengths=True) + preds = self.model.predict(X).compute() + return preds + + def apply_output_format(self,df,modeloutput): + label_maping = None + if self.problemType.lower() == 'regression': + if not isinstance(modeloutput, np.ndarray): + modeloutput = modeloutput.to_numpy() + dask_arr = dd.from_array(modeloutput) + dask_arr.name = 'prediction' + df = df.merge(dask_arr.to_frame()) + df['rounded_prediction'] = df['prediction'].round(2) + + elif self.problemType.lower() == 'classification': + print(""type: "", type(modeloutput)) + if not isinstance(modeloutput, np.ndarray): + modeloutput = modeloutput.to_numpy() + dask_arr = dd.from_array(modeloutput) + dask_arr.name = ""prediction"" + df = df.merge(dask_arr.to_frame()) + with open(self.deployLocation + ""/etc/"" + ""label_mapping.json"") as jsonfile: + label_maping = json.load(jsonfile) + df[""prediction""] = df[""" +"prediction""].astype(int) + df[""prediction""] = df[""prediction""].astype(str) + df[""prediction_label""] = df[""prediction""].map(label_maping) + if df[""prediction_label""].dtype == None: + df[""prediction_label""] = df[""prediction""] + + outputjson = df.compute().to_json(orient='records') + outputjson = {""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + + def predict(self,data): + try: + self.readConfig() + df = self.readData(data) + dfOrg = df.copy() + + if len(self.configDict)!=0: + self.loadSavedModels() + df = self.profiler(df) + modeloutput = self.trainedModel(df) + # dfOrg = dfOrg[self.allFtrs] + output = self.apply_output_format(dfOrg, modeloutput) + else: + pass + except Exception as e: + print(traceback.format_exc()) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + return output + +if __name__ == ""__main__"": + + incBPobj = incBatchPredictor() + incBPobj.readConfig() + nWorkers = incBPobj.get_nworkers() + threads_per_worker = incBPobj.get_threadsPerWorker() + cluster = dask.distributed.LocalCluster(n_workers=nWorkers, + threads_per_worker=threads_per_worker) + client = dask.distributed.Client(cluster) + output = incBPobj.predict(sys.argv[1]) + print(""predictions:"",output) + client.close() + cluster.close() ''' +from AION_185 import aion_prediction +from AION_185 import featureslist +from AION_185 import aion_drift +from AION_185 import aion_performance +''' +#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer +from http.server import BaseHTTPRequestHandler,HTTPServer +#from SocketServer import ThreadingMixIn +from socketserver import ThreadingMixIn +''' +from augustus.core.ModelLoader import ModelLoader +from augustus.strict import modelLoader +''' +import pandas as pd +import os,sys +from os.path import expanduser +import platform +import numpy as np +import configparser +import threading +import subprocess +import argparse +import re +import cgi +from datetime import datetime +import json +import sys +from datetime import datetime +user_records = {} +class LocalModelData(object): + models = {} + +class HTTPRequestHandler(BaseHTTPRequestHandler): + + def do_POST(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/predict', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = 
int(self.headers.get('content-length')) + #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) + data = self.rfile.read(length) + data = json.loads(data) + dataStr = json.dumps(data) + outputStr = aion_prediction.predict(dataStr) + outputStr = outputStr.strip() + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/features', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + outputStr = featureslist.getfeatures() + outputStr = outputStr.strip() + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/monitoring', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + model = self.path.split('/')[-1] + data = json.loads(data) + dataStr = json.dumps(data) + outputStr = aion_drift.drift(dataStr) + outputStr = outputStr.strip() + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/performance', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + data = json.loads(data) + dataStr = json.dumps(data) + outputStr = aion_performance.drift(dataStr) + outputStr = outputStr.strip() + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + elif None != re.search('/AION/pattern_anomaly_predict', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + model = self.path.split('/')[-1] + data = self.rfile.read(length) + + data = json.loads(data) + anomaly = False + remarks = '' + clusterid = -1 + home = expanduser(""~"") + if platform.system() == 'Windows': + configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv') + else: + configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json') + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv') + probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv') + + + dfclus = pd.read_csv(clusterfilename) + 
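# --- Illustrative note (not part of the original handler) ---
# The two CSV files loaded here are consumed below with these columns:
#   stateClustering.csv            -> 'clusterid', 'clusterlist'
#   stateTransitionProbability.csv -> 'State', 'NextState', 'Probability'
# Hypothetical rows, purely to illustrate the expected shape:
#   clusterid,clusterlist         e.g.  0,'login home search'
#   State,NextState,Probability   e.g.  'login','home',0.82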
dfprod = pd.read_csv(probfilename) + f = open(configfilename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + activity = configSettingsJson['activity'] + sessionid = configSettingsJson['sessionid'] + f = open(filename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + groupswitching = configSettingsJson['groupswitching'] + page_threshold = configSettingsJson['transitionprobability'] + chain_count = configSettingsJson['transitionsequence'] + chain_probability = configSettingsJson['sequencethreshold'] + currentactivity = data[activity] + if bool(user_records): + sessionid = data[sessionid] + print(sessionid,user_records['SessionID']) + if sessionid != user_records['SessionID']: + user_records['SessionID'] = sessionid + prevactivity = '' + user_records['probarry'] = [] + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + else: + prevactivity = user_records['Activity'] + user_records['Activity'] = currentactivity + pageswitch = True + if prevactivity == currentactivity or prevactivity == '': + probability = 0 + pageswitch = False + remarks = '' + else: + user_records['pageclicks'] += 1 + df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] + if df1.empty: + remarks = 'Anomaly Detected - User in unusual state' + anomaly = True + clusterid = -1 + probability = 0 + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + avg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + else: + probability = df1['Probability'].iloc[0] + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + davg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + remarks = '' + if user_records['prevclusterid'] != -1: + if probability == 0 and user_records['prevclusterid'] != clusterid: + user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 + if user_records['pageclicks'] == 1: + remarks = 'Anomaly Detected - Frequent Cluster Hopping' + anomaly = True + else: + remarks = 'Cluster Hopping Detected' + user_records['pageclicks'] = 0 + if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: + remarks = 'Anomaly Detected - Multiple Cluster Hopping' + anomaly = True + elif probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + #print(pageswitch) + #print(probability) + if pageswitch == True: + if probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + remarks = '' + if davg < float(chain_probability): + if anomaly == False: + remarks = 'An" +"omaly Detected - In-frequent Pattern Detected' + anomaly = True + else: + user_records['SessionID'] = data[sessionid] + user_records['Activity'] = data[activity] + user_records['probability'] = 0 + user_records['probarry'] = 
[] + user_records['chainprobability'] = 0 + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + user_records['prevclusterid'] = clusterid + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""status"":""SUCCESS"",""data"":{""Anomaly"":""'+str(anomaly)+'"",""Remarks"":""'+str(remarks)+'""}}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""Anomaly"":""Error"",""Remarks"":""'+str(Int)+'""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + elif None != re.search('/AION/pattern_anomaly_settings', self.path): + ctype, pdict = cgi.parse_header(self.headers.get('content-type')) + if ctype == 'application/json': + length = int(self.headers.get('content-length')) + data = self.rfile.read(length) + #print(data) + #keyList = list(data.keys()) + #print(keyList[0]) + model = self.path.split('/')[-1] + #print(model) + data = json.loads(data) + #dataStr = json.dumps(data) + groupswitching = data['groupswitching'] + transitionprobability = data['transitionprobability'] + transitionsequence = data['transitionsequence'] + sequencethreshold = data['sequencethreshold'] + home = expanduser(""~"") + if platform.system() == 'Windows': + filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') + else: + filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') + + + #print(filename) + data = {} + data['groupswitching'] = groupswitching + data['transitionprobability'] = transitionprobability + data['transitionsequence'] = transitionsequence + data['sequencethreshold'] = sequencethreshold + updatedConfig = json.dumps(data) + with open(filename, ""w"") as fpWrite: + fpWrite.write(updatedConfig) + fpWrite.close() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + resp = '{""Status"":""SUCCESS""}' + resp=resp+""\\n"" + resp=resp.encode() + self.wfile.write(resp) + else: + print(""python ==> else2"") + data = {} + else: + print(""python ==> else1"") + self.send_response(403) + self.send_header('Content-Type', 'application/json') + self.end_headers() + print(""PYTHON ######## REQUEST ####### ENDED"") + return + + def do_GET(self): + print(""PYTHON ######## REQUEST ####### STARTED"") + if None != re.search('/AION/predict', self.path): + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + features = featureslist.getfeatures() + displaymsg = 'Data in JSON Format' + config = json.loads(features) + features = config['features'] + datajson={} + for feature in features: + if feature['Type'].lower() != 'target': + datajson[feature['feature']] = 'Value' + displaymsg = json.dumps(datajson) + msg="""""" +URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg} + """""".format(url=self.path,displaymsg=displaymsg) + self.wfile.write(msg.encode()) + elif None != re.search('/AION/monitoring', self.path): + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + displaymsg='{""trainingDataLocation"":""Reference Data File Path"",""currentDataLocation"":""Latest Data File Path""}' + msg="""""" 
+URL:{url} +RequestType: POST +Content-Type=application/json +Body: {displaymsg}"""""".format(url=self.path,displaymsg=displaymsg) + self.wfile.write(msg.encode()) + elif None != re.search('/AION/features', self.path): + outputStr = featureslist.getfeatures() + outputStr = outputStr.strip() + resp = outputStr + resp=resp+""\\n"" + resp=resp.encode() + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(resp) + else: + msg="""""" +URL for prediction: /AION/predict +URL for features List: /AION/features +URL for monitoring: /AION/monitoring +URL for performance: /AION/performance"""""" + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + self.wfile.write(msg.encode()) + return +class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): + allow_reuse_address = True + + def shutdown(self): + self.socket.close() + HTTPServer.shutdown(self) + +class SimpleHttpServer(): + def __init__(self, ip, port): + self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) + + def start(self): + self.server_thread = threading.Thread(target=self.server.serve_forever) + self.server_thread.daemon = True + self.server_thread.start() + + def waitForThread(self): + self.server_thread.join() + + def stop(self): + self.server.shutdown() + self.waitForThread() + +if __name__=='__main__': + parser = argparse.ArgumentParser(description='HTTP Server') + parser.add_argument('port', type=int, help='Listening port for HTTP Server') + parser.add_argument('ip', help='HTTP Server IP') + args = parser.parse_args() + + server = SimpleHttpServer(args.ip, args.port) + #delete file + #create file + #write just msg as ""started"" + print('HTTP Server Running...........') + #file close + server.start() + server.waitForThread() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' from script.inputprofiler import inputprofiler + +def preprocessing(data): + profilerobj = inputprofiler() + data = profilerobj.run(data) + data = data.astype(np.float64) + return(data) + +import matplotlib.pyplot as plt +try: + from sklearn.externals import joblib +except: + import joblib +import os,sys +import pandas as pd +from alibi.explainers import ALE,plot_ale +import io +import json +import urllib, base64 +import numpy as np +from scipy.stats import linregress +from statistics import mean + + +def get_ranked_values(explanation): + ranked_shap_vals = [] + for cls_idx in range(len(explanation.shap_values)): + this_ranking = ( + explanation.raw['importances'][str(cls_idx)]['ranked_effect'], + explanation.raw['importances'][str(cls_idx)]['names'] + ) + ranked_shap_vals.append(this_ranking) + + return ranked_shap_vals + +def feature_importance_using_shap(model,X,featuresNames,classes,x_test,x_test_waterfall): + from alibi.explainers import KernelShap + import shap + shap.initjs() + if hasattr(model, ""decision_function""): + pred_fcn = model.decision_function + elif hasattr(model, ""predict_proba""): + pred_fcn = model.predict_proba + else: + pred_fcn = model.predict + try: + svm_explainer = KernelShap(pred_fcn,feature_names=featuresNames) + xtest = x_test[0].reshape(1, -1) + svm_explainer.fit(X,n_background_samples=100) + svm_explanation = svm_explainer.explain(xtest) + try: + idx = 0 + instance = x_test[0][None, :] + pred = model.predict(instance) + class_idx = pred.item() + if isinstance(svm_explainer.expected_value,np.ndarray): + + forceplot = shap.force_plot(svm_explainer.expected_value[class_idx],svm_explanation.shap_values[class_idx][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False) + else: + forceplot = shap.force_plot(svm_explainer.expected_value,svm_explanation.shap_values[0][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image_64 = '' + try: + plt.clf() + if isinstance(svm_explainer.expected_value,np.ndarray): + r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(), + svm_explanation.shap_values, + idx, + feature_names=featuresNames, + feature_order='importance', + highlight=[class_idx], + legend_labels=classes, + return_objects=True, + legend_location='lower right',show=False) + else: + expectedvalues = [svm_explainer.expected_value] + + + r = shap.multioutput_decision_plot(expectedvalues, + svm_explanation.shap_values, + idx, + feature_names=featuresNames, + highlight = [0], + return_objects=True, + legend_labels=['Value'], + feature_order='importance', + show=False) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image2_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image2_64 = '' + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image2_64 = '' + image_64 = '' + try: + + plt.clf() + x_test_waterfall = x_test_waterfall[featuresNames] + + explainer = shap.Explainer(model.predict, x_test_waterfall, feature_names=featuresNames) + + shap_values = explainer(x_test) + + r = shap.plots.waterfall(shap_values[0]," +"show=False) + image = io.BytesIO() + plt.savefig(image, format='png', bbox_inches='tight') + image.seek(0) + string = base64.b64encode(image.read()) + image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + image3_64 = '' + return (image_64, image2_64, image3_64) + + + + +def feature_importance(xtrain,ytrain,xfeatures,yfeature,problemType): + if problemType == 'classification': + from sklearn.feature_selection import SelectFromModel + from sklearn.ensemble import ExtraTreesClassifier + selector = SelectFromModel(ExtraTreesClassifier()) + selector.fit(xtrain,ytrain) + values = selector.estimator_.feature_importances_ + elif problemType == 'regression': + from sklearn.feature_selection import SelectFromModel + from sklearn.linear_model import Lasso + selector = SelectFromModel(Lasso()) + selector.fit(xtrain,ytrain) + values = np.abs(selector.estimator_.coef_) + labels = xtrain.columns.tolist() + dft = pd.DataFrame() + dft['labels'] = labels + dft['values'] = values + maxrecord = dft.iloc[dft['values'].argmax()] + mostimportantfeature = maxrecord['labels'] + f_imp = dft.to_json(orient='records') + return(f_imp,mostimportantfeature) + + +def get_trust_score(prdictfn,proba_fun,X_train,y_train): + from alibi.confidence import TrustScore + ts = TrustScore(k_filter=10,alpha=.05,filter_type='distance_knn',leaf_size=40,metric='euclidean',dist_filter_type='point') + ts.fit(X_train, y_train, classes=3) + y_pred = prdictfn(X_train) + #y_prod = proba_fun(X_train) + #probas = y_prod[range(len(y_pred)), y_pred] + score, closest_class = ts.score(X_train, y_pred,k=2,dist_type='point') + return(mean(score)) + +def getCounterFactuals(model,prdictfn,features,x_train,categories): + from alibi.explainers import CounterFactualProto + cat_vars_ord = {} + categoryList=categories.keys().tolist() + categoryCountList=categories.tolist() + for i in range(0,len(categoryCountList)): + cat_vars_ord[categoryList[i]] = categoryCountList[i] + print(cat_vars_ord) + X = x_train[0].reshape((1,) + x_train[0].shape) + shape = X.shape + print(shape) + beta = .01 + c_init = 1. + c_steps = 5 + max_iterations = 500 + rng = (-1., 1.) 
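# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of calling feature_importance() defined above; the toy
# frame, column names and labels are assumptions for illustration only.
#
#   X = pd.DataFrame({'age': [25, 32, 47, 51], 'income': [30, 42, 55, 61]})
#   y = pd.Series([0, 0, 1, 1])
#   f_imp_json, top_feature = feature_importance(X, y, list(X.columns), 'target', 'classification')
#   # f_imp_json: records-oriented JSON string of per-feature importances
#   # top_feature: name of the single most important feature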
# scale features between -1 and 1 + feature_range = (x_train.min(axis=0), x_train.max(axis=0)) + cf = CounterFactualProto(prdictfn,shape,cat_vars=cat_vars_ord) + explanation = cf.explain(X) + print(explanation) + +def getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap, class_percent=None): + threshold = 0.95 + from alibi.explainers import AnchorTabular + explainer = AnchorTabular(predict_fn, features) + explainer.fit(X_train.values) + X_test = X_test.values + anchors = [] + for idx in range(len(X_test)): + prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0] + if len(labelMap) > 0: + predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)] + else: + predictionstr = prediction + explanation = explainer.explain(X_test[idx],threshold=threshold) + if str(explanation.anchor) == '[]': + if class_percent and class_percent.get(prediction, 0.0) > threshold: + anchor = f""Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."" + else: + anchor = f'Can not get the explanation for {predictionstr}.' + precision = explanation.precision[0] + else: + anchor = '%s' % (' AND '.join(explanation.anchor)) + precision = explanation.precision + coverage = explanation.coverage + anchorjson = {} + anchorjson['features'] = eval(str(features)) + anchorjson['values'] = eval(str(list(X_test[idx]))) + anchorjson['prediction'] = str(predictionstr) + anchorjson['precision'] = str(round(precision,2)) + anchorjson['anchor'] = anchor + anchors.append(anchorjson) + print(anchors) + try: + return(json.dumps(anchors)) + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return(json.dumps({})) + +def ale_analysis(): + displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),""etc"",""display.json"") + with open(displaypath) as file: + config = json.load(file) + file.close() + model = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),""model"",config['saved_model'])) + predict_fn = lambda x: model.predict(x) + predictproba_fn = lambda x: model.predict_proba(x) + dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz') + dataFrame=pd.read_csv(dathPath,compression='gzip') + #dataFrame = pd.read_csv(dathPath) + + testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','predicteddata.csv.gz') + testdataFrame=pd.read_csv(testdathPath,compression='gzip') + #testdataFrame = pd.read_csv(testdathPath) + features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm'] + + targetfeature = config['targetFeature']#'Species' + labelMap = config['labelMaps'] + targetData = dataFrame[targetfeature] + if config['problemType'].lower() == 'regression': + X_train = dataFrame[features] + X_test = testdataFrame.head(5) + X_test = X_test[features] + else: + valueCount=targetData.value_counts() + class_percent = (valueCount/ len(targetData)).to_dict() + categoryList=valueCount.keys().tolist() + class_names = categoryList + X_train = dataFrame[features] + X_test = testdataFrame.groupby('predict').first().reset_index() + X_test = X_test[features] + f_imp,m_imp_f = 
feature_importance(X_train,targetData,features,targetfeature,config['problemType'].lower()) + if hasattr(model, ""decision_function""): + logit_fun_lr = model.decision_function + try: + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList) + logit_exp_lr = logit_ale_lr.explain(X_train.values) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + for x in range(0,len(feature)): + f_value = feature_values[x] + value = values[x] + lines= [] + for y in range(0,len(class_names)): + line = [] + for z in value: + cordinate = z[y] + line.append(cordinate) + lines.append(line) + line = lines[0] + slope1, intercept1, r_value, p_value, std_err = linregress(f_value,line) + line = lines[1] + slope2, intercept2, r_value, p_value, std_err = linregress(f_value,line) + xi = (intercept1-intercept2) / (slope2-slope1) + xi = round(xi,2) + lastvalues = {} + i = 0 + for line in lines: + value = line[len(line)-1] + lastvalues[class_names[i]] = value + i = i+1 + Keymax = max(lastvalues, key=lastvalues.get) + Keymin = min(lastvalues, key=lastvalues.get) + Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)] + Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)] + sentense = 'Effect of '+str(feature[x])+'
For data samples having '+str(feature[x])+' >= ~'+str(xi)+' ,there is a very high chance that they are of class '+str(Keymaxclass)+' '+targetfeature+'. For data samples having '+str(feature[x])+' < ~'+str(xi)+' there is a very high change that they are of class '+str(Keyminclass)+' '+targetfeature+'.' + sentences.append(sentense) + except: + sentense = '' + sentences.append(sentense) + xi = 0 + elif hasattr(model, ""predict_proba""): + logit_fun_lr = model.predict_proba + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList) + logit_exp_lr = logit_ale_lr.explain(X_train.values) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + sentense = 'Graphs gives a feature value how much more(less) probability does the model assign to each class relative to mean prediction. This also means that any increase in relative probability of one class must result into a decrease in probability of another class.' + sentences.append(sentense) + xi = 0 + elif hasattr(model, ""predict""): + logit_fun_lr = model.predict + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value']) + logit_exp_lr = logit_ale_lr.explain(X_train.values) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + sentense = 'The ALE value corresponding to that feature value is difference to the mean effect of that feature. Put differently, the ALE value is the relative feature effect on the prediction at that feature value.' + sentences.append(sentense) + xi = 0 + if (len(features)%2 ==0): + n_cols = int(len(features)/2) + else: + n_cols = int(len(features)/2)+1 + figheight = n_cols*3 + try: + plot_ale(logit_exp_lr,n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight}) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except: + image_64 = '' + #score = get_trust_score(model.predict,proba_fun_lr,X_train.values,targetData.values) + if config['problemType'].lower() == 'classification': + anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap,class_percent) + else: + anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap) + #anchors=[] + #anchorjson = json.dumps(anchors) + #feature_importance_using_shap(model,X_train.values,features,class_names) + #getCounterFactuals(model,predictproba_fn,features,X_train.values,valueCount) + output_json = {""status"":""SUCCESS"",""data"":{""data"":image_64,""most_influencedfeature"":m_imp_f,""interceptionpoint"":xi,""sentences"":sentences,""feature_importance"":json.loads(f_imp),""anchorjson"":json.loads(anchorjson)}} + output_json = json.dumps(output_json) + print(""aion_ai_explanation:"",output_json) + return(output_json) + +def local_analysis(jsonData): + jsonData = json.loads(jsonData) + displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),""etc"",""display.json"") + with open(displaypath) as file: + config = json.load(file) + file.close() + model = joblib.load(os" +".path.join(os.path.dirname(os.path.abspath(__file__)),""model"",config['saved_model'])) + predict_fn = lambda x: model.predict(x) + + dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz') + 
dataFrame=pd.read_csv(dathPath,compression='gzip') + testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz') + testdataFrame = pd.read_csv(testdathPath, compression='gzip') + features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm'] + + targetfeature = config['targetFeature']#'Species' + + targetData = dataFrame[targetfeature] + valueCount=targetData.value_counts() + categoryList=valueCount.keys().tolist() + class_names = categoryList + #class_names = class_names.sort() + X_train = dataFrame[features] + from pandas import json_normalize + df_test = json_normalize(jsonData) + df_test = preprocessing(df_test) + df_test = df_test[features] + from alibi.explainers import AnchorTabular + explainer = AnchorTabular(predict_fn, features) + explainer.fit(X_train.values) + df_test = df_test.values + prediction = explainer.predictor(df_test.reshape(1, -1))[0] + labelMap = config['labelMaps'] + if len(labelMap) > 0: + prediction = list(labelMap.keys())[list(labelMap.values()).index(prediction)] + else: + prediction = str(prediction) + try: + explanation = explainer.explain(df_test,threshold=0.85) + if str(explanation.anchor) == '[]': + anchor = 'NA' + precision = str(round(explanation.precision[0],2)) + else: + anchor = '%s' % (' AND '.join(explanation.anchor)) + precision = str(round(explanation.precision,2)) + coverage = explanation.coverage + except Exception as e: + print(e) + anchor = 'NA' + precision = 0 + coverage = 0 + df_test_waterfall = testdataFrame + forceplot,multidecisionplot,waterfallplot = feature_importance_using_shap(model,X_train.head(300).values,features,class_names,df_test,df_test_waterfall) + output_json = {""status"":""SUCCESS"",""data"":{""anchor"":anchor,""precision"":precision,""coverage"":coverage,""prediction"":prediction,""forceplot"":forceplot,""multidecisionplot"":multidecisionplot,""waterfallplot"":waterfallplot}} + #print(output_json) + output_json = json.dumps(output_json) + print(""aion_ai_explanation:"",output_json) + return(output_json) +if __name__ == '__main__': + analysis_type = sys.argv[1] + if analysis_type.lower() == 'global': + ale_analysis() + if analysis_type.lower() == 'local': + data = sys.argv[2] + local_analysis(data) from script.inputprofiler import inputprofiler + +def preprocessing(data): + profilerobj = inputprofiler() + data = profilerobj.run(data) + data = data.astype(np.float64) + return(data) + +import matplotlib.pyplot as plt +try: + from sklearn.externals import joblib +except: + import joblib +import os,sys +import pandas as pd +from alibi.explainers import ALE,plot_ale +import io +import json +import urllib, base64 +import numpy as np +from scipy.stats import linregress +from statistics import mean +from tensorflow.keras.models import load_model +from tensorflow.keras import backend as K +import tensorflow as tf +tf.compat.v1.disable_eager_execution() +def recall_m(y_true, y_pred): + true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) + possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) + recall = true_positives / (possible_positives + K.epsilon()) + return recall + +def precision_m(y_true, y_pred): + true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) + predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) + precision = true_positives / (predicted_positives + K.epsilon()) + return precision + +def f1_m(y_true, y_pred): + precision = precision_m(y_true, y_pred) + recall = recall_m(y_true, y_pred) + return 
2*((precision*recall)/(precision+recall+K.epsilon())) + + +def rmse_m(y_true, y_pred): + return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) + +def r_square(y_true, y_pred): + SS_res = K.sum(K.square(y_true-y_pred)) + SS_tot = K.sum(K.square(y_true-K.mean(y_true))) + return (1 - SS_res/(SS_tot+K.epsilon())) + +def get_ranked_values(explanation): + ranked_shap_vals = [] + for cls_idx in range(len(explanation.shap_values)): + this_ranking = ( + explanation.raw['importances'][str(cls_idx)]['ranked_effect'], + explanation.raw['importances'][str(cls_idx)]['names'] + ) + ranked_shap_vals.append(this_ranking) + + return ranked_shap_vals + +def feature_importance_using_shap(model,X,featuresNames,classes,x_test,problemType,modelname,x_test_waterfall): + from alibi.explainers import KernelShap + import shap + shap.initjs() + if hasattr(model, ""decision_function"") and problemType.lower() == 'classification': + pred_fcn = model.decision_function + elif hasattr(model, ""predict_proba"") and problemType.lower() == 'classification': + pred_fcn = lambda x: model.predict_proba(np.expand_dims(x, axis=2)) + else: + if modelname == 'Neural Network': + pred_fcn = lambda x: model.predict(x) + else: + pred_fcn = lambda x: model.predict(np.expand_dims(x, axis=2)) + svm_explainer = KernelShap(pred_fcn,feature_names=featuresNames) + xtest = x_test[0].reshape(1, -1) + svm_explainer.fit(X,n_background_samples=100) + svm_explanation = svm_explainer.explain(xtest) + try: + idx = 0 + instance = x_test[0][None, :] + if problemType.lower() == 'classification': + if modelname == 'Neural Network': + instance = x_test + else: + instance = np.expand_dims(x_test, axis=2) + pred = np.argmax(model.predict(instance),axis=1) + class_idx = pred.item() + else: + instance = np.expand_dims(x_test, axis=2) + pred = model.predict(instance) + class_idx = 0 + if isinstance(svm_explainer.expected_value,np.ndarray): + forceplot = shap.force_plot(svm_explainer.expected_value[class_idx],svm_explanation.shap_values[class_idx][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False) + else: + forceplot = shap.force_plot(svm_explainer.expected_value,svm_explanation.shap_values[0][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image_64 = '' + try: + plt.clf() + if isinstance(svm_explainer.expected_value,np.ndarray): + r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(), + svm_explanation.shap_values, + idx, + feature_names=featuresNames, + feature_order='importance', + highlight=[class_idx], + legend_labels=classes, + return_objects=True, + legend_location='lower right',show=False) + else: + expectedvalues = [svm_explainer.expected_value] + + + r = shap.multioutput_decision_plot(expectedvalues, + svm_explanation.shap_values, + idx, + feature_names=featuresNames, + highlight = [0], + return_objects=True, + legend_labels=['Value'], + feature_order='importance', + show=False) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image2_64 = 'data:image/png;base64,' + 
urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + image2_64 = '' + try: + + plt.clf() + explainer = shap.DeepExplainer(model, X) + + shap_values = explainer.shap_values(x_test_waterfall.values) + + exp = shap.plots._waterfall.waterfall_legacy(explainer.expected_value[0].numpy(), shap_values[0][0],feature_names=featuresNames,show=False) + image = io.BytesIO() + plt.savefig(image, format='png', bbox_inches='tight') + image.seek(0) + string = base64.b64encode(image.read()) + image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + image3_64 = '' + return(image_64,image2_64,image3_64) + + + + +def feature_importance(xtrain,ytrain,xfeatures,yfeature,problemType): + if problemType == 'classification': + from sklearn.feature_selection import SelectFromModel + from sklearn.ensemble import ExtraTreesClassifier + selector = SelectFromModel(ExtraTreesClassifier()) + selector.fit(xtrain,ytrain) + values = selector.estimator_.feature_importances_ + elif problemType == 'regression': + from sklearn.feature_selection import SelectFromModel + from sklearn.linear_model import Lasso + selector = SelectFromModel(Lasso()) + selector.fit(xtrain,ytrain) + values = np.abs(selector.estimator_.coef_) + labels = xtrain.columns.tolist() + dft = pd.DataFrame() + dft['labels'] = labels + dft['values'] = values + maxrecord = dft.iloc[dft['values'].argmax()] + mostimportantfeature = maxrecord['labels'] + f_imp = dft.to_json(orient='records') + return(f_imp,mostimportantfeature) + + +def get_trust_score(prdictfn,proba_fun,X_train,y_train): + from alibi.confidence import TrustScore + ts = TrustScore(k_filter=10,alpha=.05,filter_type='distance_knn',leaf_size=40,metric='euclidean',dist_filter_type='point') + ts.fit(X_train, y_train, classes=3) + y_pred = prdictfn(X_train) + #y_prod = proba_fun(X_train) + #probas = y_prod[range(len(y_pred)), y_pred] + score, closest_class = ts.score(X_train, y_pred,k=2,dist_type='point') + return(mean(score)) + +def getCounterFactuals(model,prdictfn,features,x_train,categories): + from alibi.explainers import CounterFactualProto + cat_vars_ord = {} + categoryList=categories.keys().tolist() + categoryCountList=categories.tolist() + for i in range(0,len(categoryCountList)): + cat_vars_ord[categoryList[i]] = categoryCountList[i] + print(cat_vars_ord) + X = x_train[0].reshape((1,) + x_train[0].shape) + shape = X.shape + print(shape) + beta = .01 + c_init = 1. + c_steps = 5 + max_iterations = 500 + rng = (-1., 1.) 
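# --- Illustrative note (not part of the original module) ---
# Keras cannot restore the custom metrics defined above (recall_m, precision_m,
# f1_m, rmse_m, r_square) from a saved model on its own, which is why
# ale_analysis() below reloads with custom_objects and recompiles. A minimal
# sketch of that pattern, with an assumed file name:
#
#   model = load_model('saved_model.h5', custom_objects={'f1_score': f1_m}, compile=False)
#   model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[f1_m])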
# scale features between -1 and 1 + feature_range = (x_" +"train.min(axis=0), x_train.max(axis=0)) + cf = CounterFactualProto(prdictfn,shape,cat_vars=cat_vars_ord) + explanation = cf.explain(X) + print(explanation) + +def getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap, class_percent=None): + threshold = 0.95 + from alibi.explainers import AnchorTabular + explainer = AnchorTabular(predict_fn, features) + explainer.fit(X_train.values) + X_test = X_test.values + anchors = [] + for idx in range(len(X_test)): + prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0] + if isinstance(prediction,np.ndarray): + prediction = prediction[0] + if len(labelMap) > 0: + predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)] + else: + predictionstr = str(prediction) + try: + explanation = explainer.explain(X_test[idx],threshold=threshold) + + if str(explanation.anchor) == '[]': + if class_percent and class_percent.get(prediction, 0.0) > threshold: + anchor = f""Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."" + else: + anchor = f'Can not get the explaination for {predictionstr}.' + precision = explanation.precision[0] + else: + anchor = '%s' % (' AND '.join(explanation.anchor)) + precision = explanation.precision + coverage = explanation.coverage + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + anchor = 'Reason Not found' + precision = 0 + + + anchorjson = {} + anchorjson['features'] = eval(str(features)) + anchorjson['values'] = eval(str(list(X_test[idx]))) + anchorjson['prediction'] = predictionstr + anchorjson['precision'] = precision + anchorjson['anchor'] = anchor + anchors.append(anchorjson) + return(json.dumps(anchors)) + + +def ale_analysis(): + displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),""etc"",""display.json"") + with open(displaypath) as file: + config = json.load(file) + file.close() + scoreParam = config['scoreParam'] + filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)),""model"",config['saved_model']) + if(scoreParam.lower() == 'rmse'): + model = load_model(filename_dl,custom_objects={""rmse"": rmse_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[rmse_m]) + elif(scoreParam.lower() == 'r2'): + model = load_model(filename_dl,custom_objects={""r2"": r_square},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[r_square]) + elif(scoreParam.lower() == 'recall'): + model = load_model(filename_dl,custom_objects={""recall"": recall_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[recall_m]) + elif(scoreParam.lower() == 'precision'): + model = load_model(filename_dl,custom_objects={""precision"": precision_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[precision_m]) + elif(scoreParam.lower() == 'roc_auc'): + model = load_model(filename_dl,compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[tf.keras.metrics.AUC()]) + elif(scoreParam.lower() == 'f1_score'): + model = load_model(filename_dl,custom_objects={""f1_score"": 
f1_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[f1_m]) + else: + model = load_model(filename_dl) + if config['modelname'] == 'Neural Network': + predict_fn = lambda x: model.predict(x) + else: + predict_fn = lambda x: model.predict(np.expand_dims(x, axis=2)) + + predictproba_fn = lambda x: model.predict_proba(np.expand_dims(x, axis=2)) + dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz') + dataFrame=pd.read_csv(dathPath,compression='gzip') + + testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','predicteddata.csv.gz') + testdataFrame=pd.read_csv(testdathPath,compression='gzip') + features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm'] + + targetfeature = config['targetFeature']#'Species' + labelMap = config['labelMaps'] + targetData = dataFrame[targetfeature] + if config['problemType'].lower() == 'regression': + X_train = dataFrame[features] + X_test = testdataFrame.head(5) + X_test = X_test[features] + else: + valueCount=targetData.value_counts() + class_percent = (valueCount/ len(targetData)).to_dict() + categoryList=valueCount.keys().tolist() + class_names = categoryList + X_train = dataFrame[features] + X_test = testdataFrame.groupby('predict').first().reset_index() + X_test = X_test[features] + f_imp,m_imp_f = feature_importance(X_train,targetData,features,targetfeature,config['problemType'].lower()) + if hasattr(model, ""decision_function"") and config['problemType'].lower() == 'classification': + logit_fun_lr = model.decision_function + try: + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList) + logit_exp_lr = logit_ale_lr.explain(X_train.values) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + for x in range(0,len(feature)): + f_value = feature_values[x] + value = values[x] + lines= [] + for y in range(0,len(class_names)): + line = [] + for z in value: + cordinate = z[y] + line.append(cordinate) + lines.append(line) + line = lines[0] + slope1, intercept1, r_value, p_value, std_err = linregress(f_value,line) + line = lines[1] + slope2, intercept2, r_value, p_value, std_err = linregress(f_value,line) + xi = (intercept1-intercept2) / (slope2-slope1) + xi = round(xi,2) + lastvalues = {} + i = 0 + for line in lines: + value = line[len(line)-1] + lastvalues[class_names[i]] = value + i = i+1 + Keymax = max(lastvalues, key=lastvalues.get) + Keymin = min(lastvalues, key=lastvalues.get) + Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)] + Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)] + sentense = 'Effect of '+str(feature[x])+'
For data samples having '+str(feature[x])+' >= ~'+str(xi)+' ,there is a very high chance that they are of class '+str(Keymaxclass)+' '+targetfeature+'. For data samples having '+str(feature[x])+' < ~'+str(xi)+' there is a very high change that they are of class '+str(Keyminclass)+' '+targetfeature+'.' + sentences.append(sentense) + except: + sentense = '' + sentences.append(sentense) + xi = 0 + elif hasattr(model, ""predict_proba"") and config['problemType'].lower() == 'classification': + logit_fun_lr = lambda x: model.predict_proba(np.expand_dims(x, axis=2)) + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList) + print(model.__class__) + + try: + logit_exp_lr = logit_ale_lr.explain(X_train.values) + except: + X = np.expand_dims(X_train, axis=2) + logit_exp_lr = logit_ale_lr.explain(X) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + sentense = 'Graphs gives a feature value how much more(less) probability does the model assign to each class relative to mean prediction. This also means that any increase in relative probability of one class must result into a decrease in probability of another class.' + sentences.append(sentense) + xi = 0 + elif hasattr(model, ""predict""): + try: + if config['modelname'] == 'Neural Network': + logit_fun_lr = lambda x: model.predict(x) + else: + logit_fun_lr = lambda x: model.predict(np.expand_dims(x, axis=2)) + logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value']) + logit_exp_lr = logit_ale_lr.explain(X_train.values) + values = logit_exp_lr.ale_values + feature = logit_exp_lr.feature_names + feature_values = logit_exp_lr.feature_values + lines= [] + sentences = [] + sentense = 'The ALE value corresponding to that feature value is difference to the mean effect of that feature. Put differently, the ALE value is the relative feature effect on the prediction at that feature value.' 
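+# --- Added note (illustrative sketch, not executed here) ---
+# This branch is the plain-regression path: ALE is computed against model.predict with
+# a single 'Value' target. Stripped of the surrounding plumbing, the alibi workflow it
+# relies on is roughly, for any fitted regressor reg and numeric array X:
+#   from alibi.explainers import ALE, plot_ale
+#   ale = ALE(reg.predict, feature_names=features, target_names=['Value'])
+#   exp = ale.explain(X)        # per-feature exp.ale_values / exp.feature_values
+#   plot_ale(exp, n_cols=2)     # same helper used below to render the PNG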
+ sentences.append(sentense) + xi = 0 + except: + xi = 0 + sentences = [] + if (len(features)%2 ==0): + n_cols = int(len(features)/2) + else: + n_cols = int(len(features)/2)+1 + figheight = n_cols*3 + try: + plot_ale(logit_exp_lr,n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight}) + plt.tight_layout(pad = 0) + image = io.BytesIO() + plt.savefig(image, format='png') + image.seek(0) + string = base64.b64encode(image.read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + except: + image_64 = '' + #score = get_trust_score(model.predict,proba_fun_lr,X_train.values,targetData.values) + if config['problemType'].lower() == 'classification': + anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap,class_percent) + else: + anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap) + #anchors=[] + #anchorjson = json.dumps(anchors) + #feature_importance_using_shap(model,X_train.values,features,class_names) + #getCounterFactuals(model,predictproba_fn,features,X_train.values,valueCount) + output_json = {""status"":""SUCCESS"",""data"":{""data"":image_64,""most_influencedfeature"":m_imp_f,""interceptionpoint"":xi,""sentences"":sentences,""feature_importance"":json.loads(f_imp),""anchorjson"":json.loads(anchorjson)}} + output_json = json.dumps(output_json) + print(""aion_ai_explanation:"",output_json) + return(output_json) + +def local_analysis(jsonData): + jsonData = json.loads(jsonData) + displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),""etc"",""display.json"") + with open(displaypath) as file: + config = json.load(file) + file.close() + scoreParam = config['scoreParam'] + filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)),""model"",config['saved_model']) + if(scoreParam.lower() == 'rmse'): + model = load_model(filename_dl,custom_objects={""rmse"": rmse_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[rmse_m]) + elif(scoreParam.lower() == 'r2'): + model = load_model(filename_dl,custom_objects={""r2"": r_square},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[r_square]) + elif(scoreParam.lower()" +"== 'recall'): + model = load_model(filename_dl,custom_objects={""recall"": recall_m},compile=False) + model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[recall_m]) + elif(scoreParam.lower() == 'precision'): + model = load_model(filename_dl," +"processed ""+str(self.num_records)+"".-- Loss: ""+str(loss)+"". -- accuracy: ""+str(accuracy)) + logger.info(""FL Client model intercept: ""+str(model.intercept_)) + logger.info(""FL Client model coefficients: ""+str(model.coef_)) + self.model_save(self.model) + return loss, len(self.X_test), {""accuracy"": accuracy} + elif (self.problem_type.lower() == 'regression'): + if (self.model_name.lower() == 'linearregression' ): + # loss = log_loss(self.y_test, self.model.predict(self.X_test)) + mse=mean_squared_error(self.y_test, self.model.predict(self.X_test)) + rmse = np.sqrt(mean_squared_error(self.y_test, self.model.predict(self.X_test))) + mae = mean_absolute_error(self.y_test, self.model.predict(self.X_test)) + r2=r2_score(self.y_test, self.model.predict(self.X_test)) + loss = rmse + results = { + ""mean_absolute_error"": mae, + ""mean_squared_error"": mse, + ""root_mean_squared_error"": rmse, + ""r2"":r2, + } + print(f""{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(rmse)}. -- metrics: {results}. 
"") + logger.info(str(self.client_id)+"" Sending weights -- data processed ""+str(self.num_records)+"".-- Loss: ""+str(rmse)+"". -- metrics: ""+str(results)) + logger.info(""FL Client model intercept: ""+str(self.model.intercept_)) + logger.info(""FL Client model coefficients: ""+str(self.model.coef_)) + self.model_save(self.model) + return loss, len(self.X_test), results + +"""""" The below aion fl client is for deep learning process. +Why different client for sklearn and deeplearn ?: Because, flower calling the client object and process all functions (get_parameters,fit and evaluate) internally. So, user space we cannot combine both (sklearn n dl) using if..else. """""" +class aionflc_dl(flower.client.NumPyClient): + def __init__(self,model,num_rounds,model_name,version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train, X_test, y_train, y_test,model_params): + self.count=0 + self.num_rounds=round(num_rounds) + self.model_name=model_name + self.version=version + self.wait_time=int(wait_time) + self.client_id=client_id + self.num_records=num_records + self.model_overwrite=model_overwrite + self.model=model + self.problem_type=problem_type + self.X_train, self.X_test, self.y_train, self.y_test=X_train, X_test, y_train, y_test + self.model_params=model_params + + # """""" The below part not used now. In future, for our own grpc communication, this module will be used.Call this function where we want. Need to modify aiongrpcproto.proto according our requirement."""""" + # def callaiongrpcclient(self): + # clientins = aiongrpcclient() + # status=clientins.startgrpcclient() + # return status + + #Save the final model + def model_save(self,model): + ##Locate standard model dir to save model + cwd = os.path.abspath(os.path.dirname(__file__)) + model_location=os.path.join(cwd, 'models') + try: + os.makedirs(model_location) + except FileExistsError as fe: + # here,model_location already exists + pass + + model_name=self.model_name + # version=self.version + ## Saving model + if (self.model_overwrite.lower() == 'false'): + version=str(self.count) + if (model_name.lower() == ""deeplearning""): + file_name=model_name+'_'+self.problem_type+'_'+version+"".h5"" + saved_model=os.path.normpath(os.path.join(model_location,file_name)) + log(INFO, ""flclient saved_model path: %s "",str(saved_model)) + try: + model.save(saved_model) + return True + except Exception as e: + logger.info(""model save error. Err.Msg: ""+str(e)) + return False + else: + file_name=model_name+'_'+self.problem_type+'_'+version+"".sav"" + saved_model=os.path.normpath(os.path.join(model_location,file_name)) + log(INFO, ""flclient saved_model path: %s "",str(saved_model)) + try: + + with open (saved_model,'wb') as f: + pkl.dump(model,f) + return True + except Exception as e: + logger.info(""model save error. Err.Msg: ""+str(e)) + return False + elif (self.model_overwrite.lower() == 'true'): + version=str(self.version) + if (model_name.lower() == ""deeplearning""): + file_name=model_name+'_'+self.problem_type+'_'+version+"".h5"" + saved_model=os.path.normpath(os.path.join(model_location,file_name)) + log(INFO, ""flclient saved_model path: %s "",str(saved_model)) + try: + model.save(saved_model) + return True + except Exception as e: + logger.info(""model save error. 
Err.Msg: ""+str(e)) + return False + else: + file_name=model_name+'_'+self.problem_type+'_'+version+"".sav"" + saved_model=os.path.normpath(os.path.join(model_location,file_name)) + log(INFO, ""flclient saved_model path: %s "",str(saved_model)) + try: + with open (saved_model,'wb') as f: + pkl.dump(model,f) + return True + except Exception as e: + logger.info(""model save error. Err.Msg: ""+str(e)) + return False + else: + ##Write own user instruction + pass + + def get_parameters(self, config): + """"""Get parameters of the local model."""""" + return self.model.get_weights() + + def get_properties(self,model,time_out): + """"""Return the current client properties."""""" + client_info={'client_id':self.client_id} + time_out=100 + return client_info,model,time_out + + def fit(self, parameters, config): + """"""Train parameters on the locally held training set."""""" + # Update local model parameters + self.model.set_weights(parameters) + num_partitions=(self.num_rounds) + # num_partitions=round(5) + xtrain=np.array_split(self.X_train, num_partitions)[self.count] + ytrain=np.array_split(self.y_train, num_partitions)[self.count] + # y_train = np_utils.to_categorical(y_train, num_classes) + # y_test = np_utils.to_categorical(y_test, num_classes) + # Get hyperparameters for this round + batch_size: int = int(self.model_params[""batch_size""]) + epochs: int = int(self.model_params[""epochs""]) + # round: int = config[""rnd""] + # self.round_id = round + log(INFO, ""==========================="") + log(INFO, ""Start training model on local client %s round %i"", + self.client_id, config['rnd']) + time.sleep(self.wait_time) + self.count+=1 + # Train the model using hyperparameters from config + history = self.model.fit( + xtrain, + ytrain, + batch_size, + epochs, + shuffle=False, + # validation_split=0.1, + validation_data=(self.X_test, self.y_test), + verbose=1 + ) + # Return updated model parameters and results + parameters_prime = self.model.get_weights() + num_examples_train = len(self.X_train) + model_name = self.model_name + problem_type = self.problem_type + if model_name == ""deeplearning"": + if problem_type == ""classification"": + acc = self.model.history.history['val_accuracy'] + log(INFO, ""Validated accuracy at the end of current round of client %s : %.2f %%"", + self.client_id, acc[-1]*100) + log(INFO, ""Finished training model on local client %s"", self.client_id) + results = { + ""loss"": history.history[""loss""][0], + ""accuracy"": history.history[""accuracy""][0], + ""val_loss"": history.history[""val_loss""][0], + ""val_accuracy"": history.history[""val_accuracy""][0], + } + if problem_type == ""regression"": + mean_absolute_error = history.history['mean_absolute_error'][0] + mean_squared_error = history.history['mean_squared_error'][0] + y_pred = self.model.predict(self.X_test) + from sklearn import metrics + root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred)) + log(INFO, ""Mean Absolute Error at the end of current round of client %s : %f"", + self.client_id, mean_absolute_error) + log(INFO, ""Mean Squared Error at the end of current round of client %s : %f"", + self.client_id, mean_squared_error) + log(INFO, ""Root Mean Squared Error at the end of current round of client %s : %f"", + self.client_id, root_mean_squared_error) + log(INFO, ""Finished training model on local client %s"", self.client_id) + results = { + ""mean_absolute_error"": mean_absolute_error, + ""mean_squared_error"": mean_squared_error, + ""root_mean_squared_error"": 
root_mean_squared_error, + } + return parameters_prime, num_examples_train, results + + + def evaluate(self, parameters, config): + """"""Evaluate parameters on the locally held test set."""""" + + # Update local model with global parameters + self.model.set_weights(parameters) + num_partitions=(self.num_rounds) + # Get config values + # batch_size: int = config[""val_batch_size""] + batch_size: int = int(self.model_params[""batch_size""]) + steps: int = np.ceil(len(self.X_test)/batch_size) + num_examples_test = len(self.X_test) + log(INFO, ""Run for only %i steps"", steps) + # Evaluate global model parameters on the local test data and return results + model_name = self.model_name + problem_type = self.problem_type + self.model_save(self.model) + if model_name == ""deeplearning"": + if problem_type == ""classification"": + loss, accuracy = self.model.evaluate(self.X_test, self.y_test,verbose=0) + log(INFO, ""Client %s : Accuracy %.2f %%"", self.client_id, accuracy*100) + log(INFO, ""Client %s : Loss %.4f "", self.client_id, loss) + return loss, num_examples_test, {""accuracy"": accuracy} + if problem_type == ""regression"": + loss, mean_absolute_error, mean_squared_error = self.model.evaluate(self.X_test, self.y_test, + steps=steps,verbose=1) + y_pred = self.model.predict(self.X_test) + + root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred)) + + log(INFO, ""Client %s : mean_absolute_error %f "", + self.client_id, mean_absolute_error) + + log(INFO, ""Client %s : mean_squared_error %f "", + self.client_id, mean_squared_error) + + log(INFO, ""Client %s : root_mean_squared_error %f "", + self.client_id, root_mean_squared_error) + return loss, num_examples_test, {""mean_absolute_error"": mean_absolute_error, + ""mean_squared_error"": mean_squared_error, + ""root_mean_squared_error"": root_mean_squared_error} + +def randclientid(s,c): + c=string.ascii_uppercase + string.digits + return ''.join(random.choice(c) for x in range(s)) + +## Loading input data +def dataLoad(jsonfile): + with open(jsonfile, 'r') as file: + data = json.load(file) + server_ip=str(data[""server_IP""]) + server_port=str(data[""server_port""]) + model_name=str(data[""model_name""]) + problem_type=str(data[""problem_type""]) + data_location=str(data[""data_location""]) + # deploy_location=str(data[""deploy_location""]) + model_params=data[""model_hyperparams""] + train_size=int(data[""train_size""]) + model_version=str(data[""version""]) + selected_feature=data[""selected_feature""] + if (type(selected_feature) is str): + selected_feature=selected_feature.split(',') + model_overwrite=data['model_overwrite'] + target_feature=data[""target_feature""] + num_records=int(data['num_records_per_round']) + wait_time=data['wait_time'] + server_address=server_ip+':'+server_port + # server_address=f""{server_ip}:{server_port}"" + return server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite + +# def getfilepath() + +"""""" Main aion federated learning client function call. """""" +if __name__ == ""__main__"": + ##Client random id gen. + + rand_id=randclientid(9, ""ABC1234567890"") + client_id='flclient-'+str(rand_id) + + try: + json_file=sys.argv[1] + except Exception as e: + # sys.stdout.write(""Please provide input configuration file. example: < python.exe 'fedclient\\aionflc.py' 'fedclient\\config.json' > "") + log(INFO, ""Please provide input configuration file. 
example: \\" +"n"") + server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite = dataLoad(json_file) + file_name=model_name+'_'+model_version+"".log"" + cwd = os.path.abspath(os.path.dirname(__file__)) + log_location = os.path.join(cwd, 'logs') + try: + os.makedirs(log_location) + except FileExistsError as fe: + # here,log_location already exists + pass + + try: + logobj = logging.getLogger('AION') + fl_log=os.path.normpath(os.path.join(log_location,file_name)) + log(INFO, ""flclient log file path: %s "",str(fl_log)) + logging.basicConfig(filename=fl_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + except Exception as e: + log(INFO, ""logging error. Error Msg: %s "",str(e)) + pass + ## default data location ~data\\inputfile.csv + data_location = os.path.normpath(os.path.join(cwd, data_location)) + df = pd.read_csv(data_location) + df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)] + df=df.reset_index(drop=True) + y=df[target_feature] + # X = df.drop(target_feature, axis=1) + # + # print(""selected_feature: \\n"",selected_feature) + X=df[selected_feature] + input_shape = X.shape[1] # len(selected_feature) + output_shape = len(y.value_counts()) + test_size=(100-train_size)/100 + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size) + no_classes = len(df.groupby(target_feature).count()) + no_features=len(selected_feature) + ## Pass the train data. + (X_train, y_train) = utils.partition(X_train, y_train, 1)[0] + scaler = StandardScaler() + X_train_scaled = scaler.fit_transform(X_train) + X_test_scaled = scaler.transform(X_test) + # y_train = pd.get_dummies(y_train) + # y_test = pd.get_dummies(y_test) + y_train_dl = pd.get_dummies(y_train, sparse=True) + y_test_dl = pd.get_dummies(y_test, sparse=True) + + if (problem_type.lower() == ""classification""): + if (model_name.lower() == ""logisticregression""): + #n_classes = df[target_feature].nunique() + no_classes = len(df.groupby(target_feature).count()) + no_features=len(selected_feature) + logger.info(""no_classes: ""+str(no_classes)) + logger.info(""no_features: ""+str(no_features)) + modelName=""logisticregression"" + model = None + + model = LogisticRegression(**model_params, warm_start=True) + + try: + status=utils.setmodelName(model_name) + utils.set_initial_params(model,no_classes,no_features) + except Exception as e: + print(""util error: \\n"",e) + num_rounds=round(len(df)/num_records) + log(INFO, ""Federated learning Client connecting to Server @: %s "",str(server_address)) + try: + flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test)) + except Exception as e: + logger.info(""AION FL Client instance error: \\n""+str(e)) + log(INFO, ""AION federated learning Client %s execution completed."",str(client_id)) + + elif (model_name.lower() == ""deeplearning""): + optimizer = model_params[""optimizer""] + loss_func = model_params[""losses""] + act_func = model_params[""activation""] + last_act_func = model_params[""last_activation""] + input_shape = X.shape[1] # len(selected_feature) + output_shape = len(y.value_counts()) + print(f""input_shape:{input_shape}, output_shape:{output_shape}."") + + model = None + if output_shape == 2: + if last_act_func == ""sigmoid"" and loss_func == ""binary_crossentropy"": + model 
= dl_model.dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, + act_func, last_act_func) + elif last_act_func == ""softmax"" and loss_func == ""categorical_crossentropy"": + model = dl_model.dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, + act_func, last_act_func) + else: + model = dl_model.dl_multiClass_classification(input_shape, + output_shape, optimizer, loss_func, + act_func, last_act_func) + print(model.summary()) + # status=utils.setmodelName(modelName) + # utils.set_initial_params(model,no_classes,no_features) + num_rounds=round(len(df)/num_records) + log(INFO, ""Federated learning Client connecting to Server @: %s "",str(server_address)) + try: + flower.client.start_numpy_client(server_address=server_address, + client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train_dl, y_test_dl,model_params)) + except Exception as e: + logger.info(""AION FL Client instance error: \\n""+str(e)) + log(INFO, ""AION federated learning Client %s execution completed."",str(client_id)) + logger.info(""AION federated learning Client execution completed.""+str(client_id)) + + + elif(problem_type.lower() == ""regression""): + if (model_name.lower() == ""linearregression""): + # model=LinearRegression(**model_params,warm_start=True) + if model_params['fit_intercept'] == 'True': + model_params['fit_intercept'] = True + else: + model_params['fit_intercept'] = False + if model_params['copy_X'] == 'True': + model_params['copy_X'] = True + else: + model_params['copy_X'] = False + if model_params['positive'] == 'True': + model_params['positive'] = True + else: + model_params['positive'] = False + model=LinearRegression(**model_params) + + status=utils.setmodelName(model_name) + utils.set_initial_params_reg(model,X_train.shape[0],X_train.shape[1]) + num_rounds=round(len(df)/num_records) + log(INFO, ""Federated learning Client connecting to Server @: %s "",str(server_address)) + try: + flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test)) + except Exception as e: + logger.info(""AION FL Client instance error: \\n""+str(e)) + log(INFO, ""AION federated learning Client %s execution completed."",str(client_id)) + + elif(model_name.lower() == ""deeplearning""): + input_shape = X.shape[1] # len(selected_feature) + output_shape = len(y.value_counts()) + optimizer = model_params[""optimizer""] + loss_func = model_params[""losses""] + act_func = model_params[""activation""] + model = None + model = dl_model.dl_regression_model(input_shape, 1, + optimizer, loss_func, act_func) + + num_rounds=round(len(df)/num_records) + log(INFO, ""Federated learning Client connecting to Server @: %s "",str(server_address)) + try: + flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test,model_params)) + except Exception as e: + logger.info(""AION FL Client instance error: \\n""+str(e)) + log(INFO, ""AION federated learning Client %s execution completed."",str(client_id)) + import tensorflow as tf + + +def dl_regression_model(input_shape, output_shape, + optimizer, loss_func, act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x 
= tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros')(x) + + model = tf.keras.Model(inputs=inputs, outputs=outputs) + + model.compile(loss=loss_func , + optimizer=optimizer, + metrics=[""mean_absolute_error"", + ""mean_squared_error"", + ]) + return model + +def dl_multiClass_classification(input_shape, output_shape, + optimizer, loss_func, act_func, last_act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x = tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=last_act_func)(x) + model = tf.keras.Model(inputs=inputs, outputs=outputs) + model.compile(optimizer, loss_func, metrics=[""accuracy""]) + return model + +def dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, act_func, last_act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x = tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=last_act_func)(x) + model = tf.keras.Model(inputs=inputs, outputs=outputs) + model.compile(optimizer, loss_func, + metrics=[""accuracy""]) + return model + from typing import Tuple, Union, List +import numpy as np +from sklearn.linear_model import LogisticRegression +from sklearn.linear_model import LinearRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import SGDClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.tree import DecisionTreeClassifier +from flwr.common.logger import log +from logging import INFO + +XY = Tuple[np.ndarray, np.ndarray] +Dataset = Tuple[XY, XY] +LogRegParams = Union[XY, Tuple[np.ndarray]] +XYList = List[XY] +modelUsed=None +modelname=None +def setmodelName(modelselected): + try: + modelname=str(modelselected) + print(""setmodelName ,given modelname: \\n"",modelname) + if (modelname.lower() == 'logisticregression'): + modelUsed=LogisticRegression() + return True + elif (modelname.lower() == ""linearregression""): + modelUsed = LinearRegression() + return True + 
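+# Added note: modelUsed and modelname are rebound as locals inside setmodelName(); the
+# module-level modelUsed/modelname declared above stay None because no global statement
+# is used. Callers appear to rely only on the boolean return value, so this is flagged
+# as a comment rather than changed.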
elif (modelname.lower() == ""sgdclassifier""): + #from sklearn.linear_model import SGDClassifier + modelUsed=SGDClassifier() + return True + elif (modelname.lower() == ""knn""): + modelUsed = KNeighborsClassifier() + return True + elif" +"(modelname.lower() == ""decisiontreeclassifier""): + modelUsed = DecisionTreeClassifier() + return True + else: + return False + except Exception as e: + log(INFO, ""set fl model name fn issue: "",e) + +def get_model_parameters(model:modelUsed) -> LogRegParams: + """"""Returns the paramters of a sklearn LogisticRegression model."""""" + model_name=model.__class__.__name__ + if model.fit_intercept: + params = (model.coef_, model.intercept_) + else: + params = (model.coef_,) + + return params + + +def set_model_params( + model:modelUsed, params: LogRegParams +) -> modelUsed: + """"""Sets the parameters of a sklean LogisticRegression model."""""" + model.coef_ = params[0] + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = params[1] + except Exception as e: + log(INFO, ""set_model_params fn issue: "",e) + pass + + return model + +def set_initial_params_reg(model,no_vals,no_features): + """"""Sets initial parameters as zeros Required since model params are + uninitialized until model.fit is called. + + But server asks for initial parameters from clients at launch. Refer + to sklearn.linear_model.LogisticRegression documentation for more + information. + """""" + + no_vals = no_vals + n_features = no_features + # model.classes_ = np.array([i for i in range(n_classes)]) + model.coef_ = np.zeros( n_features,) + model_name=model.__class__.__name__ + + try: + if model.fit_intercept: + # model.intercept_ = np.ones((no_vals,1)) + model.intercept_ = np.zeros((no_vals,)) + except Exception as e: + log(INFO, ""set_initial_params fn issue: "",e) + pass + +def set_initial_params(model,no_classes,no_features): + """"""Sets initial parameters as zeros Required since model params are + uninitialized until model.fit is called. + + But server asks for initial parameters from clients at launch. Refer + to sklearn.linear_model.LogisticRegression documentation for more + information. + """""" + + n_classes = no_classes + n_features = no_features + model.classes_ = np.array([i for i in range(n_classes)]) + model.coef_ = np.zeros((n_classes, n_features)) + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + except Exception as e: + log(INFO, ""set_initial_params fn issue: "",e) + pass + + + +def shuffle(X: np.ndarray, y: np.ndarray) -> XY: + """"""Shuffle X and y."""""" + rng = np.random.default_rng() + idx = rng.permutation(len(X)) + return X[idx], y[idx] + + +def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: + """"""Split X and y into a number of partitions."""""" + return list( + zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions)) + ) + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import sys +import os +import pickle +import json +import traceback +import warnings +warnings.filterwarnings(""ignore"") + +import numpy as np +import pandas as pd +import scipy +from pandas import json_normalize +from word2number import w2n + +from river import stream + + +class incBatchPredictor(): + + def __init__(self): + self.home = os.path.dirname(os.path.abspath(__file__)) + self.configPath = os.path.join(self.home, 'production', 'Config.json') + self.configDict = {} + self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl') + self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl') + self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl') + self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl') + self.incFill = None + self.incLabelMapping = None + self.incCatEncoder = None + self.incScaler = None + self.model = None + self.targetCol = None + self.modelName = '' + self.problemType = '' + self.numFtrs = [] + self.catFtrs = [] + + def readData(self, data): + try: + if os.path.splitext(data)[1] == "".tsv"": + df=pd.read_csv(data,encoding='utf-8',sep='\\t') + elif os.path.splitext(data)[1] == "".csv"": + df=pd.read_csv(data,encoding='utf-8') + elif os.path.splitext(data)[1] == "".dat"": + df=pd.read_csv(data,encoding='utf-8') + else: + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + df = json_normalize(jsonData) + df.rename(columns=lambda x:x.strip(), inplace=True) + return df + except KeyError as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(json.dumps(output)) + except Exception as e: + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + print(json.dumps(output)) + + def readConfig(self): + with open(self.configPath, 'r', encoding= 'utf8') as f: + self.configDict = json.load(f) + self.targetCol = self.configDict['targetCol'] + if 'numFtrs' in self.configDict: + self.numFtrs = self.configDict['numFtrs'] + if 'catFtrs' in self.configDict: + self.catFtrs = self.configDict['catFtrs'] + if 'allNumCols' in self.configDict: + self.allNumCols = self.configDict['allNumCols'] + if 'allCatCols' in self.configDict: + self.allCatCols = self.configDict['allCatCols'] + if 'wordToNumCols' in self.configDict: + self.wordToNumericCols = self.configDict['wordToNumCols'] + self.emptyFtrs = self.configDict['emptyFtrs'] + self.allFtrs = self.configDict['allFtrs'] + self.modelName = self.configDict['modelName'] + self.problemType = self.configDict['problemType'] + self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl') + self.scoreParam = self.configDict['scoreParam'] + self.score = self.configDict['score'] + + def pickleLoad(self, file): + if os.path.exists(file): + with open(file, 'rb') as f: + model = pickle.load(f) + return model + else: + return None + + def s2n(self,value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan + + def convertWordToNumeric(self,dataframe,feature): + try: + dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) + return dataframe + except Exception as inst: + self.log.info(""convertWordToNumeric Failed ===>""+str(inst)) + return dataframe + + + + def loadSavedModels(self): + self.incFill = self.pickleLoad(self.incFillPath) + self.incLabelMapping = 
self.pickleLoad(self.incLabelMappingPath) + self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath) + self.incScaler = self.pickleLoad(self.incScalerPath) + self.model = self.pickleLoad(self.modelPath) + + + def apply_river_model(self, x, profModel): + print(profModel.imputers) + return pd.Series(profModel.transform_one(x)) + + def apply_enc(self, x): + return pd.Series(self.incCatEncoder.transform_one(x)) + + def dataFramePreProcess(self, df): + df = df.replace(r'^\\s*$', np.NaN, regex=True) + df = df.replace('-', np.nan) + df = df.replace('?', np.nan) + columns = list(df.columns) + if self.wordToNumericCols: + for ftr in self.wordToNumericCols: + if ftr in columns: + tempDataFrame=df.copy(deep=True) + testDf = self.convertWordToNumeric(tempDataFrame,ftr) + try: + df[ftr]=testDf[ftr].astype(float) + except: + pass + columns = list(df.columns) + for empCol in self.emptyFtrs: + if empCol in columns: + df = df.drop(columns=[empCol]) + return df + + def profiler(self, df): + df = df[self.allFtrs] + df = self.dataFramePreProcess(df) + if 'num_fill' in self.configDict: + if self.configDict['num_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allNumCols) + elif self.configDict['num_fill'] == 'zero': + df[self.numFtrs] = df[self.numFtrs].fillna(value = 0.0) + else: + for x in self.numFtrs: + if x == self.targetCol: + continue + df[x] = df[x].fillna(value = self.configDict['num_fill'][x]) + if 'cat_fill' in self.configDict: + if self.configDict['cat_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allCatCols) + elif self.configDict['cat_fill'] == 'zero': + df[self.catFtrs] = df[self.catFtrs].fillna(value = 0.0) + else: + for x in self.catFtrs: + if x == self.targetCol: + continue + df[x] = df[x].fillna(value = self.configDict['cat_fill'][x]) + + if self.incCatEncoder: + transformed_data = df[self.catFtrs].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns') + df[self.catFtrs] = transformed_data + + if self.incScaler: + df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs]) + + return df + + def trainedModel(self,X): + testStream = stream.iter_pandas(X) + preds = [] + if self.problemType.lower() == 'regression': + for xi,yi in testStream: + try: + pred = self.model.predict_proba_one(xi) + preds.append(pred) + except: + pred = self.model.predict_one(xi) + preds.append(pred) + preds = pd.DataFrame(preds) + return preds + elif self.problemType.lower() == 'classification': + for xi,yi in testStream: + try: + pred = self.model.predict_proba_one(xi) + preds.append(pred) + except: + continue + out = pd.DataFrame(preds) + return out + + + def apply_output_format(self,df,modeloutput): + if self.problemType.lower() == 'regression': + df['prediction'] = modeloutput[0] + df['prediction'] = df['prediction'].round(2) + elif self.problemType.lower() == 'classification': + modeloutput = round(modeloutput,2) + if modeloutput.shape[1] == 1: + df['prediction'] = modeloutput + df['prediction'] = df['prediction'].astype(int) + else: + try: + predCol = modeloutput.idxmax(axis=1) + df['prediction'] = predCol.astype(int) + df['prediction'] = self.incLabelMapping.inverse_transform(df['prediction']) + except: + df['prediction'] = modeloutput.idxmax(axis=1) + df['probability'] = modeloutput.max(axis=1).round(2) + modeloutput.columns = modeloutput.columns.astype(int) + modeloutput.columns = self.incLabelMapping.inverse_transform(list(modeloutput.columns)) + df['remarks'] = modeloutput.apply(lambda x: x.to_json(), axis=1) + outputjson = df.to_json(orient='records') +" +" outputjson = 
{""status"":""SUCCESS"",""data"":json.loads(outputjson)} + return(json.dumps(outputjson)) + + + + def predict(self,data): + try: + df = self.readData(data) + dfOrg = df.copy() + self.readConfig() + if len(self.configDict)!=0: + self.loadSavedModels() + df = self.profiler(df) + modeloutput = self.trainedModel(df) + dfOrg = dfOrg[self.allFtrs] + output = self.apply_output_format(dfOrg, modeloutput) + else: + pass + except Exception as e: + print(traceback.format_exc()) + output = {""status"":""FAIL"",""message"":str(e).strip('""')} + return output + +if __name__ == ""__main__"": + incBPobj = incBatchPredictor() + output = incBPobj.predict(sys.argv[1]) + print(""predictions:"",output) + +import sys +import os +import pickle +import json +import timeit +import warnings +import traceback +import logging +from pathlib import Path +warnings.filterwarnings(""ignore"") + + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from pandas import json_normalize +import shutil +from word2number import w2n +from pytz import timezone +import datetime + +from sklearn.model_selection import train_test_split +from sklearn.metrics import roc_auc_score, accuracy_score, r2_score,mean_absolute_error, mean_squared_error, recall_score, precision_score, f1_score + +from river import stream + +class incBatchLearner(): + + def __init__(self): + self.home = os.path.dirname(os.path.abspath(__file__)) + self.configPath = os.path.join(self.home, 'production', 'Config.json') + self.configDict = {} + self.updConfigDict = None + self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl') + self.incOutlierRemPath = os.path.join(self.home, 'production', 'profiler', 'incOutlierRem.pkl') + self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl') + self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl') + self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl') + self.testPath = os.path.join(self.home, 'data', 'test.csv') + self.modelName = '' + self.incFill = None + self.incLabelMapping = None + self.incCatEncoder = None + self.incScaler = None + self.incOutlierRem = None + self.model = None + self.targetCol = None + self.numFtrs = [] + self.catFtrs = [] + self.allFtrs = [] + self.logFileName=os.path.join(self.home,'log','model_training_logs.log') + filehandler = logging.FileHandler(self.logFileName, 'a','utf-8') + formatter = logging.Formatter('%(message)s') + filehandler.setFormatter(formatter) + self.log = logging.getLogger('eion') + self.log.propagate = False + self.log.addHandler(filehandler) + self.log.setLevel(logging.INFO) + + + def readData(self, data, isTest = False): + if not isTest: + self.log.info('New Data Path: '+str(data)) + else: + self.log.info('Test Data Path: '+str(data)) + startTime = timeit.default_timer() + if os.path.splitext(data)[1] == "".tsv"": + df=pd.read_csv(data,encoding='utf-8',sep='\\t') + elif os.path.splitext(data)[1] == "".csv"": + df=pd.read_csv(data,encoding='utf-8') + elif os.path.splitext(data)[1] == "".dat"": + df=pd.read_csv(data,encoding='utf-8') + else: + if os.path.splitext(data)[1] == "".json"": + with open(data,'r',encoding='utf-8') as f: + jsonData = json.load(f) + else: + jsonData = json.loads(data) + df = json_normalize(jsonData) + dataLoadTime = timeit.default_timer() - startTime + self.log.info('\\nData Load time(sec) :'+str(dataLoadTime)) + self.log.info('\\n First ten rows of new data') + self.log.info(df.head(10)) + 
self.log.info('Data Frame shape: '+str(df.shape)) + df.rename(columns=lambda x:x.strip(), inplace=True) + return df + + def readConfig(self): + with open(self.configPath, 'r', encoding= 'utf8') as f: + self.configDict = json.load(f) + self.configDict['partialFit']+=1 + self.log.info('************* Partial Fit '+str(self.configDict['partialFit'])+' *************** \\n') + msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone(""Asia/Kolkata"")).strftime('%Y-%m-%d %H:%M:%S' + ' IST') + self.log.info(msg) + self.targetCol = self.configDict['targetCol'] + if 'numFtrs' in self.configDict: + self.numFtrs = self.configDict['numFtrs'] + if 'catFtrs' in self.configDict: + self.catFtrs = self.configDict['catFtrs'] + if 'allNumCols' in self.configDict: + self.allNumCols = self.configDict['allNumCols'] + if 'allCatCols' in self.configDict: + self.allCatCols = self.configDict['allCatCols'] + if 'encCols' in self.configDict: + self.encCols = self.configDict['encCols'] + if 'wordToNumCols' in self.configDict: + self.wordToNumericCols = self.configDict['wordToNumCols'] + self.emptyFtrs = self.configDict['emptyFtrs'] + if 'encTarget' in self.configDict: + self.encTarget = self.configDict['encTarget'] + if 'noOfClasses' in self.configDict: + self.allClasses = list(range(int(self.configDict['noOfClasses']))) + self.misval_ratio = self.configDict['misval_ratio'] + self.allFtrs = self.configDict['allFtrs'] + self.modelName = self.configDict['modelName'] + self.problemType = self.configDict['problemType'] + self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl') + self.scoreParam = self.configDict['scoreParam'] + self.score = self.configDict['score'] + + def pickleLoad(self, file, filename): + if os.path.exists(file): + with open(file, 'rb') as f: + model = pickle.load(f) + file_size = os.path.getsize(file) + self.log.info(str(filename)+"" size is :""+str(file_size)+""bytes"") + return model + else: + return None + + def s2n(self,value): + try: + x=eval(value) + return x + except: + try: + return w2n.word_to_num(value) + except: + return np.nan + + def convertWordToNumeric(self,dataframe,feature): + try: + dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) + return dataframe + except Exception as inst: + self.log.info(""convertWordToNumeric Failed ===>""+str(inst)) + return dataframe + + + def pickleDump(self, model, path): + if model is not None: + with open(path, 'wb') as f: + pickle.dump(model, f) + + + def splitTrainTest(self,X,y): + if self.problemType.lower() == 'regression': + xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True) + else: + try: + xtrain,xtest,ytrain,ytest=train_test_split(X,y,stratify=y,test_size=0.2,shuffle=True) + except: + xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True) + return xtrain,xtest,ytrain,ytest + + def loadSavedModels(self): + self.incFill = self.pickleLoad(self.incFillPath, 'Online Missing Value Filler') + self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath, 'Online Label Encoder') + self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath, 'Online Categorical Encoder') + self.incScaler = self.pickleLoad(self.incScalerPath, 'Online Scaler') + self.incOutlierRem = self.pickleLoad(self.incOutlierRemPath, 'Online Outlier Detector') + self.model = self.pickleLoad(self.modelPath, str(os.path.basename(self.modelPath))[:-4]) + self.log.info('\\nData Profiler and ML models loaded in Memory') + + def saveModels(self): + os.makedirs(os.path.join(self.home, 
'production', 'profiler')) + os.makedirs(os.path.join(self.home, 'production', 'model')) + if type(self.configDict['num_fill']) == type({}) or type(self.configDict['cat_fill']) == type({}): + self.pickleDump(self.incFill, self.incFillPath) + self.pickleDump(self.incLabelMapping, self.incLabelMappingPath) + self.pickleDump(self.incCatEncoder, self.incCatEncoderPath) + self.pickleDump(self.incScaler, self.incScalerPath) + self.pickleDump(self.incOutlierRem, self.incOutlierRemPath) + self.pickleDump(self.model, self.modelPath) + self.log.info('Models saved into production') + def saveConfig(self): + with open(self.configPath, 'w', encoding= 'utf8') as f: + json.dump(self.updConfigDict, f, ensure_ascii=False) + + def apply_river_model(self, x, profModel, isTest): + if not isTest: + profModel.learn_one(x) + return pd.Series(profModel.transform_one(x)) + + def apply_enc(self, x, isTest): + if not isTest: + y = x[self.encTarget] + self.incCatEncoder.learn_one(x, y) + return pd.Series(self.incCatEncoder.transform_one(x)) + + def apply_od_pipe(self, x): + score = self.incOutlierRem.score_one(x) + is_anomaly = self.incOutlierRem.classify(score) + self.incOutlierRem.learn_one(x) + return is_anomaly + + + def dataFramePreProcess(self, df): + df = df.replace('-', np.NaN) + df = df.replace('?', np.NaN) + df = df.replace(r'^\\s*$', np.NaN, regex=True) + columns = list(df.columns) + if self.wordToNumericCols: + for ftr in self.wordToNumericCols: + if ftr in columns: + self.log.info('Converting '+ftr+' to numeric type...') + tempDataFrame=df.copy(deep=True) + testDf = self.convertWordToNumeric(tempDataFrame,ftr) + try: + df[ftr]=testDf[ftr].astype(float) + except: + pass + columns = list(df.columns) + for empCol in self.emptyFtrs: + if empCol in columns: + df = df.drop(columns=[empCol]) + columns = list(df.columns) + self.log.info( 'Detecting Missing Values') + nonNAArray=[] + numOfRows = df.shape[0] + for i in columns: + numNa=df.loc[(pd.isna(df[i])),i ].shape[0] + nonNAArray.append(tuple([i,numNa])) + self.missingCols = [] + self.emptyCols = [] + for item in nonNAArray: + numofMissingVals = item[1] + if(numofMissingVals !=0): + self.log.info('-------> Feature '+str(item[0])) + self.log.info('----------> Number of Empty Rows '+str(numofMissingVals)) + self.missingCols.append(item[0]) + + if(numofMissingVals >= numOfRows * self.misval_ratio): + self.log.info('----------> Empty: Yes') + self.log.info('----------> Permitted Rows: '+str(int(numOfRows * self.misval_ratio))) + self.emptyCols.append(item[0]) + if(len(self.missingCols) !=0): + self.log.info( '----------- Detecting for Missing Values End -----------\\n') + else: + self.log.info( '-------> Missing Value Features :Not Any') + self.log.info( '----------- Detecting for Missing Values End -----------\\n') + return df + + + def profiler(self, df, isTest=False): + if not isTest: + self.log.info('Starting" +"profiling of New Training Data') + else: + self.log.info('Starting profiling of Testing Data') + startTime = timeit.default_timer() + df = self.dataFramePreProcess(df) + if 'num_fill' in self.configDict: + if self.configDict['num_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allNumCols) + elif self.configDict['num_fill'] == 'zero': + df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0) + else: + df[self.allNumCols]= df[self.allNumCols].apply(pd.to_numeric) + df = df.astype(object).where(df.notna(), None) #river expects nan values to be None + df[self.allNumCols]= df[self.allNumCols].apply(lambda row: 
self.apply_river_model(row.to_dict(), self.incFill['num_fill'], isTest), axis='columns') + if not isTest: + self.updConfigDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in self.allNumCols} + if 'cat_fill' in self.configDict: + if self.configDict['cat_fill'] == 'drop': + df = df.dropna(axis = 0, subset=self.allCatCols) + elif self.configDict['cat_fill'] == 'zero': + df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0) + else: + df = df.astype(object).where(df.notna(), None) + df[self.allCatCols]= df[self.allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(),self.incFill['cat_fill'], isTest), axis='columns') + if not isTest: + self.updConfigDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in self.allCatCols} + if not isTest: + self.log.info('Missing value profiler model updated') + + + if self.incLabelMapping: + uq_classes = df[self.targetCol].unique() + le_classes = list(self.incLabelMapping.classes_) + uq_classes = [type(le_classes[0])(x) for x in uq_classes] + unseen_classes = set(uq_classes) - set(le_classes) + self.log.info('Existing classes: '+str(le_classes)) + if len(unseen_classes)>0: + self.log.info('New unseen classes: '+str(unseen_classes)) + le_classes.extend(unseen_classes) + from sklearn.preprocessing import LabelEncoder + self.incLabelMapping = LabelEncoder() + self.incLabelMapping.fit(le_classes) + self.log.info(self.incLabelMapping.classes_) + self.log.info('Label encoder refitted with new unseen classes') + df[self.targetCol] = df[self.targetCol].apply(str) + df[self.targetCol] = self.incLabelMapping.transform(df[self.targetCol]) + if not isTest: + self.log.info('Target column label encoding is done') + + if self.incCatEncoder: + if self.problemType.lower() == 'regression': + from sklearn.preprocessing import StandardScaler + sc = StandardScaler() + self.encTarget = 'scaledTarget' + df['scaledTarget'] = sc.fit_transform(df[self.targetCol].to_numpy().reshape(-1,1)) + transformed_data = df[self.encCols].apply(lambda row: self.apply_enc(row.to_dict(), isTest), axis='columns') + if self.targetCol in transformed_data.columns: + transformed_data.drop(self.targetCol, inplace=True, axis = 1) + df[self.catFtrs] = transformed_data + if not isTest: + self.updConfigDict['catEnc'] = [] + if len(self.catFtrs) == 1: + col = self.catFtrs[0] + self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()}) + else: + for i, col in enumerate(self.catFtrs): + if i==0: + no = '' + else: + no = str(i) + self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()}) + self.log.info('Categorical encoding is done and profiler model updated') + + if self.incScaler: + if not isTest: + self.incScaler = self.incScaler.partial_fit(df[self.numFtrs]) + self.log.info('Numerical features scaled and profiler model updated') + df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs]) + + if self.incOutlierRem and not isTest: + df = df[df[self.numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)] + df.reset_index(drop=True, inplace=True) + self.log.info('Outliers removed and profiler model updated') + + if not isTest: + self.log.info('Check config file in production folder for updated profiler values') + profilerTime = timeit.default_timer() - startTime + self.log.info('\\nProfiling time(sec) :'+str(profilerTime)) + return df + + + + def riverTrain(self, X, Y): + trainStream = stream.iter_pandas(X, Y) + for i, (xi, yi) in 
enumerate(trainStream): + if yi!=None: + self.model.learn_one(xi, yi) + + def riverEvaluate(self, xtest): + testStream = stream.iter_pandas(xtest) + preds = [] + for xi,yi in testStream: + pred = self.model.predict_one(xi) + preds.append(pred) + return preds + + def trainModel(self,df): + startTime = timeit.default_timer() + X = df[self.allFtrs] + Y = df[self.targetCol] + try: + self.riverTrain(X,Y) + trainTime = timeit.default_timer() - startTime + self.log.info('\\nModel Training time(sec) :'+str(trainTime)) + self.log.info(self.modelName+' model updated') + self.log.info('First fit model params are '+str(self.configDict['modelParams'])) + except Exception as e: + raise e + + + + def archiveModels(self): + source = os.path.join(self.home, 'production') + archivePath = os.path.join(self.home,'archives') + if os.path.isdir(archivePath): + NoOfArchives = sum(os.path.isdir(os.path.join(self.home,'archives',str(i))) for i in os.listdir(archivePath)) + destination = os.path.join(self.home,'archives',str(NoOfArchives+1)) + else: + destination = os.path.join(archivePath,'1') + if not os.path.exists(destination): + os.makedirs(destination) + allfiles = os.listdir(source) + for f in allfiles: + src_path = os.path.join(source, f) + dst_path = os.path.join(destination, f) + shutil.move(src_path, dst_path) + self.log.info('Previous production models archived') + + def get_score(self,metric,actual,predict): + if 'accuracy' in str(metric).lower(): + score = accuracy_score(actual,predict) + score = score*100 + elif 'recall' in str(metric).lower(): + score = recall_score(actual,predict,average='macro') + score = score*100 + elif 'precision' in str(metric).lower(): + score = precision_score(actual,predict,average='macro') + score = score*100 + elif 'f1_score' in str(metric).lower(): + score = f1_score(actual,predict, average='macro') + score = score*100 + elif 'roc_auc' in str(metric).lower(): + try: + score = roc_auc_score(actual,predict,average=""macro"") + except: + try: + actual = pd.get_dummies(actual) + predict = pd.get_dummies(predict) + score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr') + except: + score = 0 + score = score*100 + elif ('mse' in str(metric).lower()) or ('neg_mean_squared_error' in str(metric).lower()): + score = mean_squared_error(actual,predict) + elif ('rmse' in str(metric).lower()) or ('neg_root_mean_squared_error' in str(metric).lower()): + score=mean_squared_error(actual,predict,squared=False) + elif ('mae' in str(metric).lower()) or ('neg_mean_absolute_error' in str(metric).lower()): + score=mean_absolute_error(actual,predict) + elif 'r2' in str(metric).lower(): + score=r2_score(actual,predict)*100 + return round(score,2) + + def checkColumns(self, df): + self.log.info('Checking DataColumns in new data') + dfCols = list(df.columns) + allCols = self.allFtrs.copy() + allCols.append(self.targetCol) + missingCols = [] + for col in allCols: + if col not in dfCols: + missingCols.append(col) + if len(missingCols)>0: + raise Exception('DataFrame is missing columns: '+str(missingCols)) + else: + self.log.info('All required columns are present: '+str(list(dfCols)[:500])) + + def plotMetric(self): + y = self.configDict['metricList'] + fedrows = self.configDict['trainRowsList'] + fig = plt.figure() + ax = fig.gca() + if self.configDict['problemType'] == 'classification': + ax.set_yticks(np.arange(0, 110, 10)) + plt.ylim(ymin=0) + if self.configDict['problemType'] == 'regression': + minMet = min(y) + maxMet = max(y) + plt.ylim(minMet - 10, maxMet+10) + 
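# get_score above returns classification metrics scaled to 0-100 and rounded to two
# decimals; the macro-averaged call it wraps looks like this with toy labels:
from sklearn.metrics import f1_score

actual, predicted = [0, 1, 1, 0, 2], [0, 1, 0, 0, 2]
print(round(f1_score(actual, predicted, average='macro') * 100, 2))   # ~82.22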
plt.plot(y) + plt.ylabel(self.scoreParam) + plt.xlabel('Partial Fits') + plt.title(str(self.scoreParam)+' over training rows') + if type(fedrows[0])!=type(''): + fedrows = [str(x) for x in fedrows] + x = list(range(len(fedrows))) + for i in range(len(fedrows)): + plt.annotate(fedrows[i], (x[i], y[i] + 5)) + if self.configDict['problemType'] == 'classification': + plt.annotate(round(y[i],1), (x[i], y[i] - 3)) + plt.grid() + plt.savefig(os.path.join(self.home, 'production','metric')) + return + + def updateLearning(self,data): + try: + self.readConfig() + self.updConfigDict = self.configDict.copy() + df = self.readData(data) + self.checkColumns(df) + self.loadSavedModels() + X = df[self.allFtrs] + y = df[self.targetCol] + xtrain,xtest,ytrain,ytest = self.splitTrainTest(X,y) + dftrain = pd.concat((xtrain, ytrain), axis = 1) + dftest = pd.concat((xtest, ytest), axis = 1) + dftrain = self.profiler(dftrain) + dftest = self.profiler(dftest, isTest = True) + xtest = dftest[self.allFtrs] + ytest = dftest[self.targetCol] + self.trainModel(dftrain) + preds = self.riverEvaluate(xtest) + score = self.get_score(self.scoreParam, ytest, preds) + self.updConfigDict['score'] = score + self.log.info('Previous '+self.scoreParam+': '+str(self.configDict['score'])) + self.log.info('Current '+self.scoreParam+': '+str(self.updConfigDict['score'])) + self.configDict['trainRowsList'].append(self.configDict['trainRowsList'][-1]+xtrain.shape[0]) + self.log.info('Number of data points trained on so far: '+str(self.configDict['trainRowsList'][-1])) + self.configDict['metricList'].append(self.updConfigDict['score']) + self.archiveModels() + self.plotMetric() + self.saveModels() + self.saveConfig() + msg = self.scoreParam+': Previous:'+str(self.configDict['score'])+' Current:'+ str(self.updConfigDict['score']) + output = {""status"":""SUCCESS"",""Msg"":msg} + self.log.info(str(output)) + except Exception as e: + print(traceback.format_exc()) + self.log.info('Partial Fit Failed '+str(traceback.format_" +"exc())) + if self.updConfigDict != None: + self.saveConfig() + output = {""status"":""FAIL"",""Msg"":str(e).strip('""')} + return json.dumps(output) + +if __name__ == ""__main__"": + incBLObj = incBatchLearner() + output = incBLObj.updateLearning(sys.argv[1]) + print(""aion_learner_status:"",output) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +import io +import json +import logging +import pandas as pd +import sys +import numpy as np +from pathlib import Path +from word2number import w2n +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OrdinalEncoder +from sklearn.preprocessing import OneHotEncoder +from sklearn.impute import SimpleImputer, KNNImputer +from sklearn.pipeline import Pipeline, FeatureUnion +from sklearn.preprocessing import FunctionTransformer +from sklearn.preprocessing import MinMaxScaler,StandardScaler +from sklearn.preprocessing import PowerTransformer +from sklearn.compose import ColumnTransformer +from sklearn.base import TransformerMixin +from sklearn.ensemble import IsolationForest +from category_encoders import TargetEncoder +import scipy +try: + import transformations.data_profiler_functions as cs +except: + import data_profiler_functions as cs + +if 'AION' in sys.modules: + try: + from appbe.app_config import DEBUG_ENABLED + except: + DEBUG_ENABLED = False +else: + DEBUG_ENABLED = False +log_suffix = f'[{Path(__file__).stem}] ' + + +class profiler(): + + def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None): + if not isinstance(xtrain, pd.DataFrame): + raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type') + if xtrain.empty: + raise ValueError(f'{log_suffix}Data frame is empty') + if target and target in xtrain.columns: + self.target = xtrain[target] + xtrain.drop(target, axis=1, inplace=True) + self.target_name = target + elif ytrain: + self.target = ytrain + self.target_name = 'target' + else: + self.target = pd.Series() + self.target_name = None + self.data_path = data_path + self.encode_target = encode_target + self.label_encoder = None + self.data = xtrain + self.keep_unprocessed = keep_unprocessed + self.colm_type = {} + for colm, infer_type in zip(self.data.columns, self.data.dtypes): + self.colm_type[colm] = infer_type + self.numeric_feature = [] + self.cat_feature = [] + self.text_feature = [] + self.wordToNumericFeatures = [] + self.added_features = [] + self.pipeline = [] + self.dropped_features = {} + self.train_features_type={} + self.__update_type() + self.config = config + self.featureDict = config.get('featureDict', []) + self.output_columns = [] + self.feature_expender = [] + self.text_to_num = {} + self.force_numeric_conv = [] + if log: + self.log = log + else: + self.log = logging.getLogger('eion') + self.type_conversion = {} + + def log_dataframe(self, msg=None): + buffer = io.StringIO() + self.data.info(buf=buffer) + if msg: + log_text = f'Data frame after {msg}:' + else: + log_text = 'Data frame:' + log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t') + log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t')) + self.log.info(log_text) + + def transform(self): + if self.is_target_available(): + if self.target_name: + self.log.info(f""Target feature name: '{self.target_name}'"") + self.log.info(f""Target feature size: {len(self.target)}"") + else: + self.log.info(f""Target feature not present"") + self.log_dataframe() + print(self.data.info()) + try: + self.process() + except Exception as e: + self.log.error(e, exc_info=True) + raise + pipe = FeatureUnion(self.pipeline) + try: + if self.text_feature: + from text.textProfiler import set_pretrained_model + set_pretrained_model(pipe) + conversion_method = self.get_conversion_method() + process_data = pipe.fit_transform(self.data, 
y=self.target) + # save for testing + if DEBUG_ENABLED: + if isinstance(process_data, scipy.sparse.spmatrix): + process_data = process_data.toarray() + df = pd.DataFrame(process_data) + df.to_csv('debug_preprocessed.csv', index=False) + if self.text_feature and conversion_method == 'latentsemanticanalysis': + n_size = self.get_tf_idf_output_size( pipe) + dimensions = self.get_tf_idf_dimensions() + if n_size < dimensions or n_size > dimensions: + dimensions = n_size + from sklearn.decomposition import TruncatedSVD + reducer = TruncatedSVD( n_components = dimensions) + reduced_data = reducer.fit_transform( process_data[:,-n_size:]) + text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process') + pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer)) + if isinstance(process_data, scipy.sparse.spmatrix): + process_data = process_data.toarray() + process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1) + last_step = self.feature_expender.pop() + self.feature_expender.append({'feature_reducer':list(last_step.values())[0]}) + + except EOFError as e: + if ""Compressed file ended before the end-of-stream marker was reached"" in str(e): + raise EOFError('Pretrained model is not downloaded properly') + + self.update_output_features_names(pipe) + if isinstance(process_data, scipy.sparse.spmatrix): + process_data = process_data.toarray() + df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns) + + if self.is_target_available() and self.target_name: + df[self.target_name] = self.target + if self.keep_unprocessed: + df[self.keep_unprocessed] = self.data[self.keep_unprocessed] + self.log_numerical_fill() + self.log_categorical_fill() + self.log_normalization() + return df, pipe, self.label_encoder + + def log_type_conversion(self): + if self.log: + self.log.info('----------- Inspecting Features -----------') + self.log.info('----------- Type Conversion -----------') + count = 0 + for k, v in self.type_conversion.items(): + if v[0] != v[1]: + self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}') + self.log.info('Status:- |... 
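# The latent-semantic-analysis branch above shrinks the TF-IDF block with TruncatedSVD;
# a standalone version of that reduction (shapes and data are toy values):
import numpy as np
from sklearn.decomposition import TruncatedSVD

tfidf_block = np.random.RandomState(0).rand(10, 50)   # stand-in for the TF-IDF columns
reducer = TruncatedSVD(n_components=5)
print(reducer.fit_transform(tfidf_block).shape)       # (10, 5)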
Feature inspection done') + + def check_config(self): + removeDuplicate = self.config.get('removeDuplicate', False) + self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate) + self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio'])) + self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio'])) + self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel'])) + featureDict = self.config.get('featureDict', []) + if isinstance(featureDict, dict): + self.config['featureDict'] = [] + if isinstance(featureDict, str): + self.config['featureDict'] = [] + + def process(self): + #remove duplicate not required at the time of prediction + self.check_config() + self.remove_constant_feature() + self.remove_empty_feature(self.config['misValueRatio']) + self.remove_index_features() + self.dropna() + if self.config['removeDuplicate']: + self.drop_duplicate() + #self.check_categorical_features() + #self.string_to_numeric() + self.process_target() + self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)} + self.parse_process_step_config() + self.process_drop_fillna() + self.log_type_conversion() + self.update_num_fill_dict() + if DEBUG_ENABLED: + print(self.num_fill_method_dict) + self.update_cat_fill_dict() + self.create_pipeline() + self.text_pipeline(self.config) + self.apply_outlier() + if DEBUG_ENABLED: + self.log.info(self.process_method) + self.log.info(self.pipeline) + + def is_target_available(self): + return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target) + + def process_target(self, operation='encode', arg=None): + if self.is_target_available(): + # drop null values + self.__update_index( self.target.notna(), 'target') + if self.encode_target: + self.label_encoder = LabelEncoder() + self.target = self.label_encoder.fit_transform(self.target) + return self.label_encoder + return None + + def is_target_column(self, column): + return column == self.target_name + + def fill_default_steps(self): + + num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{})) + normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none') + for colm in self.numeric_feature: + if num_fill_method: + self.fill_missing_value_method(colm, num_fill_method.lower()) + if normalization_method: + self.fill_normalizer_method(colm, normalization_method.lower()) + + cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{})) + cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{})) + for colm in self.cat_feature: + if cat_fill_method: + self.fill_missing_value_method(colm, cat_fill_method.lower()) + if cat_encode_method: + self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True) + + def parse_process_step_config(self): + self.process_method = {} + user_provided_data_type = {} + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + user_provided_data_type[colm] = feat_conf['type'] + if user_provided_data_type: + self.update_user_provided_type(user_provided_data_type) + + self.fill_default_steps() + for feat_conf in self.featureDict: + colm = feat_conf.get('feature', '') + if not self.is_target_column(colm): + if colm in self.data.columns: + if feat_conf.get('fillMethod', None): + 
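# parse_process_step_config above pulls per-feature overrides from config['featureDict'];
# an illustrative entry (feature names and chosen methods are made up, but the keys and
# allowed values follow the checks elsewhere in this file):
feature_dict_example = [
    {'feature': 'age',  'type': 'numerical',   'fillMethod': 'median',
     'normalization': 'minmax', 'outlier': 'iqr', 'outlierOperation': 'average'},
    {'feature': 'city', 'type': 'categorical', 'fillMethod': 'mode',
     'categoryEncoding': 'labelencoding'},
]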
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower()) + if feat_conf.get('categoryEncoding', None): + self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower()) + if feat_conf.get('normalization', None): + self.fill_normalizer_method(colm, feat_conf['normalization'].lower()) + if feat_conf.get('outlier', None): + self.fill_outlier_method(colm, feat_conf['outlier'].lower()) + if feat_conf.get('outlierOperation', None): + self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower()) + + + def get_tf_idf_dimensions(self): + dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default') + return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim] + + def get_tf_idf_output_size(self, pipe): + start_index = {} + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + if start_index: + for" +"key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + return len(v) + return 0 + + def update_output_features_names(self, pipe): + columns = self.output_columns + start_index = {} + index_shifter = 0 + for feat_expender in self.feature_expender: + if feat_expender: + step_name = list(feat_expender.keys())[0] + for key,value in start_index.items(): + for k,v in value.items(): + index_shifter += len(v) + index = list(feat_expender.values())[0] + for transformer_step in pipe.transformer_list: + if transformer_step[1].steps[-1][0] in step_name: + start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()} + #print(start_index) + if start_index: + for key,value in start_index.items(): + for k,v in value.items(): + if k == 'vectorizer': + v = [f'{x}_vect' for x in v] + self.output_columns[key:key] = v + self.added_features = [*self.added_features, *v] + + + def text_pipeline(self, conf_json): + + if self.text_feature: + from text.textProfiler import textProfiler + from text.textProfiler import textCombine + pipeList = [] + text_pipe = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", self.text_feature) + ], remainder=""drop"")), + (""text_fillNa"",SimpleImputer(strategy='constant', fill_value='')), + (""merge_text_feature"", textCombine())]) + obj = textProfiler() + pipeList = obj.textProfiler(conf_json, pipeList, self.data_path) + last_step = ""merge_text_feature"" + for pipe_elem in pipeList: + text_pipe.steps.append((pipe_elem[0], pipe_elem[1])) + last_step = pipe_elem[0] + text_transformer = ('text_process', text_pipe) + self.pipeline.append(text_transformer) + self.feature_expender.append({last_step:len(self.output_columns)}) + + def create_pipeline(self): + num_pipe = {} + for k,v in self.num_fill_method_dict.items(): + for k1,v1 in v.items(): + if k1 and k1 != 'none': + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)), + (k1, self.get_num_scaler(k1)) + ]) + else: + num_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_num_imputer(k)) + ]) + self.output_columns.extend(v1) + 
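# create_pipeline above builds one selector -> imputer (-> scaler/encoder) branch per
# method pair and unions them; a single numeric branch in isolation looks like this
# (column names and chosen methods are illustrative):
import pandas as pd
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler

num_branch = Pipeline([
    ('selector', ColumnTransformer([('selector', 'passthrough', ['age'])], remainder='drop')),
    ('mean', SimpleImputer(strategy='mean')),
    ('minmax', MinMaxScaler()),
])
union = FeatureUnion([('mean_minmax', num_branch)])
X = pd.DataFrame({'age': [20.0, None, 40.0], 'city': ['a', 'b', 'a']})
print(union.fit_transform(X))   # only 'age' reaches this branch; NaN imputed, then scaled to [0, 1]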
cat_pipe = {} + for k,v in self.cat_fill_method_dict.items(): + for k1,v1 in v.items(): + cat_pipe[f'{k}_{k1}'] = Pipeline([ + ('selector', ColumnTransformer([ + (""selector"", ""passthrough"", v1) + ], remainder=""drop"")), + (k, self.get_cat_imputer(k)), + (k1, self.get_cat_encoder(k1)) + ]) + if k1 not in ['onehotencoding']: + self.output_columns.extend(v1) + else: + self.feature_expender.append({k1:len(self.output_columns)}) + for key, pipe in num_pipe.items(): + self.pipeline.append((key, pipe)) + for key, pipe in cat_pipe.items(): + self.pipeline.append((key, pipe)) + + ""Drop: feature during training but replace with zero during prediction "" + def process_drop_fillna(self): + drop_column = [] + if 'numFill' in self.process_method.keys(): + for col, method in self.process_method['numFill'].items(): + if method == 'drop': + self.process_method['numFill'][col] = 'zero' + drop_column.append(col) + if 'catFill' in self.process_method.keys(): + for col, method in self.process_method['catFill'].items(): + if method == 'drop': + self.process_method['catFill'][col] = 'zero' + drop_column.append(col) + if drop_column: + self.data.dropna(subset=drop_column, inplace=True) + + def update_num_fill_dict(self): + self.num_fill_method_dict = {} + if 'numFill' in self.process_method.keys(): + for f in cs.supported_method['fillNa']['numeric']: + self.num_fill_method_dict[f] = {} + for en in cs.supported_method['normalization']: + self.num_fill_method_dict[f][en] = [] + for col in self.numeric_feature: + numFillDict = self.process_method.get('numFill',{}) + normalizationDict = self.process_method.get('normalization',{}) + if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''): + self.num_fill_method_dict[f][en].append(col) + if not self.num_fill_method_dict[f][en] : + del self.num_fill_method_dict[f][en] + if not self.num_fill_method_dict[f]: + del self.num_fill_method_dict[f] + + def update_cat_fill_dict(self): + self.cat_fill_method_dict = {} + if 'catFill' in self.process_method.keys(): + for f in cs.supported_method['fillNa']['categorical']: + self.cat_fill_method_dict[f] = {} + for en in cs.supported_method['categoryEncoding']: + self.cat_fill_method_dict[f][en] = [] + for col in self.cat_feature: + catFillDict = self.process_method.get('catFill',{}) + catEncoderDict = self.process_method.get('catEncoder',{}) + if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''): + self.cat_fill_method_dict[f][en].append(col) + if not self.cat_fill_method_dict[f][en] : + del self.cat_fill_method_dict[f][en] + if not self.cat_fill_method_dict[f]: + del self.cat_fill_method_dict[f] + + + def __update_type(self): + self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed)) + self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed)) + self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed)) + self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed)) + + def update_user_provided_type(self, data_types): + allowed_types = ['numerical','categorical', 'text'] + skipped_types = ['date','index'] + type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),} + mapped_type = {k:type_mapping[v] for k,v in 
data_types.items() if v in allowed_types} + skipped_features = [k for k,v in data_types.items() if v in skipped_types] + if skipped_features: + self.keep_unprocessed.extend( skipped_features) + self.keep_unprocessed = list(set(self.keep_unprocessed)) + self.update_type(mapped_type, 'user provided data type') + + def get_type(self, as_list=False): + if as_list: + return [self.colm_type.values()] + else: + return self.colm_type + + def update_type(self, data_types={}, reason=''): + invalid_features = [x for x in data_types.keys() if x not in self.data.columns] + if invalid_features: + valid_feat = list(set(data_types.keys()) - set(invalid_features)) + valid_feat_type = {k:v for k,v in data_types if k in valid_feat} + else: + valid_feat_type = data_types + for k,v in valid_feat_type.items(): + if v != self.colm_type[k].name: + try: + self.data.astype({k:v}) + self.colm_type.update({k:self.data[k].dtype}) + self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) + except: + self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason) + if v == np.dtype('float64') and self.colm_type[k].name == 'object': + if self.check_numeric( k): + self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce') + self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) + self.force_numeric_conv.append( k) + else: + raise ValueError(f""Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%"") + self.data = self.data.astype(valid_feat_type) + self.__update_type() + + def check_numeric(self, feature): + col_values = self.data[feature].copy() + col_values = pd.to_numeric(col_values, errors='coerce') + if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): + return True + return False + + def string_to_numeric(self): + def to_number(x): + try: + return w2n.word_to_num(x) + except: + return np.nan + for col in self.text_feature: + col_values = self.data[col].copy() + col_values = pd.to_numeric(col_values, errors='coerce') + if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): + self.text_to_num[col] = 'float64' + self.wordToNumericFeatures.append(col) + if self.text_to_num: + columns = list(self.text_to_num.keys()) + self.data[columns] = self.data[columns].apply(lambda x: to_number(x), axis=1, result_type='broadcast') + self.update_type(self.text_to_num) + self.log.info('----------- Inspecting Features -----------') + for col in self.text_feature: + self.log.info(f'-------> Feature : {col}') + if col in self.text_to_num: + self.log.info('----------> Numeric Status :Yes') + self.log.info('----------> Data Type Converting to numeric :Yes') + else: + self.log.info('----------> Numeric Status :No') + self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric') + self.log.info(f'\\nStatus:- |... 
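# string_to_numeric above leans on the word2number package for the actual conversion;
# the underlying call is simply:
from word2number import w2n
print(w2n.word_to_num('twenty one'))   # 21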
Feature word to numeric treatment done: {self.text_to_num}') + self.log.info('----------- Inspecting Features End -----------') + + def check_categorical_features(self): + num_data = self.data.select_dtypes(include='number') + num_data_unique = num_data.nunique() + num_to_cat_col = {} + for i, value in enumerate(num_data_unique): + if value < self.config['categoryMaxLabel']: + num_to_cat_col[num_data_unique.index[i]] = 'category' + if num_to_cat_col: + self.update_type(num_to_cat_col, 'numerical to categorical') + str_to_cat_col = {} + str_data = self.data.select_dtypes(include='object') + str_data_unique = str_data.nunique() + for i, value in enumerate(str_data_unique): + if value < self.config['categoryMaxLabel']: + str_to_cat_col[str_data_unique.index[i]] = 'category' + for colm in str_data.columns: + if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']: + str_to_cat_col[colm] = 'category' + if str_to_cat_col: + self.update_type(str_to_cat_col, 'text to categorical') + + def drop_features(self, features=[], reason='unspecified'): + if isinstance(features, str): + features = [features] + feat_to_remove = [x for x in features if x in self.data.columns] + if feat_to_remove: + self.data.drop(feat_to_remove, axis=1, inplace=True) + for feat in feat_to_remove: + self.dropped_features[feat] = reason + self.log_drop_feature(feat_to_remove, reason) + self.__update_type() + + def __update_index(self, indices, reason=''): + if isinstance(indices, (bool, pd.core.series.Series)) and len(indices) == len(self.data): + if not indices.all(): + self.data = self.data[indices] + if self.is_target_available(): + self.target = self.target[indices] + self.log_update_index((indices == False).sum(), reason) + + def dropna(self): + self.data.dropna(how='all',inplace=True) + if self.is_target_available(): + self.target = self.target[self.data.index] + + def drop_duplicate(self): + index = self.data.duplicated(keep='first') + self.__update_index( ~index, reason='dup" +"licate') + + def log_drop_feature(self, columns, reason): + self.log.info(f'---------- Dropping {reason} features ----------') + self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found') + self.log.info(f'-------> Drop Features: {columns}') + self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}') + + def log_update_index(self,count, reason): + if count: + if reason == 'target': + self.log.info('-------> Null Target Rows Drop:') + self.log.info(f'-------> Dropped rows count: {count}') + elif reason == 'duplicate': + self.log.info('-------> Duplicate Rows Drop:') + self.log.info(f'-------> Dropped rows count: {count}') + elif reason == 'outlier': + self.log.info(f'-------> Dropped rows count: {count}') + self.log.info('Status:- |... Outlier treatment done') + self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}') + + def log_normalization(self): + if self.process_method.get('normalization', None): + self.log.info(f'\\nStatus:- !... Normalization treatment done') + for method in cs.supported_method['normalization']: + cols = [] + for col, m in self.process_method['normalization'].items(): + if m == method: + cols.append(col) + if cols and method != 'none': + self.log.info(f'Running {method} on features: {cols}') + + def log_numerical_fill(self): + if self.process_method.get('numFill', None): + self.log.info(f'\\nStatus:- !... 
Fillna for numeric feature done') + for method in cs.supported_method['fillNa']['numeric']: + cols = [] + for col, m in self.process_method['numFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + def log_categorical_fill(self): + if self.process_method.get('catFill', None): + self.log.info(f'\\nStatus:- !... FillNa for categorical feature done') + for method in cs.supported_method['fillNa']['categorical']: + cols = [] + for col, m in self.process_method['catFill'].items(): + if m == method: + cols.append(col) + if cols: + self.log.info(f'-------> Running {method} on features: {cols}') + + + def remove_constant_feature(self): + unique_values = self.data.nunique() + constant_features = [] + for i, value in enumerate(unique_values): + if value == 1: + constant_features.append(unique_values.index[i]) + if constant_features: + self.drop_features(constant_features, ""constant"") + + def remove_empty_feature(self, misval_ratio=1.0): + missing_ratio = self.data.isnull().sum() / len(self.data) + missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)} + empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio] + if empty_features: + self.drop_features(empty_features, ""empty"") + + def remove_index_features(self): + index_feature = [] + + for feat in self.numeric_feature: + if self.data[feat].nunique() == len(self.data): + #if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)): + # index feature can be time based + count = (self.data[feat] - self.data[feat].shift() == 1).sum() + if len(self.data) - count == 1: + index_feature.append(feat) + self.drop_features(index_feature, ""index"") + + def fill_missing_value_method(self, colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['fillNa']['numeric']: + if 'numFill' not in self.process_method.keys(): + self.process_method['numFill'] = {} + if method == 'na' and self.process_method['numFill'].get(colm, None): + pass # don't overwrite + else: + self.process_method['numFill'][colm] = method + if colm in self.cat_feature: + if method in cs.supported_method['fillNa']['categorical']: + if 'catFill' not in self.process_method.keys(): + self.process_method['catFill'] = {} + if method == 'na' and self.process_method['catFill'].get(colm, None): + pass + else: + self.process_method['catFill'][colm] = method + + def check_encoding_method(self, method, colm,default=False): + if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]): + method = cs.target_encoding_method_change[method.lower()] + if default: + self.log.info(f""Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present"") + return method + + def fill_encoder_value_method(self,colm, method, default=False): + if colm in self.cat_feature: + if method.lower() in cs.supported_method['categoryEncoding']: + if 'catEncoder' not in self.process_method.keys(): + self.process_method['catEncoder'] = {} + if method == 'na' and self.process_method['catEncoder'].get(colm, None): + pass + else: + self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) + else: + self.log.info(f""-------> categorical encoding method '{method}' is not supported. 
supported methods are {cs.supported_method['categoryEncoding']}"") + + def fill_normalizer_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['normalization']: + if 'normalization' not in self.process_method.keys(): + self.process_method['normalization'] = {} + if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): + pass + else: + self.process_method['normalization'][colm] = method + else: + self.log.info(f""-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}"") + + def apply_outlier(self): + inlier_indice = np.array([True] * len(self.data)) + if self.process_method.get('outlier', None): + self.log.info('-------> Feature wise outlier detection:') + for k,v in self.process_method['outlier'].items(): + if k in self.numeric_feature: + if v == 'iqr': + index = cs.findiqrOutlier(self.data[k]) + elif v == 'zscore': + index = cs.findzscoreOutlier(self.data[k]) + elif v == 'disable': + index = None + if k in self.process_method['outlierOperation'].keys(): + if self.process_method['outlierOperation'][k] == 'dropdata': + inlier_indice = np.logical_and(inlier_indice, index) + elif self.process_method['outlierOperation'][k] == 'average': + mean = self.data[k].mean() + index = ~index + self.data.loc[index,[k]] = mean + self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') + elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': + self.log.info(f'-------> Total outliers in ""{k}"": {(~index).sum()}') + if self.config.get('outlierDetection',None): + if self.config['outlierDetection'].get('IsolationForest','False') == 'True': + if self.numeric_feature: + index = cs.findiforestOutlier(self.data[self.numeric_feature]) + inlier_indice = np.logical_and(inlier_indice, index) + self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') + if inlier_indice.sum() != len(self.data): + self.__update_index(inlier_indice, 'outlier') + + def fill_outlier_method(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlier_column_wise']: + if 'outlier' not in self.process_method.keys(): + self.process_method['outlier'] = {} + if method not in ['Disable', 'na']: + self.process_method['outlier'][colm] = method + else: + self.log.info(f""-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}"") + + def fill_outlier_process(self,colm, method): + if colm in self.numeric_feature: + if method in cs.supported_method['outlierOperation']: + if 'outlierOperation' not in self.process_method.keys(): + self.process_method['outlierOperation'] = {} + self.process_method['outlierOperation'][colm] = method + else: + self.log.info(f""-------> outlier process method '{method}' is not supported for column wise. 
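# The column-wise 'iqr' option above keeps rows inside Q1 - 1.5*IQR .. Q3 + 1.5*IQR,
# mirroring findiqrOutlier defined later in this file (series values are toy data):
import pandas as pd

s = pd.Series([1, 2, 3, 4, 100])
q1, q3 = s.quantile(0.25), s.quantile(0.75)
iqr = q3 - q1
inliers = ~((s < q1 - 1.5 * iqr) | (s > q3 + 1.5 * iqr))
print(inliers.tolist())   # [True, True, True, True, False] -> 100 is flagged as an outlier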
supported methods are {cs.supported_method['outlierOperation']}"") + + def get_cat_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_cat_encoder(self,method): + if method == 'labelencoding': + return OrdinalEncoder() + elif method == 'onehotencoding': + return OneHotEncoder(sparse=False,handle_unknown=""ignore"") + elif method == 'targetencoding': + if not self.is_target_available(): + raise ValueError('Can not apply Target Encoding when target feature is not present') + return TargetEncoder() + + def get_num_imputer(self,method): + if method == 'mode': + return SimpleImputer(strategy='most_frequent') + elif method == 'mean': + return SimpleImputer(strategy='mean') + elif method == 'median': + return SimpleImputer(strategy='median') + elif method == 'knnimputer': + return KNNImputer() + elif method == 'zero': + return SimpleImputer(strategy='constant', fill_value=0) + + def get_num_scaler(self,method): + if method == 'minmax': + return MinMaxScaler() + elif method == 'standardscaler': + return StandardScaler() + elif method == 'lognormal': + return PowerTransformer(method='yeo-johnson', standardize=False) + + def recommenderStartProfiler(self,modelFeatures): + return cs.recommenderStartProfiler(self,modelFeatures) + + def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): + return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation) + + def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2) + + def get_conversion_method(self): + return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower() + +def set_features(features,profiler=None): + return cs.set_features(features,profiler) + + + import os +import sys +import numpy as np +import scipy +import pandas as pd +from pathlib import Path + +default_config = { + 'misValueRatio': '1.0', + 'numericFeatureRatio': '1.0', + 'categoryMaxLabel': '20', + 'str_to_cat_len_max': 10 +} + +target_encoding_method_change = {'targetencoding': 'labelencoding'} + +supported_method = { + 'fillNa': + { + 'categorical' : ['mode','zero','na'], + 'numeric' : ['median','mean','knnimputer','zero','drop','na'], + }, + 'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'], + 'normalization': ['standardscaler','minmax','lognormal', 'na','none'], + 'outlier_column_wise': ['iqr','zscore', 'disable', 'na'], + 'outlierOperation': ['dropdata', 'average', 'nochange'] + } + +def findiqrOutlier(df): + Q1 = df.quantile(0.25) + Q3 = df.quantile(0.75) + IQR = Q3 - Q1 + index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))) + return index + +def findzscoreOutlier(df): + z = np.abs(scipy.stats.zscore(df)) + index = (z < 3) + return index + +def findiforestOutlier(df): + from sklearn.ensemble import IsolationForest + isolation_forest = IsolationForest(n_estimators=100) + isolation_forest.fit(df) + y_pred_train = isolation_forest.predict(df) + return y_pred_train == 1 + +def get_one_true_option(d, default_value=None): + if isinstance(d, dict): + for k,v in d.items(): + if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): + return k + return default_value + +def get_boolean(value): + if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): + return True + else: + return False + +def 
recommenderStartProfiler(self,modelFeatures): + try: + self.log.info('----------> FillNA:0') + self.data = self.data.fillna(value=0) + self.log.info('Status:- !... Missing value treatment done') + self.log.info('----------> Remove Empty Row') + self.data = self.data.dropna(axis=0,how='all') + self.log.info('Status:- !... Empty feature treatment done') + userId,itemId,rating = modelFeatures" +".split(',') + self.data[itemId] = self.data[itemId].astype(np.int32) + self.data[userId] = self.data[userId].astype(np.int32) + self.data[rating] = self.data[rating].astype(np.float32) + return self.data + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + return(self.data) + +def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): + try: + dataset_directory = Path(folderlocation) + dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name'] + tfrecord_directory = Path(deployLocation)/'Video_TFRecord' + from savp import PreprocessSAVP + import csv + csvfile = open(dataset_csv_file, newline='') + csv_reader = csv.DictReader(csvfile) + PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory) + dataColumns = list(self.data.columns) + VideoProcessing = True + return dataColumns,VideoProcessing,tfrecord_directory + except Exception as inst: + self.log.info(""Error: dataProfiler failed ""+str(inst)) + +def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): + import os + try: + features = [doc_col_1, doc_col_2] + pipe = None + dataColumns = list(self.data.columns) + self.numofCols = self.data.shape[1] + self.numOfRows = self.data.shape[0] + from transformations.textProfiler import textProfiler + + self.log.info('-------> Execute Fill NA With Empty String') + self.data = self.data.fillna(value="" "") + self.log.info('Status:- |... Missing value treatment done') + self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1]) + self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2]) + self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2) + self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) + from tensorflow.keras.preprocessing.text import Tokenizer + pipe = Tokenizer() + pipe.fit_on_texts(self.data['text'].values) + self.log.info('-------> Tokenizer: Fit on Concatenate Field') + self.log.info('Status:- |... 
Tokenizer the text') + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + self.data[doc_col_1] = self.data[doc_col_1].astype(str) + return (self.data, pipe, self.target_name, features) + except Exception as inst: + self.log.info(""StartProfiler failed "" + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + +def set_features(features,profiler=None): + if profiler: + features = [x for x in features if x not in profiler.added_features] + return features + profiler.text_feature + return features # -*- coding: utf-8 -*- +"""""" +Created on Wed Jun 15 14:36:11 2022 + +@author: @aionteam +"""""" +import flwr +import flwr as fl +import tensorflow as tf +from typing import Any, Callable, Dict, List, Optional, Tuple +import utils +from sklearn.metrics import log_loss +from sklearn.linear_model import LogisticRegression +from sklearn.linear_model import LinearRegression +from sklearn.metrics import mean_squared_error, mean_absolute_error,r2_score +from typing import Dict +import numpy as np +import logging +import os +# import sys +from flwr.common.logger import log +from logging import INFO +import pickle as pkl +from flwr.server.client_proxy import ClientProxy +import dl_model +from sklearn.preprocessing import StandardScaler +import pandas as pd +## Below import can be used when aion specific grpc communication used. +# from aionflgrpcserver import aionflgrpcserver + +# Make TensorFlow logs less verbose +os.environ[""TF_CPP_MIN_LOG_LEVEL""] = ""3"" +os.environ[""GRPC_VERBOSITY""] = ""debug"" + +# """""" AION Federated Learning Server. Geting weights from clients, aggregate the weights by FedAvg algorithm and update the client model."""""" +class flserver(): + def __init__(self,df,confdata): + + self.log = logging.getLogger('AION') + ## Below params will be used later + self.confparams=confdata + self.df=df + self.fl_round=0 + print(""Inside flserver init func"") + + + ## Flower server number of rounds for fl model update (clients-server) + def fit_round(self,rnd: int) -> Dict: + """"""Send round number to client."""""" + self.fl_round=rnd + log(INFO, ""==========================="") + self.log.info(""federated learning round: ""+str(rnd)) + log(INFO, ""federated learning round: %s "",str(rnd)) + log(INFO, ""==========================="") + # print(f""federated learning round: {rnd}"") + return {""rnd"": rnd} + + def fit_config(self,rnd: int) -> Dict: + """"""Send round number to client."""""" + self.round_count = rnd + log(INFO, ""==========================="") + log(INFO, ""Starting round %s ..."",str(rnd)) + log(INFO, ""==========================="") + model_hyperparams = self.confparams[""model_hyperparams""] + batch_size = model_hyperparams[""batch_size""] + local_epochs = model_hyperparams[""epochs""] + config = { + ""batch_size"": int(batch_size), + # ""local_epochs"": 1 if rnd < 2 else 2, + ""local_epochs"": int(local_epochs), + ""rnd"": rnd, + } + return config + + def evaluate_config(self, rnd: int): + model_hyperparams = self.confparams[""model_hyperparams""] + val_batch_size = model_hyperparams[""batch_size""] + # val_steps = 5 if rnd < 4 else 10 + return {""val_batch_size"": int(val_batch_size)} + + ## Loading configuration parameters + def configload(self,confparams): + try: + data=confparams + server_ip=str(data[""server_IP""]) + server_port=str(data[""server_port""]) + model_name=str(data[""model_name""]) + 
num_clients=int(data[""min_available_clients""]) + min_fit_clients=int(data[""min_fit_clients""]) + num_train_round=int(data[""fl_round""]) + data_location=str(data[""data_location""]) + model_params=data[""model_hyperparams""] + problem_type=data[""problem_type""] + server_address=f""{server_ip}:{server_port}"" + # model_location=str(data[""model_store""]) + model_version=str(data[""version""]) + selected_feature=data[""selected_feature""] + if (type(selected_feature) is str): + selected_feature=selected_feature.split(',') + target_feature=data[""target_feature""] + evaluation_required=data[""evaluation_required""] + self.log.info(""Federated Learning ""+str(server_address)) + except Exception as e: + log(INFO, ""Reading server config file issue. Err.Msg: %s "",str(e)) + return server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required + + ## Save the final model + def model_save(self,model,model_name,problem_type,version): + + cwd = os.path.abspath(os.path.dirname(__file__)) + model_location=os.path.join(cwd, 'models') + model_name=model_name + version=str(version) + model_name=self.confparams[""model_name""] + if (model_name.lower() == ""deeplearning""): + file_name = model_name + '_' +problem_type+'_'+version+ "".h5"" + else: + file_name=file_name = model_name + '_' +problem_type+'_'+version+"".sav"" + saved_model=os.path.normpath(os.path.join(model_location,file_name)) + self.log.info(""saved_model path: ""+str(saved_model)) + try: + with open (saved_model,'wb') as f: + pkl.dump(model,f) + return True + except Exception as e: + self.log.info(""fl server model save error. Error Msg: ""+str(e)) + return False + + ## Load the model, not used now. If user want to use aion trained model for evaluation at serverside, use this fn. + def model_load(self, path): + model_name=self.confparams[""model_name""] + if (model_name.lower() == ""deeplearning""): + loaded_model = tf.keras.models.load_model(path) + else: + loaded_model = pkl.load(open(path, 'rb')) + return loaded_model + + # Fo normal ml models, def get_eval_fn, evaluate each round results with own dataset. It is optional, without this, fed server will aggregate (fedAvg) client weights and update results to clients without evaluate. + def get_eval_fn(self,model,X,y,model_name,model_version): + """"""Return an evaluation function for server-side evaluation."""""" + self.log.info(""X_eval: \\n""+str(X.shape)) + self.log.info(""y_eval: \\n""+str(y.shape)) + # scaler = StandardScaler() + # X_scaled = scaler.fit_transform(X) + # y = pd.get_dummies(y) + # y_class = None + def evaluate(server_round: int, + parameters: fl.common.NDArrays, + config: Dict[str, fl.common.Scalar],): + # self.log.info(""server side fedavg weights \\n ""+str(parameters)) + try: + problem_type=self.confparams[""problem_type""] + # if (self.model_name.lower() == 'logisticregression' ): + # loss = log_loss(y, model.predict_proba(X)) + # else: + # loss = log_loss(y, model.predict(X)) + if (problem_type.lower() == 'classification'): + if (model_name.lower() == 'logisticregression' ): + utils.set_model_params(model, parameters) + loss = log_loss(y, model.predict_proba(X)) + # loss = log_loss(y, model.predict_proba(X)) + accuracy = model.score(X, y) + log(INFO, ""Server evaluation FL Round: %s processed Weights. 
-- Loss: %s, -- Accuracy: %s "",str(self.fl_round),str(loss), str(accuracy)) + self.log.info(""Accuracy: ""+str(accuracy)) + self.log.info(""model coefficients: ""+str(model.coef_)) + self.log.info(""model intercept: ""+str(model.intercept_)) + problem_type=self.confparams[""problem_type""] + self.model_save(model,model_name,problem_type,model_version) + return loss, {""accuracy"": accuracy} + else: + if (model_name.lower() == 'linearregression' ): + print(model, type(model)) + print(model.get_params) + # rmse = mean_squared_error(y, model.predict(X), square=True) + rmse = np.sqrt(mean_squared_error(y, model.predict(X))) + mae = mean_absolute_error(y, model.predict(X)) + r2=r2_score(y, model.predict(X)) + loss = rmse + mse=mean_squared_error(y, model.predict(X)) + rmse = np.sqrt(mean_squared_error(y, model.predict(X))) + mae = mean_absolute_error(y, model.predict(X)) + r2=r2_score(y, model.predict(X)) + loss = rmse + results = { + ""mean_absolute_error"": mae, + ""mean_squared_error"": mse, + ""root_mean_squared_error"": rmse, + ""r2"":r2, + } + # accuracy=r2 + log(INFO, ""Server evaluation FL Round: %s processed Weights. -- Loss: %s, -- metrics: %s "",str(self.fl_round),str(rmse), str(results)) + self.log.info(""model coefficients: ""+str(model.coef_)) + self.log.info(""model intercept: ""+str(model.intercept_)) + self.model_save(model,model_name,problem_type,model_version) + # return loss, len(X), results + return loss, results + + except Exception as e: + log(INFO, ""evaluate error msg: %s "",str(e)) + return evaluate + + # for deep learn models, def get_eval_fn, evaluate each round results with own dataset. It is optional, without this, fed server will aggregate (fedAvg) client weights and update results to clients without evaluate. + def get_eval_fn_dl(self, model,X,y,model_name,model_version): + try: + scaler = StandardScaler() + X_scaled = scaler.fit_transform(X) + # y = pd.get_dummies(y) + y_class = None + def evaluate( + server_round: int, + weights: fl.common.NDArrays, + config: Dict[str, fl.common.Scalar], + ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: + # Update model with the latest parameters + model.set_weights(weights) + problem_type = self.confparams[""problem_type""] + self.model_save(model, model_name,problem_type, model_version) + if model_name == 'deeplearning': + if problem_type == 'regression': + loss, mean_absolute_error, mean_squared_error = model.evaluate(X_scaled, + y, + verbose=1) + y_pred = model.predict(X_scaled) + from sklearn import metrics + root_mean_squared_error =" +"np.sqrt(metrics.mean_squared_error(y, y_pred)) + log(INFO, ""global model mean_absolute_error: %f "",mean_absolute_error) + log(INFO, ""global model mean_squared_error: %f "",mean_squared_error) + log(INFO, ""global model root_mean_squared_error: %f "",root_mean_squared_error) + return loss, {""mean_absolute_error"": mean_absolute_error, + ""mean_squared_error"": mean_squared_error, + ""root_mean_squared_error"": root_mean_squared_error} + + if problem_type == 'classification': + y_class = pd.get_dummies(y) + loss, accuracy = model.evaluate(X_scaled, y_class, verbose=1) + log(INFO, ""global model accuracy: %f "",round(accuracy * 100, 2)) + log(INFO, ""global model loss: %f "", round(loss, 2)) + return loss, {""accuracy"": accuracy} + except Exception as e: + log(INFO, ""get_eval_fn_dl error: %s "",str(e)) + + return evaluate + + + """""" Below part is the aion specific grpc functions. To start the grpc server and client. Currently below modules are not used. 
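# The server-side regression evaluation above is built from plain sklearn metrics;
# with toy values the same computation is:
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

y_true, y_pred = [3.0, 5.0, 2.0], [2.5, 5.0, 4.0]
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
print(rmse, mean_absolute_error(y_true, y_pred), r2_score(y_true, y_pred))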
"""""" + # def callaiongrpcserver(self): + # agrpcobj = aionflgrpcserver() + # status=agrpcobj.startgrpcerver() + # print(""server grpc start status: \\t"",status) + # return status + # def stopaiongrpcserver(self): + # agrpcobj = aionflgrpcserver() + # status=agrpcobj.shutserver() + # print(""server grpc stop status: \\t"",status) + # return status + + ## This function called from aionflmain.py, and run server. + + ## Getting flower fl strategy + def get_strategy(self,min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn): + strategy = fl.server.strategy.FedAvg( + min_fit_clients=min_fit_clients, + min_available_clients=num_clients, + evaluate_fn=eval_fn, + on_fit_config_fn=on_fit_config_fn, + on_evaluate_config_fn=on_evaluate_config_fn, + # initial_parameters=fl.common.weights_to_parameters(model.get_weights()), + ) + return strategy + + def runFLServer(self): + try: + server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required = self.configload(self.confparams) + df = self.df + if (evaluation_required.lower() == 'true'): + ## One more check for NaN,Inf occurance in dataframe + df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)] + ## Remove index if passed. + df=df.reset_index(drop=True) + y=df[target_feature] + X=df[selected_feature] + if (problem_type.lower() == ""classification""): + if (model_name.lower() == ""logisticregression""): + #n_classes = df[target_feature].nunique() + no_classes = len(df.groupby(target_feature).count()) + no_features=len(selected_feature) + self.log.info(""no_classes: ""+str(no_classes)) + self.log.info(""no_features: ""+str(no_features)) + modelName=""logisticregression"" + try: + model = LogisticRegression(**model_params, warm_start=True) + except Exception as e: + self.log.info(""LR model error: \\n""+str(e)) + status=utils.setmodelName(modelName) + utils.set_initial_params(model,no_classes,no_features) + + eval_fn=self.get_eval_fn(model,X,y,model_name,model_version) + on_fit_config_fn=self.fit_round + on_evaluate_config_fn=None + min_fit_clients=2 + strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + elif (model_name.lower() == ""deeplearning""): + # model_hyperparams = self.confparams[""model_hyperparams""] + optimizer = model_params[""optimizer""] + loss_func = model_params[""losses""] + act_func = model_params[""activation""] + last_act_func = model_params[""last_activation""] + input_shape = X.shape[1] # len(selected_feature) + output_shape = len(y.value_counts()) + model = None + if output_shape == 2: + if last_act_func == ""sigmoid"" and loss_func == ""binary_crossentropy"": + model = dl_model.dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, + act_func, last_act_func) + elif last_act_func == ""softmax"" and loss_func == ""categorical_crossentropy"": + model = dl_model.dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, + act_func, last_act_func) + else: + model = dl_model.dl_multiClass_classification(input_shape, + output_shape, optimizer, loss_func, + act_func, last_act_func) + + print(model.summary()) + eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version) + on_fit_config_fn=self.fit_config + on_evaluate_config_fn=self.evaluate_config + strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + + elif(problem_type.lower() == ""regression""): + + 
if (model_name.lower() == ""linearregression""): + model=LinearRegression(**model_params) + status=utils.setmodelName(model_name) + utils.set_initial_params_reg(model,X.shape[0],len(selected_feature)) + # utils.set_initial_params_reg(model,X.shape[0],X.shape[1]) + eval_fn=self.get_eval_fn(model,X,y,model_name,model_version) + on_fit_config_fn=self.fit_round + on_evaluate_config_fn=None + min_fit_clients=2 + strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + + elif(model_name.lower() == ""deeplearning""): + input_shape = X.shape[1] # len(selected_feature) + output_shape = len(y.value_counts()) + optimizer = model_params[""optimizer""] + loss_func = model_params[""losses""] + act_func = model_params[""activation""] + model = None + model = dl_model.dl_regression_model(input_shape, 1, + optimizer, loss_func, act_func) + eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version) + on_fit_config_fn=self.fit_config + on_evaluate_config_fn=self.evaluate_config + strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + + + + elif (evaluation_required.lower() == 'false'): + + eval_fn=None + if (model_name.lower() == ""deeplearning""): + # min_fit_clients =int( model_params[""min_fit_clients""]) + on_fit_config_fn=self.fit_config + on_evaluate_config_fn=self.evaluate_config + strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + else: + min_fit_clients=0 + on_fit_config_fn=self.fit_round + on_evaluate_config_fn=None + # strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) + strategy = fl.server.strategy.FedAvg( + min_available_clients=num_clients, + eval_fn=None, + on_fit_config_fn=self.fit_round,) + else: + log(INFO, ""Please opt server evaluation as True or False in server configuration file."") + + log(INFO, ""Federated learning Server started at @: %s "",str(server_address)) + server_rnd=1 + while (1): + try: + fl.server.start_server(server_address=server_address, strategy=strategy, config=fl.server.ServerConfig(num_rounds=num_train_round))# config={""num_rounds"": num_train_round})#config=fl.server.ServerConfig(num_rounds=3) #,force_final_distributed_eval=True) + except Exception as e: + log(INFO, ""Server exception: %s "",str(e)) + + log(INFO, ""AION federated learning server completed for execution cycle: %s "",str(server_rnd)) + # Evaluate the final trained model + + server_rnd+=1 + + + log(INFO, ""AION federated learning server execution successfully completed. Please check the log file for more information."") + return True + except Exception as e: + self.log.info(""AION Federated Learning Server run error. 
Error Msg: ""+str(e)) + log(INFO, ""Server not executing, err.msg: %s "",str(e)) + return False + + + +# Start Flower server for n rounds of federated learning +# if __name__ == ""__main__"": + + + +# ''' Testing purpose code ''' + + # super_obj=flserver1() + # json_file=sys.argv[1] + # super_obj.log.info(""User json_file: \\n""+str(json_file)) + # # configfile=None + # server_address,model_name,num_clients,num_train_round,data_location,model_version,model_version,selected_feature,target_feature = super_obj.configload(super_obj.confparams) + # df = pd.read_csv(data_location) + # # df=super_obj.df + # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] + # df=df.reset_index(drop=True) + + # y=df[target_feature] + # X = df.drop(target_feature, axis=1) + # no_classes = len(df.groupby(target_feature).count()) + # no_features=len(selected_feature.split(',')) + # print(""no_classes: \\n"",no_classes) + # print(""no_features: \\n"",no_features) + # # num_classes = y_train.apply(pd.Series.nunique) + + # if (model_name.lower() == ""logisticregression""): + # modelName=""logisticregression"" + # model = LogisticRegression(penalty=""l2"",max_iter=10, warm_start=True) + # ## May be used in future (model load for server side eval) + # # model=super_obj.model_load(model_location) + + + + # status=utils.setmodelName(modelName) + # utils.set_initial_params(model,no_classes,no_features) + # strategy = fl.server.strategy.FedAvg( + # min_available_clients=num_clients, + # eval_fn=super_obj.get_eval_fn(model,X,y), + # on_fit_config_fn=super_obj.fit_round,) + + + # # super_obj.log.info(""Stating federated learning server.....\\n"") + # log(INFO, ""Stating AION federated learning server....."") + # fl.server.start_server(server_address, strategy=strategy, config={""num_rounds"": num_train_round}) + # # super_obj.log.info(""federated learning server execution completed.\\n"") + # log(INFO, ""AION federated learning server execution completed....."") + + import tensorflow as tf + + +def dl_regression_model(input_shape, output_shape, + optimizer, loss_func, act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x = tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros')(x) + + model = tf.keras.Model(inputs=inputs, outputs=outputs) + + model.compile(loss=loss_func , + optimizer=optimizer, + metrics=[""mean_absolute_error"", + ""mean_squared_error"", + ]) + return model + +def dl_multiClass_classification(input_shape, output_shape, + optimizer, loss_func, act_func, last_act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x = tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + " +" bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + 
bias_initializer='zeros', + activation=act_func)(x) + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=last_act_func)(x) + model = tf.keras.Model(inputs=inputs, outputs=outputs) + model.compile(optimizer, loss_func, metrics=[""accuracy""]) + return model + +def dl_binary_classification(input_shape, output_shape, + optimizer, loss_func, act_func, last_act_func): + inputs = tf.keras.Input(shape=(input_shape,)) + x = tf.keras.layers.Dense(64, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(inputs) + x = tf.keras.layers.Dense(32, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(16, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + x = tf.keras.layers.Dense(8, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=act_func)(x) + + outputs = tf.keras.layers.Dense(output_shape, + kernel_initializer='he_normal', + bias_initializer='zeros', + activation=last_act_func)(x) + model = tf.keras.Model(inputs=inputs, outputs=outputs) + model.compile(optimizer, loss_func, + metrics=[""accuracy""]) + return model + from typing import Tuple, Union, List +import numpy as np +from sklearn.linear_model import LogisticRegression +from sklearn.linear_model import LinearRegression +from sklearn.naive_bayes import GaussianNB +from sklearn.linear_model import SGDClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.tree import DecisionTreeClassifier +from flwr.common.logger import log +from logging import INFO + +XY = Tuple[np.ndarray, np.ndarray] +Dataset = Tuple[XY, XY] +LogRegParams = Union[XY, Tuple[np.ndarray]] +XYList = List[XY] +modelUsed=None +modelname=None +def setmodelName(modelselected): + try: + modelname=str(modelselected) + print(""setmodelName ,given modelname: \\n"",modelname) + if (modelname.lower() == 'logisticregression'): + modelUsed=LogisticRegression() + return True + elif (modelname.lower() == ""linearregression""): + modelUsed = LinearRegression() + return True + elif (modelname.lower() == ""sgdclassifier""): + #from sklearn.linear_model import SGDClassifier + modelUsed=SGDClassifier() + return True + elif (modelname.lower() == ""knn""): + modelUsed = KNeighborsClassifier() + return True + elif (modelname.lower() == ""decisiontreeclassifier""): + modelUsed = DecisionTreeClassifier() + return True + else: + return False + except Exception as e: + log(INFO, ""set fl model name fn issue: "",e) + +def get_model_parameters(model:modelUsed) -> LogRegParams: + """"""Returns the paramters of a sklearn LogisticRegression model."""""" + model_name=model.__class__.__name__ + if model.fit_intercept: + params = (model.coef_, model.intercept_) + else: + params = (model.coef_,) + + return params + + +def set_model_params( + model:modelUsed, params: LogRegParams +) -> modelUsed: + """"""Sets the parameters of a sklean LogisticRegression model."""""" + model.coef_ = params[0] + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = params[1] + except Exception as e: + log(INFO, ""set_model_params fn issue: "",e) + pass + + return model + +def set_initial_params_reg(model,no_vals,no_features): + """"""Sets initial parameters as zeros Required since model params are + uninitialized until model.fit is called. + + But server asks for initial parameters from clients at launch. 
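+## Quick illustration (not in the original file) of the zero-filled attribute shapes that
+## get_model_parameters / set_model_params above exchange, here for a 3-class, 4-feature
+## LogisticRegression. Setting these trailing-underscore attributes is enough for
+## scikit-learn to treat the model as fitted, which is what the federated rounds rely on.
+import numpy as np
+from sklearn.linear_model import LogisticRegression
+
+demo_model = LogisticRegression()
+demo_model.classes_ = np.array([0, 1, 2])
+demo_model.coef_ = np.zeros((3, 4))          # (n_classes, n_features)
+demo_model.intercept_ = np.zeros((3,))       # one bias per class
+print(demo_model.predict(np.ones((2, 4))))   # all scores equal, argmax picks class 0 -> [0 0]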
Refer + to sklearn.linear_model.LogisticRegression documentation for more + information. + """""" + + no_vals = no_vals + n_features = no_features + # model.classes_ = np.array([i for i in range(n_classes)]) + model.coef_ = np.zeros( n_features,) + model_name=model.__class__.__name__ + + try: + if model.fit_intercept: + # model.intercept_ = np.ones((no_vals,1)) + model.intercept_ = np.zeros((no_vals,)) + except Exception as e: + log(INFO, ""set_initial_params fn issue: "",e) + pass + +def set_initial_params(model,no_classes,no_features): + """"""Sets initial parameters as zeros Required since model params are + uninitialized until model.fit is called. + + But server asks for initial parameters from clients at launch. Refer + to sklearn.linear_model.LogisticRegression documentation for more + information. + """""" + + n_classes = no_classes + n_features = no_features + model.classes_ = np.array([i for i in range(n_classes)]) + model.coef_ = np.zeros((n_classes, n_features)) + model_name=model.__class__.__name__ + try: + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + except Exception as e: + log(INFO, ""set_initial_params fn issue: "",e) + pass + + + +def shuffle(X: np.ndarray, y: np.ndarray) -> XY: + """"""Shuffle X and y."""""" + rng = np.random.default_rng() + idx = rng.permutation(len(X)) + return X[idx], y[idx] + + +def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: + """"""Split X and y into a number of partitions."""""" + return list( + zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions)) + ) + + # -*- coding: utf-8 -*- +import pandas as pd +import numpy as np +import logging +import os +import sys +from flwr.common.logger import log +from logging import INFO +from flserver import flserver + +class aionfls: + def __init__(self): + self.confdata=None + + def configLoad(self,jsonfile): + import json + jsonfile=str(jsonfile) + with open(jsonfile, 'r') as file: + self.confdata = json.load(file) + return self.confdata + + def dataload(self,datapath): + df = pd.read_csv(datapath) #chunk_size=50000 + ## Data preprocess in test dataset, In aion, aion profiler will handle it. + df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)] + df=df.reset_index(drop=True) + return df + +# Start Flower server for n rounds of federated learning +if __name__ == ""__main__"": + classobj=aionfls() + json_file=sys.argv[1] + confdata = classobj.configLoad(json_file) + data_location = confdata[""data_location""] + # deploy_location=confdata['deploy_location'] + cwd = os.path.abspath(os.path.dirname(__file__)) + model_name=confdata['model_name'] + version=str(confdata['version']) + file_name=model_name+'_'+version+"".log"" + try: + fl_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) + except Exception as e: + classobj.log.info(""Log path error. 
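+## Small usage sketch for the partition() helper defined earlier in this module: split a toy
+## dataset across three simulated federated clients. Mirrors the np.array_split call above;
+## nothing here is project-specific.
+import numpy as np
+
+X_demo = np.arange(20, dtype=float).reshape(10, 2)
+y_demo = np.array([0, 1] * 5)
+client_splits = list(zip(np.array_split(X_demo, 3), np.array_split(y_demo, 3)))
+for cid, (Xc, yc) in enumerate(client_splits):
+    print('client', cid, 'rows:', len(Xc))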
Error Msg: \\n"",e) + logging.basicConfig(filename=fl_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + classobj.log = logging.getLogger('AION') + + print(""==============="") + print(""flserver main function"") + print(""==============="") + if (confdata['evaluation_required'].lower() == 'false'): + df=None + aionflobj=flserver(df,confdata) + print(""flserver condition is false"") + else: + ## User selected option is True + print(""flserver condition is true"") + data_location = os.path.normpath(os.path.join(cwd, data_location)) + # print(""current data_location"",data_location) + df=classobj.dataload(data_location) + aionflobj=flserver(df,confdata) + + status=aionflobj.runFLServer() + classobj.log.info(""Aion FL Server run Status: \\n""+str(status)) + + + + # -*- coding: utf-8 -*- +"""""" +Created on Sat Sep 10 23:57:56 2022 + +"""""" + +import numpy as np +import pandas as pd +from secrets import token_bytes +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import joblib +import logging +import os +from flask import Flask,request,jsonify,render_template +# import pickle +from flask import Flask, request, jsonify +import json +import jsonpickle +import os.path +import time +import subprocess +import sys +from os.path import expanduser +import ntpath +import shutil +import platform +from pathlib import Path + +home = expanduser(""~"") +if platform.system() == 'Windows': + LOG_FILE_PATH = os.path.join(home,'AppData','Local','HCLT','AION','logs') +else: + LOG_FILE_PATH = os.path.join(home,'HCLT','AION','logs') + +app = Flask(__name__) + +class server_ppxgboost: + def __init__(self): + # self.problemtype=problemtype + self.confdata=None + + ## Loading config file + def configload(self): + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name='config.json' + try: + config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) + except Exception as e: + print(""config path error. Error Msg: \\n"",e) + with open(config_file, 'r') as file: + data = json.load(file) + + model_name=str(data[""model_name""]) + # version=str(data[""version""]) + + return model_name + ## Load server xgboost model from ../model dir. + def model_load( self,path): + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name=path + try: + model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) + except Exception as e: + print(""Model path error. 
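+## The configload() above reads a single key, model_name, from config/config.json: the filename
+## of the trained XGBoost model stored under ./model. A minimal example of that file follows;
+## the model filename itself is only a placeholder, not taken from the original project.
+import json
+
+example_config = {'model_name': 'usecase1_xgboost.sav'}
+print(json.dumps(example_config, indent=4))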
Error Msg: \\n"",e) + # print(path) + loaded_model = joblib.load(model_location) + return loaded_model + + ## Generate Encrypted prediction fn + def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max): + xgboost_model = model + meta_min_max = MetaData(min_max) + p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) + enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) + enc_client_data=clientdata + # enc_predictions = ppbooster.predict_binary(enc_trees, X_test) # should rename the function + enc_predictions = ppbooster.predict_binary(p_trees, enc_client_data) + + return enc_predictions + + ## XGBoost wrapper for native model (native model to scikit learn xgboost model) + def xgboostwrappertonative(self,wrappermodel): + nativemodel = wrappermodel.get_booster() + return nativemodel + + def training_dataset_parser(self,train_data: pd.DataFrame): + """""" + :param train_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. + """""" + return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} + + + +## Homomorphic secure main server +cls_obj=server_ppxgboost() + +@app.route('/homomorphicprediction_server_api',methods=['GET','POST']) +def main_server(): + + data = request.get_json(force=True) + response_data = json.dumps(data) + + json_in= json.loads(response_data) + values = json_in['values'] + features=json_in['features'] + ppBoostKey=jsonpickle.decode(json_in['key']) + encrypted_clientdata=pd.DataFrame(values,columns =features) + ## Create encrypted predition from model + model=None + min_max = {'min': 0, 'max': 1000} + model_name = cls_obj.configload() + + # model_name=usecase_name + model_location=model_name + model_xgboost = cls_obj.model_load(model_location) + try: + ## For sklearn based xgboost model to native model + model = cls_obj.xgboostwrappertonative(model_xgboost) + except: + ## For native xgboost,we dont need to get booster. + model= model_xgboost + + ## FFor logging + cwd = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name =" +"model_name.rsplit('.', 1) + file_name=file_name[0] + file_name=file_name+"".log"" + try: + hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + log = logging.getLogger('AION') + log.info('Server regression encryption service started...') + print(""Encrypted client data is ....\\n"") + log.info(""Encrypted client data is (received by server): \\n""+str(encrypted_clientdata)) + print(""Client side encrypted data: \\n"",encrypted_clientdata) + enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max) + log.info(""server side encrypted prediction: \\n""+str(enc_predictions)) + ## Serialize the ppboost encrypted prediction by jsonpickle, normal pikle lib not working. 
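+## Hedged sketch of the wrapper-to-native conversion used by xgboostwrappertonative() above:
+## a scikit-learn style XGBoost estimator exposes its underlying native model via get_booster(),
+## while a model that is already a native Booster has no such method, which is why the caller
+## falls back in its except branch. Assumes the xgboost package; the toy data is illustrative.
+import numpy as np
+import xgboost as xgb
+
+wrapper = xgb.XGBRegressor(n_estimators=5, max_depth=2)
+wrapper.fit(np.random.rand(50, 3), np.random.rand(50))
+try:
+    native_booster = wrapper.get_booster()   # sklearn wrapper -> native xgboost.Booster
+except AttributeError:
+    native_booster = wrapper                 # already a native Booster, use it as-is
+print(type(native_booster).__name__)         # Booster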
+ enc_predictions_json = jsonpickle.encode(enc_predictions) + # enc_predictions_json = enc_predictions.to_json() + return enc_predictions_json + +if __name__ == '__main__': + #app.run(debug=True) + app.run(host=""localhost"", port=9000, debug=True) # -*- coding: utf-8 -*- +"""""" +Created on Sat Sep 10 23:57:56 2022 + +@author: jayaramakrishnans +"""""" + +import numpy as np +import pandas as pd +from secrets import token_bytes +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import joblib +import logging +import os +from flask import Flask,request,jsonify,render_template +# import pickle +from flask import Flask, request, jsonify +import json +import jsonpickle + +app = Flask(__name__) + +class server_ppxgboost: + def __init__(self): + # self.problemtype=problemtype + self.confdata=None + print(""Inside server_ppxgboost_1\\n"") + + ## Loading config file + def configload(self): + print(""Inside server_ppxgboost_1,configload\\n"") + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name='config.json' + try: + config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) + except Exception as e: + print(""config path error. Error Msg: \\n"",e) + with open(config_file, 'r') as file: + data = json.load(file) + + model_name=str(data[""model_name""]) + # version=str(data[""version""]) + + return model_name + ## Load server xgboost model from ../model dir. + def model_load( self,path): + print(""Inside server_ppxgboost_1,model_load\\n"") + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name=path + try: + model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) + except Exception as e: + print(""Model path error. Error Msg: \\n"",e) + # print(path) + loaded_model = joblib.load(model_location) + return loaded_model + + ## Generate Encrypted prediction fn + def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max): + xgboost_model = model + meta_min_max = MetaData(min_max) + p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) + + enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) + + enc_client_data=clientdata + # enc_predictions = ppbooster.predict_binary(enc_trees, X_test) # should rename the function + enc_predictions = ppbooster.predict_binary(p_trees, enc_client_data) + + return enc_predictions + + ## XGBoost wrapper for native model (native model to scikit learn xgboost model) + def xgboostwrappertonative(self,wrappermodel): + print(""Inside server_ppxgboost_7,xgboostwrappertonative= \\n"",wrappermodel) + nativemodel = wrappermodel.get_booster() + return nativemodel + + def training_dataset_parser(self,train_data: pd.DataFrame): + """""" + :param train_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. 
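+## Hedged usage sketch for the /homomorphicprediction_server_api endpoint defined above: the
+## client posts the encrypted frame as 'values' + 'features' plus a jsonpickle-encoded
+## PPBoostKey under 'key', and receives a jsonpickle-encoded encrypted prediction back.
+## Host and port mirror the app.run(...) call above; the frame contents and the key object here
+## are placeholders, and the server must be running locally for the call to succeed.
+import json
+import jsonpickle
+import pandas as pd
+import requests
+
+encrypted_frame = pd.DataFrame([[1, 2], [3, 4]], columns=['f1', 'f2'])   # stand-in for encrypted data
+payload = {
+    'values': encrypted_frame.values.tolist(),
+    'features': encrypted_frame.columns.values.tolist(),
+    'key': jsonpickle.encode(object()),                                  # stand-in for a real PPBoostKey
+}
+resp = requests.post('http://localhost:9000/homomorphicprediction_server_api',
+                     data=json.dumps(payload),
+                     headers={'content-type': 'application/json'})
+enc_predictions = jsonpickle.decode(resp.content.decode('utf-8'))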
+ """""" + return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} + + + +## Homomorphic secure main server +cls_obj=server_ppxgboost() + +@app.route('/homomorphicprediction_server_api',methods=['GET','POST']) +def main_server(): + data = request.get_json(force=True) + response_data = json.dumps(data) + + json_in= json.loads(response_data) + values = json_in['values'] + features=json_in['features'] + ppBoostKey=jsonpickle.decode(json_in['key']) + encrypted_clientdata=pd.DataFrame(values,columns =features) + ## Create encrypted predition from model + model=None + min_max = {'min': 0, 'max': 1000} + model_name = cls_obj.configload() + + # model_name=usecase_name + model_location=model_name + model_xgboost = cls_obj.model_load(model_location) + try: + ## For sklearn based xgboost model to native model + model = cls_obj.xgboostwrappertonative(model_xgboost) + except: + ## For native xgboost,we dont need to get booster. + model= model_xgboost + + ## FFor logging + cwd = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name = model_name.rsplit('.', 1) + file_name=file_name[0] + file_name=file_name+"".log"" + try: + hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + log = logging.getLogger('AION') + log.info('Server binary class encryption service started...') + print(""Encrypted client data is ....\\n"") + log.info(""Encrypted client data is (received by server): \\n""+str(encrypted_clientdata)) + enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max) + log.info(""server side encrypted prediction: \\n""+str(enc_predictions)) + ## Serialize the ppboost encrypted prediction by jsonpickle, normal pikle lib not working. + enc_predictions_json = jsonpickle.encode(enc_predictions) + # enc_predictions_json = enc_predictions.to_json() + return enc_predictions_json + +if __name__ == '__main__': + #app.run(debug=True) + app.run(host=""localhost"", port=9000, debug=True) # -*- coding: utf-8 -*- +"""""" +Created on Sat Sep 10 23:57:56 2022 + +"""""" + +import numpy as np +import sqlite3 +import sys +import pandas as pd +from secrets import token_bytes +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import joblib +import logging +import os +from flask import Flask,request,jsonify,render_template +# import pickle +from flask import Flask, request, jsonify +import json +import jsonpickle +import time +from pathlib import Path +app = Flask(__name__) + +class server_ppxgboost: + def __init__(self): + # self.problemtype=problemtype + print(""Inside server_ppxgboost_1,init\\n"") + self.confdata=None + + ## Loading config file + def configload(self): + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name='secure_config.json' + try: + config_file=os.path.normpath(os.path.join(cwd,'etc',file_name)) + except Exception as e: + print(""config path error. 
Error Msg: \\n"",e) + with open(config_file, 'r') as file: + data = json.load(file) + + model_name=str(data[""model_name""]) + # version=str(data[""version""]) + + return model_name + + ## Load server xgboost model from ../model dir. + def model_load( self,path): + cwd = os.path.abspath(os.path.dirname(__file__)) + file_name=path + try: + model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) + except Exception as e: + print(""Model path error. Error Msg: \\n"",e) + # print(path) + loaded_model = joblib.load(model_location) + return loaded_model + + def create_connection(self,db_file): + conn = None + try: + conn = sqlite3.connect(db_file) + conn.execute('''CREATE TABLE IF NOT EXISTS modelinfo (key BLOB NOT NULL,encrypttree BLOB NOT NULL)''') + except Exception as e: + print(e) + return conn + def count_encrypt_model(self,conn): + try: + sql = ""select count(*) from modelinfo"" + cur = conn.cursor() + cur.execute(sql) + cur_result = cur.fetchone() + return cur_result[0] + except Exception as e: + print(e) + def create_encryptmodel(self,conn,modeldetails): + sql = ''' INSERT INTO modelinfo(key,encrypttree) VALUES(?,?) ''' + cur = conn.cursor() + cur.execute(sql, modeldetails) + conn.commit() + + return cur.lastrowid + def search_encryptmodel(self,conn,key): + try: + sql = ""SELECT encrypttree FROM modelinfo where key='""+key+""'"" + cursor = conn.execute(sql) + for row in cursor: + return row[0] + return '' + except Exception as e: + print(e) + + def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max,noofclasses): + try: + db_file = Path(__file__).parent/'logs'/'encryptedModels' + conn = self.create_connection(db_file) + enc_trees = self.search_encryptmodel(conn,jsonpickle.encode(ppBoostKey)) + if enc_trees != '': + enc_trees = jsonpickle.decode(enc_trees) + else: + if self.count_encrypt_model(conn) >= 5: + outputjson = {""status"":""ERROR"",""msg"":""Maximum Number of Encrypted""} + return json.dumps(outputjson) + xgboost_model = model + meta_min_max = MetaData(min_max) + p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) + enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) + modelinfo = (jsonpickle.encode(ppBoostKey),jsonpickle.encode(enc_trees)) + self.create_encryptmodel(conn,modelinfo) + + enc_client_data=clientdata + # try: + # num_classes = model.n_classes_ + # except: + # num_classes = noofclasses + num_classes = noofclasses + if num_classes == 0: + nc_predictions = ppbooster.predict_binary(p_trees, enc_client_data) + else: + enc_predictions = ppbooster.predict_multiclass(enc_trees, num_classes, enc_client_data) + + enc_predictions_json = jsonpickle.encode(enc_predictions) + outputjson = {""status"":""SUCCESS"",""data"":enc_predictions_json} + return json.dumps(outputjson) + except Exception as e: + outputjson = {""status"":""ERROR"",""msg"":str(e)} + return json.dumps(outputjson) + ## XGBoost wrapper for native model (native model to scikit learn xgboost model) + def xgboostwrappertonative(self,wrappermodel): + nativemodel = wrappermodel.get_booster() + try: + noOfClasses = wrappermodel.n_classes_ + except Exception as e: + print(e) + noOfClasses = 0 + return nativemodel,noOfClasses + + def training_dataset_parser(self,train_data: pd.DataFrame): + """""" + :param train_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. 
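+## Minimal standalone sketch of the sqlite caching pattern used above: the jsonpickle-encoded
+## PPBoostKey acts as the lookup key and the encrypted tree ensemble is stored alongside it, so
+## a repeat request with the same key skips the costly re-encryption. An in-memory database and
+## plain strings stand in for the real encoded objects; a parameterised query is used here
+## instead of string concatenation.
+import sqlite3
+
+conn = sqlite3.connect(':memory:')
+conn.execute('CREATE TABLE IF NOT EXISTS modelinfo (key BLOB NOT NULL, encrypttree BLOB NOT NULL)')
+conn.execute('INSERT INTO modelinfo(key, encrypttree) VALUES(?, ?)', ('demo-key', 'demo-encrypted-trees'))
+conn.commit()
+row = conn.execute('SELECT encrypttree FROM modelinfo WHERE key=?', ('demo-key',)).fetchone()
+print(row[0] if row else 'cache miss')       # -> demo-encrypted-trees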
+ """""" + return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} + + + +## Homomorphic secure main server +cls_obj=server_ppxgboost() + +def spredict(data): + try: + json_in= json.loads(data) + values = json_in['values'] + features=json_in['features'] + ppBoostKey=jsonpickle.decode(json_in['key']) + encrypted_clientdata=pd.DataFrame(values,columns =features) + model=None + min_max = {'min': 0, 'max': 1000} + model_name = cls_obj.configload() + model_location=model_name + model_xgboost = cls_obj.model_load(model_location) + try: + model,noofclasses = cls_obj.xgboostwrappertonative(model_xgboost) + except Exception as e: + print(e) + model= model_xgboost + noofclasses = " +"0 + cwd = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name = model_name.rsplit('.', 1) + file_name=file_name[0] + file_name=file_name+"".log"" + try: + hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + log = logging.getLogger('AION') + log.info('Server multiclass classification encryption service started...') + log.info(""Encrypted client data is (received by server): \\n""+str(encrypted_clientdata)) + output = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max,noofclasses) + print(""predictions:"",output) + print(""Inside server_ppxgboost_8,output= \\n"",output) + return output + except Exception as e: + outputjson = {""status"":""ERROR"",""msg"":str(e)} + output = json.dumps(outputjson) + print(""predictions:"",output) + return output +if __name__ == ""__main__"": + output = spredict(sys.argv[1]) # -*- coding: utf-8 -*- + +# -*- coding: utf-8 -*- +import pandas as pd +import numpy as np +import logging +import os +import sys +from logging import INFO +from script.heRegression import client_ppxgboost +from script.aion_predict import selector +from script.inputprofiler import inputprofiler +import argparse +class aion_hemulticlient: + def __init__(self): + self.confdata=None + + def dataload(self,datapath): + df = pd.read_csv(datapath) + ## Data preprocess in test dataset, In aion, aion profiler will handle it. 
+ # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] + # df=df.reset_index(drop=True) + profilerobj = inputprofiler() + df = profilerobj.run(df) + selectobj = selector() + df = selectobj.run(df) + return df + +if __name__ == ""__main__"": + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--inputDataLocation', help='Input Data Path') + parser.add_argument('-k', '--keyGenerate', help='True') + parser.add_argument('-e', '--endPoint', help='Service End Point') + args = parser.parse_args() + if args.inputDataLocation: + dataLocation=args.inputDataLocation + if args.keyGenerate: + keyGenerate = args.keyGenerate + else: + keyGenerate='False' + print(keyGenerate) + if args.endPoint: + endPoint=args.endPoint + else: + raise('End Point Not Defined') + classobj=aion_hemulticlient() + df=classobj.dataload(dataLocation) + aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint) + ppxgboost_pred=aionhmcobj.main_client() + print(""final decrypted prediction at client side: \\n"",ppxgboost_pred) + + + + # -*- coding: utf-8 -*- +import pandas as pd +from sklearn.model_selection import train_test_split +import numpy as np +from secrets import token_bytes + +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import sys +sys.path.insert(0, '..') +import logging +from logging import INFO +import pickle +import requests +import json +# from json import JSONEncoder +import jsonpickle +import os +from pathlib import Path +##Aion main client class for ppxgboost based encryption,decryption +class client_ppxgboost: + def __init__(self,data,keyGenerate,endPoint): + + self.data=data + self.keyGenerate = keyGenerate + self.endPoint = endPoint + self.prediction=None + ## For logging + clientDirectory = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name = ""he_regression"" + file_name=file_name+"".log"" + self.keydir=os.path.join(clientDirectory,'..','keys') + os.makedirs(self.keydir, exist_ok=True) + try: + hm_log=os.path.normpath(os.path.join(clientDirectory,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + self.log = logging.getLogger('AION') + self.log.info('Client Regression homomorphic encryption service started...') + + ## Loading configuration parameters, Not used now. + def configload(self): + try: + data=self.confdata + usecase_name=str(data[""usecase_name""]) + version=str(data[""version""]) + problem_type=data[""problem_type""] + model_location=str(data[""model_location""]) + data_location=str(data[""data_location""]) + selected_feature=data[""selected_feature""] + if (type(selected_feature) is str): + selected_feature=selected_feature.split(',') + target_feature=data[""target_feature""] + client_encryption_accuracy=str(data[""client_encryption_accuracy""]) + test_size=int(data[""test_size""]) + test_size=test_size/100 + + except Exception as e: + self.log.info(""Reading server config file issue. Err.Msg: %s ""+str(e)) + return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size + + ## Load the model, Not used at client now. 
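+## Hedged usage note for the argparse runner above. The script name and endpoint URL below are
+## placeholders; only the -i / -k / -e flags come from the parser definition. A typical call:
+##   python aion_hemulticlient.py -i ./testdata.csv -k True -e http://localhost:9000/homomorphicprediction_server_api
+## The same flags can be exercised programmatically:
+import argparse
+
+demo_parser = argparse.ArgumentParser()
+demo_parser.add_argument('-i', '--inputDataLocation')
+demo_parser.add_argument('-k', '--keyGenerate')
+demo_parser.add_argument('-e', '--endPoint')
+demo_args = demo_parser.parse_args(['-i', './testdata.csv', '-k', 'True',
+                                    '-e', 'http://localhost:9000/homomorphicprediction_server_api'])
+print(demo_args.inputDataLocation, demo_args.keyGenerate, demo_args.endPoint)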
+ def model_load(self, path): + loaded_model = pickle.load(open(path, 'rb')) + return loaded_model + + + #Generating secure key + def generate_ppboostkey(self): + try: + public_key_file = Path(__file__).parent.parent/'keys'/'public.k' + private_key_file = Path(__file__).parent.parent/'keys'/'private.k' + prf_key_file = Path(__file__).parent.parent/'keys'/'prf.k' + ope_key_file = Path(__file__).parent.parent/'keys'/'ope.k' + encryptor_key_file = Path(__file__).parent.parent/'keys'/'encryptor.k' + boostkey_key_file = Path(__file__).parent.parent/'keys'/'boostkey.k' + if not boostkey_key_file.exists() or self.keyGenerate == 'True': + public_key, private_key = paillier.he_key_gen() + pub_file = open(public_key_file, 'w') + pub_file.write(jsonpickle.encode(public_key)) + pri_file = open(private_key_file, 'w') + pri_file.write(jsonpickle.encode(private_key)) + prf_key = token_bytes(16) + OPE_key = token_bytes(16) + prf_file = open(prf_key_file, 'w') + prf_file.write(jsonpickle.encode(prf_key)) + ope_file = open(ope_key_file, 'w') + ope_file.write(jsonpickle.encode(OPE_key)) + encrypter = OPE(OPE_key) + enc_file = open(encryptor_key_file, 'w') + enc_file.write(jsonpickle.encode(encrypter)) + ppBoostKey = PPBoostKey(public_key, prf_key, encrypter) + boost_file = open(boostkey_key_file, 'w') + boost_file.write(jsonpickle.encode(ppBoostKey)) + else: + pub_file = open(public_key_file, 'r') + public_key = jsonpickle.decode(pub_file.read()) + pub_file.close() + pri_file = open(private_key_file, 'r') + private_key = jsonpickle.decode(pri_file.read()) + pri_file.close() + prf_file = open(prf_key_file, 'r') + prf_key = jsonpickle.decode(prf_file.read()) + prf_file.close() + ope_file = open(ope_key_file, 'r') + OPE_key = jsonpickle.decode(ope_file.read()) + ope_file.close() + enc_file = open(encryptor_key_file, 'r') + encrypter = jsonpickle.decode(enc_file.read()) + enc_file.close() + boost_file = open(boostkey_key_file, 'r') + ppBoostKey = jsonpickle.decode(boost_file.read()) + boost_file.close() + return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey + except Exception as e: + print(e) + + + def ppxgboostreg_predict(self,enc_predictions,private_key): + dec = [] + for p in enc_predictions: + dec.append(paillier.decrypt(private_key, p)) + + + dec_df=pd.DataFrame(dec) + + return dec + + # class ppkeyEncoder(JSONEncoder): + # def default(self,o): + # return o.__dict__ + + ## Function to connect secure server via flask restapi (send enc data and receive enc prediction.) 
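+## Small sketch of the persistence mechanism generate_ppboostkey() relies on: each key object is
+## serialised to text with jsonpickle and restored on a later run, so keys are only regenerated
+## when the boostkey file is missing or keyGenerate is 'True'. A plain dict stands in for the
+## real Paillier / OPE key objects here, and the file path is illustrative.
+import jsonpickle
+from pathlib import Path
+
+demo_key_file = Path('keys') / 'demo.k'
+demo_key_file.parent.mkdir(parents=True, exist_ok=True)
+demo_key_file.write_text(jsonpickle.encode({'kind': 'placeholder-key', 'bits': 16}))
+restored = jsonpickle.decode(demo_key_file.read_text())
+print(restored['kind'])       # -> placeholder-key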
+ def connect_xgboostserver(self,ppBoostKey,encrypted_xtest): + url = self.endPoint + enc_dict={} + + # df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist() + enc_dict['values']=encrypted_xtest.values.tolist() + enc_dict['features']=encrypted_xtest.columns.values.tolist() + enc_dict['key']= jsonpickle.encode(ppBoostKey) + json_out=json.dumps(enc_dict,indent=4) + headers = { + 'content-type': ""application/json"", + 'cache-control': ""no-cache"" + + } + response = requests.post(url,auth=('admin','aion'),data=json_out,headers=headers) + #print(response.content) + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + predict_dict = json.loads(str(outputStr)) + if (predict_dict['status'] == 'SUCCESS'): + data = predict_dict['data'] + enc_predictions_ob=jsonpickle.decode(data) + return enc_predictions_ob + else: + print('Error') + + ## Create PaillierAPI based encrypted user given data , here, testdata=userdata + def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max): + feature_set_testdata=set(testdata.columns) + ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max)) + return testdata + + ## Create min and max of testdata df for pailler encryption,decryption + def training_dataset_parser(self, client_data: pd.DataFrame): + """""" + :param client_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. + """""" + return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))} + + ## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction + def main_client(self): + self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10))) + print("" Client actual data sample (displaying last 10 values) : \\n"",self.data.tail(10)) + public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey() + + min_max = self.training_dataset_parser(self.data) + meta_min_max = MetaData(min_max) + + encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max) + # print(""Sending encrypted client data to server....\\n"") + print(""\\n Client side encrypted input data to server (displaying last 10 rows): \\n"",encrypted_testdata.tail(10)) + self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10))) + enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata) + print(""\\n Encrypted prediction from server (displaying last 10 values.): \\n"",enc_predictions[-10:]) + self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:])) + ## Decrypted predition + dec = self.ppxgboostreg_predict(enc_predictions,private_key) + # ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction']) + ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction']) + # print(""final decrypted prediction at client side: \\n"",ppxgboost_pred) + self.log.info(""Final decrypted prediction at client side:: \\n""+str(ppxgboost_pred)) + return ppxgboost_pred + + +## For standalone testing +if __name__ == '__main__': + problemtype='regression' + data=None + targetfeature=None + ppxgboost_client_obj=client_ppxgboost(problemtype,data,targetfeature) + ppxgboost_dec_predictions = ppxgboost_client_obj.main_client() + 
print(""In main: ppxgboost_dec_predictions: \\n"",ppxgboost_dec_predictions) # -*- coding: utf-8 -*- + +# -*- coding: utf-8 -*- +import pandas as pd +import numpy as np +import logging +import os +import sys +from logging import INFO +from script.heMulticlass import client_ppxgboost +from" +"script.aion_predict import selector +from script.inputprofiler import inputprofiler +import argparse +class aion_hemulticlient: + def __init__(self): + self.confdata=None + + def dataload(self,datapath): + df = pd.read_csv(datapath) + ## Data preprocess in test dataset, In aion, aion profiler will handle it. + # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] + # df=df.reset_index(drop=True) + profilerobj = inputprofiler() + df = profilerobj.run(df) + selectobj = selector() + df = selectobj.run(df) + return df + +if __name__ == ""__main__"": + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--inputDataLocation', help='Input Data Path') + parser.add_argument('-k', '--keyGenerate', help='True') + parser.add_argument('-e', '--endPoint', help='Service End Point') + args = parser.parse_args() + if args.inputDataLocation: + dataLocation=args.inputDataLocation + if args.keyGenerate: + keyGenerate = args.keyGenerate + else: + keyGenerate='False' + print(keyGenerate) + if args.endPoint: + endPoint=args.endPoint + else: + raise('End Point Not Defined') + classobj=aion_hemulticlient() + df=classobj.dataload(dataLocation) + aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint) + ppxgboost_pred=aionhmcobj.main_client() + print(""final decrypted prediction at client side: \\n"",ppxgboost_pred) + + + + # -*- coding: utf-8 -*- + +# -*- coding: utf-8 -*- +import pandas as pd +import numpy as np +import logging +import os +import sys +from logging import INFO +from script.heBinary import client_ppxgboost +from script.aion_predict import selector +from script.inputprofiler import inputprofiler +## Client main class for binary classification +class aion_hebinaryclient: + def __init__(self): + self.confdata=None + + + def configLoad(self,jsonfile): + import json + jsonfile=str(jsonfile) + with open(jsonfile, 'r') as file: + self.confdata = json.load(file) + return self.confdata + + def dataload(self,datapath): + df = pd.read_csv(datapath) + ## Data preprocess in test dataset, In aion, aion profiler will handle it. 
+ # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] + # df=df.reset_index(drop=True) + profilerobj = inputprofiler() + df = profilerobj.run(df) + selectobj = selector() + df = selectobj.run(df) + return df + +# Start Flower server for n rounds of federated learning +if __name__ == ""__main__"": + classobj=aion_hebinaryclient() + data_location=str(sys.argv[1]) + df=classobj.dataload(data_location) + # print(""df: \\n"",df) + aionhmcobj=client_ppxgboost(df) + ppxgboost_pred=aionhmcobj.main_client() + print(""final decrypted prediction at client side: \\n"",ppxgboost_pred) + # classobj.log.info(""At client end, homomorphic prediction df: \\n""+str(ppxgboost_pred)) + # classobj.log.info(""Aion homomorphic client encrypted prediction df: \\n""+str(ppxgboost_pred)) + + + + # -*- coding: utf-8 -*- +import pandas as pd +from sklearn.model_selection import train_test_split +import numpy as np +from secrets import token_bytes + +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import sys +sys.path.insert(0, '..') +import logging +from logging import INFO +import pickle +import requests +import json +# from json import JSONEncoder +import jsonpickle +import os +##Aion main client class for ppxgboost based encryption,decryption +class client_ppxgboost: + def __init__(self,data): + + self.data=data + self.prediction=None + ## For logging + cwd = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name = ""he_binaryclass"" + file_name=file_name+"".log"" + try: + hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + self.log = logging.getLogger('AION') + self.log.info('Client binary class classification homomorphic encryption service started...') + + ## Loading configuration parameters, Not used now. + def configload(self): + try: + data=self.confdata + usecase_name=str(data[""usecase_name""]) + version=str(data[""version""]) + problem_type=data[""problem_type""] + model_location=str(data[""model_location""]) + data_location=str(data[""data_location""]) + selected_feature=data[""selected_feature""] + if (type(selected_feature) is str): + selected_feature=selected_feature.split(',') + target_feature=data[""target_feature""] + client_encryption_accuracy=str(data[""client_encryption_accuracy""]) + test_size=int(data[""test_size""]) + test_size=test_size/100 + + except Exception as e: + self.log.info(""Reading server config file issue. Err.Msg: %s ""+str(e)) + return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size + + ## Load the model, Not used at client now. 
+ def model_load(self, path): + loaded_model = pickle.load(open(path, 'rb')) + return loaded_model + + + #Generating secure key + def generate_ppboostkey(self): + public_key, private_key = paillier.he_key_gen() + prf_key = token_bytes(16) + OPE_key = token_bytes(16) + encrypter = OPE(OPE_key) + ppBoostKey = PPBoostKey(public_key, prf_key, encrypter) + return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey + + ## Binary client prediction (decrypted prediction) + def ppxgboostbinary_predict(self,enc_predictions,private_key): + dec = ppbooster.client_decrypt(private_key, enc_predictions) + ##For binary classification + res = ppbooster.client_decrypt_prediction_binary(private_key, enc_predictions) + res_df=pd.DataFrame(res) + return res + + # class ppkeyEncoder(JSONEncoder): + # def default(self,o): + # return o.__dict__ + + ## Function to connect secure server via flask restapi (send enc data and receive enc prediction.) + + def connect_xgboostserver(self,ppBoostKey,encrypted_xtest): + url = 'http://localhost:9000//homomorphicprediction_server_api' + enc_dict={} + + # df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist() + enc_dict['values']=encrypted_xtest.values.tolist() + enc_dict['features']=encrypted_xtest.columns.values.tolist() + enc_dict['key']= jsonpickle.encode(ppBoostKey) + json_out=json.dumps(enc_dict,indent=4) + headers = { + 'content-type': ""application/json"", + 'cache-control': ""no-cache"" + + } + r = requests.post(url,data=json_out,headers=headers) + enc_predictions_obj=jsonpickle.decode(r.content) + + return enc_predictions_obj + + ## Create PaillierAPI based encrypted user given data , here, testdata=userdata + def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max): + feature_set_testdata=set(testdata.columns) + ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max)) + return testdata + + ## Create min and max of testdata df for pailler encryption,decryption + def training_dataset_parser(self, client_data: pd.DataFrame): + """""" + :param client_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. 
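+## Quick illustration of what training_dataset_parser() above computes: a single global minimum
+## and maximum across the whole frame, which the MetaData wrapper uses as the value range for
+## the order-preserving encryption. Pure pandas/numpy on toy data.
+import numpy as np
+import pandas as pd
+
+frame = pd.DataFrame({'age': [21.0, 35.0, 60.0], 'income': [1200.0, 800.0, 4300.0]})
+min_max = {'min': np.min(pd.DataFrame.min(frame)), 'max': np.max(pd.DataFrame.max(frame))}
+print(min_max)    # {'min': 21.0, 'max': 4300.0}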
+ """""" + return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))} + + ## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction + def main_client(self): + self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10))) + print("" Client actual data sample (displaying last 10 values) : \\n"",self.data.tail(10)) + public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey() + min_max = self.training_dataset_parser(self.data) + meta_min_max = MetaData(min_max) + + encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max) + # print(""Sending encrypted client data to server....\\n"") + print(""\\n Client side encrypted input data to server (displaying last 10 rows): \\n"",encrypted_testdata.tail(10)) + self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10))) + enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata) + print(""\\n Encrypted prediction from server (displaying last 10 values.): \\n"",enc_predictions[-10:]) + self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:])) + ## Decrypted predition + dec = self.ppxgboostbinary_predict(enc_predictions,private_key) + # ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction']) + ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction']) + self.log.info(""final decrypted prediction at client side:: \\n""+str(ppxgboost_pred)) + return ppxgboost_pred + + +## For standalone testing +if __name__ == '__main__': + problemtype='Binary classification' + data=None + targetfeature=None + ppxgboost_client_obj=client_ppxgboost(problemtype,data,targetfeature) + ppxgboost_dec_predictions = ppxgboost_client_obj.main_client() + # print(""In main: ppxgboost_dec_predictions: \\n"",ppxgboost_dec_predictions) # -*- coding: utf-8 -*- +import pandas as pd +from sklearn.model_selection import train_test_split +import numpy as np +from secrets import token_bytes + +from ppxgboost import PaillierAPI as paillier +from ppxgboost import BoosterParser as boostparser +from ppxgboost import PPBooster as ppbooster +from ppxgboost.PPBooster import MetaData +from ppxgboost.PPKey import PPBoostKey +# from ope.pyope.ope import OPE +from pyope.ope import OPE +import sys +sys.path.insert(0, '..') +import logging +from logging import INFO +import pickle +import requests +import json +# from json import JSONEncoder +import jsonpickle +import os +from pathlib import Path +##Aion main client class for ppxgboost based encryption,decryption +class client_ppxgboost: + def __init__(self,data,keyGenerate,endPoint): + + self.data=data + self.keyGenerate = keyGenerate + self.endPoint = endPoint + self.prediction=None + ## For logging + clientDirectory = os.path.abspath(os.path.dirname(__file__)) + # model_name=model_name + file_name = ""he_multiclass"" + file_name=file_name+"".log"" + self.keydir=os.path.join(clientDirectory,'..','keys') + os.makedirs(self.keydir, exist_ok=True) + try: + hm_log=os.path.normpath(os.path.join(clientDirectory,'logs',file_name)) + os.makedirs(os.path.dirname(hm_log), exist_ok=True) + except Exception as e: + print(""Log path error. 
Error Msg: \\n"",e) + logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) + self.log = logging.getLogger('AION') + self.log.info('Client Multi class classification homomorphic encryption service started...') + + ## Loading configuration parameters, Not used now. + def configload(self): + try: + data=self.confdata + usecase_name=str(data[""usecase_name""]) + version=str(data[""version""]) + problem_type=data[""problem_type""] + model_location=str(data[""model_location""]) + data_location=str(data[""data_location""]) + selected_feature=data[""selected_feature""] + if (type(selected_feature) is str): + selected_feature=selected_feature.split(',') + target_feature=data[""target_feature""] + client_encryption_accuracy=str(data[""client_encryption_accuracy""]) + test_size=int(data[""test_size""]) + test_size=test_size/100 + + except Exception as e: + self.log.info(""Reading server config file issue. Err.Msg: %s ""+str(e)) + return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size + + ## Load the model, Not used at client now. + def model_load(self" +", path): + loaded_model = pickle.load(open(path, 'rb')) + return loaded_model + + + #Generating secure key + def generate_ppboostkey(self): + try: + public_key_file = Path(__file__).parent.parent/'keys'/'public.k' + private_key_file = Path(__file__).parent.parent/'keys'/'private.k' + prf_key_file = Path(__file__).parent.parent/'keys'/'prf.k' + ope_key_file = Path(__file__).parent.parent/'keys'/'ope.k' + encryptor_key_file = Path(__file__).parent.parent/'keys'/'encryptor.k' + boostkey_key_file = Path(__file__).parent.parent/'keys'/'boostkey.k' + if not boostkey_key_file.exists() or self.keyGenerate == 'True': + public_key, private_key = paillier.he_key_gen() + pub_file = open(public_key_file, 'w') + pub_file.write(jsonpickle.encode(public_key)) + pri_file = open(private_key_file, 'w') + pri_file.write(jsonpickle.encode(private_key)) + prf_key = token_bytes(16) + OPE_key = token_bytes(16) + prf_file = open(prf_key_file, 'w') + prf_file.write(jsonpickle.encode(prf_key)) + ope_file = open(ope_key_file, 'w') + ope_file.write(jsonpickle.encode(OPE_key)) + encrypter = OPE(OPE_key) + enc_file = open(encryptor_key_file, 'w') + enc_file.write(jsonpickle.encode(encrypter)) + ppBoostKey = PPBoostKey(public_key, prf_key, encrypter) + boost_file = open(boostkey_key_file, 'w') + boost_file.write(jsonpickle.encode(ppBoostKey)) + else: + pub_file = open(public_key_file, 'r') + public_key = jsonpickle.decode(pub_file.read()) + pub_file.close() + pri_file = open(private_key_file, 'r') + private_key = jsonpickle.decode(pri_file.read()) + pri_file.close() + prf_file = open(prf_key_file, 'r') + prf_key = jsonpickle.decode(prf_file.read()) + prf_file.close() + ope_file = open(ope_key_file, 'r') + OPE_key = jsonpickle.decode(ope_file.read()) + ope_file.close() + enc_file = open(encryptor_key_file, 'r') + encrypter = jsonpickle.decode(enc_file.read()) + enc_file.close() + boost_file = open(boostkey_key_file, 'r') + ppBoostKey = jsonpickle.decode(boost_file.read()) + boost_file.close() + return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey + except Exception as e: + print(e) + + ## PPboost multiclass prediction fn + def ppxgboostmulticlass_predict(self,enc_predictions,private_key): + + ##For binary classification + # res = ppbooster.client_decrypt_prediction_binary(private_key, enc_predictions) + ## For multiclass classification 
+ res = ppbooster.client_decrypt_prediction_multiclass(private_key, enc_predictions) + return res + + # class ppkeyEncoder(JSONEncoder): + # def default(self,o): + # return o.__dict__ + + ## Function to connect secure server via flask restapi (send enc data and receive enc prediction.) + + def connect_xgboostserver(self,ppBoostKey,encrypted_xtest): + url = self.endPoint + enc_dict={} + + # df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist() + enc_dict['values']=encrypted_xtest.values.tolist() + enc_dict['features']=encrypted_xtest.columns.values.tolist() + enc_dict['key']= jsonpickle.encode(ppBoostKey) + json_out=json.dumps(enc_dict,indent=4) + headers = { + 'content-type': ""application/json"", + 'cache-control': ""no-cache"" + + } + response = requests.post(url,auth=('admin','aion'),data=json_out,headers=headers) + #print(response.content) + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + predict_dict = json.loads(str(outputStr)) + if (predict_dict['status'] == 'SUCCESS'): + data = predict_dict['data'] + enc_predictions_ob=jsonpickle.decode(data) + return enc_predictions_ob + else: + print('Error') + + ## Create PaillierAPI based encrypted user given data , here, testdata=userdata + def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max): + feature_set_testdata=set(testdata.columns) + ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max)) + return testdata + + ## Create min and max of testdata df for pailler encryption,decryption + def training_dataset_parser(self, client_data: pd.DataFrame): + """""" + :param client_data: dataframe training data + :return: minimum of the training dataset, and maximum of the training dataset. 
+ """""" + return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))} + + ## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction + def main_client(self): + self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10))) + #print("" Client actual data sample (displaying last 10 values) : \\n"",self.data.tail(10)) + public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey() + + min_max = self.training_dataset_parser(self.data) + meta_min_max = MetaData(min_max) + #print('++++++++++++++++++++++++++++') + encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max) + # print(""Sending encrypted client data to server....\\n"") + #print(""\\n Client side encrypted input data to server (displaying last 10 rows): \\n"",encrypted_testdata.tail(10)) + self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10))) + enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata) + #print(""\\n Encrypted prediction from server (displaying last 10 values.): \\n"",enc_predictions[-10:]) + #self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:])) + ## Decrypted predition + dec = self.ppxgboostmulticlass_predict(enc_predictions,private_key) + # ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction']) + ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction']) + self.log.info(""final decrypted prediction at client side:: \\n""+str(ppxgboost_pred)) + return ppxgboost_pred + + +## For standalone testing +if __name__ == '__main__': + problemtype='Multi class classification' + data=None + targetfeature=None + ppxgboost_client_obj=client_ppxgboost(problemtype,data,targetfeature) + ppxgboost_dec_predictions = ppxgboost_client_obj.main_client() + # print(""In main: ppxgboost_dec_predictions: \\n"",ppxgboost_dec_predictions) import os +from typing import List, Tuple + +import numpy as np +from pandas import read_csv +from sklearn.model_selection import train_test_split +from sklearn.utils import shuffle +from tensorflow.keras.datasets import cifar10, fashion_mnist, imdb, mnist +from tensorflow.keras.preprocessing.sequence import pad_sequences + + +TrainTestData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] +TrainTestValData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] + + +def load_image(data_source: str) -> TrainTestData: + """""" + Loads one of the following image datasets: {mnist, famnist, cifar10}. + Normalizes the data. Returns X and y for both train and test datasets. + Dtypes of X's and y's will be `float32` and `int32` to be compatible with `tf_agents`. 
+ + :param data_source: Either mnist, famnist or cifar10 + :type data_source: str + + :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test + :rtype: tuple + """""" + reshape_shape = -1, 28, 28, 1 + + if data_source == ""mnist"": + (X_train, y_train), (X_test, y_test) = mnist.load_data() + + elif data_source == ""famnist"": + (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data() + + elif data_source == ""cifar10"": + (X_train, y_train), (X_test, y_test) = cifar10.load_data() + reshape_shape = -1, 32, 32, 3 + + else: + raise ValueError(""No valid `data_source`."") + + X_train = X_train.reshape(reshape_shape).astype(np.float32) # Float32 is the expected dtype for the observation spec in the env + X_test = X_test.reshape(reshape_shape).astype(np.float32) + + X_train /= 255 # /= is not available when casting int to float: https://stackoverflow.com/a/48948461/10603874 + X_test /= 255 + + y_train = y_train.reshape(y_train.shape[0], ).astype(np.int32) + y_test = y_test.reshape(y_test.shape[0], ).astype(np.int32) + + return X_train, y_train, X_test, y_test + + +def load_csv(fp_train: str, fp_test: str, label_col: str, drop_cols: List[str], normalization: bool = False) -> TrainTestData: + """""" + Loads any csv-file from local filepaths. Returns X and y for both train and test datasets. + Option to normalize the data with min-max normalization. + Only csv-files with float32 values for the features and int32 values for the labels supported. + Source for dataset: https://mimic-iv.mit.edu/ + + :param fp_train: Location of the train csv-file + :type fp_train: str + :param fp_test: Location of the test csv-file + :type fp_test: str + :param label_col: The name of the column containing the labels of the data + :rtype label_col: str + :param drop_cols: List of the names of the columns to be dropped. `label_col` gets dropped automatically + :rtype drop_cols: List of strings + :param normalization: Normalize the data with min-max normalization? + :type normalization: bool + + :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test + :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] + """""" + if not os.path.isfile(fp_train): + raise FileNotFoundError(f""`fp_train` {fp_train} does not exist."") + if not os.path.isfile(fp_test): + raise FileNotFoundError(f""`fp_test` {fp_test} does not exist."") + if not isinstance(normalization, bool): + raise TypeError(f""`normalization` must be of type `bool`, not {type(normalization)}"") + + X_train = read_csv(fp_train).astype(np.float32) # DataFrames directly converted to float32 + X_test = read_csv(fp_test).astype(np.float32) + + y_train = X_train[label_col].astype(np.int32) + y_test = X_test[label_col].astype(np.int32) + X_train.drop(columns=drop_cols + [label_col], inplace=True) # Dropping cols and label column + X_test.drop(columns=drop_cols + [label_col], inplace=True) + + # Other data sources are already normalized. RGB values are always in range 0 to 255. + if normalization: + mini, maxi = X_train.min(axis=0), X_train.max(axis=0) + X_train -= mini + X_train /= maxi - mini + X_test -= mini + X_test /= maxi - mini + + return X_train.values, y_train.values, X_test.values, y_test.values # Numpy arrays + + +def load_imdb(config: Tuple[int, int] = (5_000, 500)) -> TrainTestData: + """"""Loads the IMDB dataset. Returns X and y for both train and test datasets. + + :param config: Tuple of number of most frequent words and max length of each sequence. 
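+## Sketch of the normalisation detail in load_csv() above: the min/max are computed on the
+## training features only and then applied to both splits, so no information from the test set
+## leaks into the scaling. Toy arrays only; test values may legitimately fall outside [0, 1].
+import numpy as np
+
+X_tr = np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]], dtype=np.float32)
+X_te = np.array([[2.5, 40.0]], dtype=np.float32)
+mini, maxi = X_tr.min(axis=0), X_tr.max(axis=0)
+X_tr = (X_tr - mini) / (maxi - mini)
+X_te = (X_te - mini) / (maxi - mini)
+print(X_te)                               # [[0.25 1.5]]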
+ :type config: str + + :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test + :rtype: tuple + """""" + if not isinstance(config, (tuple, list)): + raise TypeError(f""{type(config)} is no valid datatype for `config`."") + if len(config) != 2: + raise ValueError(""Tuple length of `config` must be 2."") + if not all(i > 0 for i in config): + raise ValueError(""All integers of `config` must be > 0."") + + (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config[0]) + + X_train = pad_sequences(X_train, maxlen=config[1]) + X_test = pad_sequences(X_test, maxlen=config[1]) + + y_train = y_train.astype(np.int32) + y_test = y_test.ast" +"ype(np.int32) + + return X_train, y_train, X_test, y_test + + +def get_train_test_val(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: np.ndarray, min_classes: List[int], + maj_classes: List[int], imb_ratio: float = None, imb_test: bool = True, val_frac: float = 0.25, + print_stats: bool = True) -> TrainTestValData: + """""" + Imbalances data and divides the data into train, test and validation sets. + The imbalance rate of each individual dataset is approx. the same as the given `imb_ratio`. + + :param X_train: The X_train data + :type X_train: np.ndarray + :param y_train: The y_train data + :type y_train: np.ndarray + :param X_test: The X_test data + :type X_test: np.ndarray + :param y_test: The y_test data + :type y_test: np.ndarray + :param min_classes: List of labels of all minority classes + :type min_classes: list + :param maj_classes: List of labels of all majority classes. + :type maj_classes: list + :param imb_ratio: Imbalance ratio for minority to majority class: len(minority datapoints) / len(majority datapoints) + If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's. + :type imb_ratio: float + :param imb_test: Imbalance the test dataset? + :type imb_test: bool + :param val_frac: Fraction to take from X_train and y_train for X_val and y_val + :type val_frac: float + :param print_stats: Print the imbalance ratio of the imbalanced data? 
+ :type print_stats: bool + + :return: Tuple of (X_train, y_train, X_test, y_test, X_val, y_val) + :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] + """""" + if not 0 < val_frac < 1: + raise ValueError(f""{val_frac} is not in interval 0 < x < 1."") + if not isinstance(print_stats, bool): + raise TypeError(f""`print_stats` must be of type `bool`, not {type(print_stats)}."") + + X_train, y_train = imbalance_data(X_train, y_train, min_classes, maj_classes, imb_ratio=imb_ratio) + # Only imbalance test-data if imb_test is True + X_test, y_test = imbalance_data(X_test, y_test, min_classes, maj_classes, imb_ratio=imb_ratio if imb_test else None) + + # stratify=y_train to ensure class balance is kept between train and validation datasets + X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_frac, stratify=y_train) + + if print_stats: + p_train, p_test, p_val = [((y == 1).sum(), imbalance_ratio(y)) for y in (y_train, y_test, y_val)] + print(f""Imbalance ratio `p`:\\n"" + f""\\ttrain: n={p_train[0]}, p={p_train[1]:.6f}\\n"" + f""\\ttest: n={p_test[0]}, p={p_test[1]:.6f}\\n"" + f""\\tvalidation: n={p_val[0]}, p={p_val[1]:.6f}"") + + return X_train, y_train, X_test, y_test, X_val, y_val + + +def imbalance_data(X: np.ndarray, y: np.ndarray, min_class: List[int], maj_class: List[int], + imb_ratio: float = None) -> Tuple[np.ndarray, np.ndarray]: + """""" + Split data in minority and majority, only values in {min_class, maj_class} will be kept. + (Possibly) decrease minority rows to match the imbalance rate. + If initial imb_ratio of dataset is lower than given `imb_ratio`, the imb_ratio of the returned data will not be changed. + If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's. 
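+
+    Sketch of a typical call (added for illustration; the label values below are made up and
+    simply treat label 2 as the minority against labels 0, 1 and 3)::
+
+        X_imb, y_imb = imbalance_data(X_train, y_train, min_class=[2],
+                                      maj_class=[0, 1, 3], imb_ratio=0.1)
+        # y_imb is relabeled to 1 (minority) / 0 (majority); minority rows are
+        # capped at roughly 10% of the kept majority rows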
+ """""" + if not isinstance(X, np.ndarray): + raise TypeError(f""`X` must be of type `np.ndarray` not {type(X)}"") + if not isinstance(y, np.ndarray): + raise TypeError(f""`y` must be of type `np.ndarray` not {type(y)}"") + if X.shape[0] != y.shape[0]: + raise ValueError(""`X` and `y` must contain the same amount of rows."") + if not isinstance(min_class, (list, tuple)): + raise TypeError(""`min_class` must be of type list or tuple."") + if not isinstance(maj_class, (list, tuple)): + raise TypeError(""`maj_class` must be of type list or tuple."") + + if (imb_ratio is not None) and not (0 < imb_ratio < 1): + raise ValueError(f""{imb_ratio} is not in interval 0 < imb_ratio < 1."") + + if imb_ratio is None: # Do not imbalance data if no `imb_ratio` is given + imb_ratio = 1 + + X_min = X[np.isin(y, min_class)] # Mask the correct indexes + X_maj = X[np.isin(y, maj_class)] # Only keep data/labels for x in {min_class, maj_class} and forget all other + + min_len = int(X_maj.shape[0] * imb_ratio) # Amount of rows to select from minority classes to get to correct imbalance ratio + # Keep all majority rows, decrease minority rows to match `imb_ratio` + X_min = X_min[np.random.choice(X_min.shape[0], min(min_len, X_min.shape[0]), replace=False), :] + + X_imb = np.concatenate([X_maj, X_min]).astype(np.float32) + y_imb = np.concatenate((np.zeros(X_maj.shape[0]), np.ones(X_min.shape[0]))).astype(np.int32) + X_imb, y_imb = shuffle(X_imb, y_imb) + + return X_imb, y_imb + import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns +from sklearn.metrics import (auc, average_precision_score, confusion_matrix, + f1_score, precision_recall_curve, roc_curve,precision_score,recall_score) +from tensorflow import constant +from tf_agents.trajectories import time_step + + +def network_predictions(network, X: np.ndarray) -> dict: + """"""Computes y_pred using a given network. + Input is array of data entries. + + :param network: The network to use to calculate metrics + :type network: (Q)Network + :param X: X data, input to network + :type X: np.ndarray + + :return: Numpy array of predicted targets for given X + :rtype: np.ndarray + """""" + if not isinstance(X, np.ndarray): + raise ValueError(f""`X` must be of type `np.ndarray` not {type(X)}"") + + q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) + return np.argmax(q.numpy(), axis=1) # Max action for each x in X + + +def decision_function(network, X: np.ndarray) -> dict: + """"""Computes the score for the predicted class of each x in X using a given network. + Input is array of data entries. + + :param network: The network to use to calculate the score per x in X + :type network: (Q)Network + :param X: X data, input to network + :type X: np.ndarray + + :return: Numpy array of scores for given X + :rtype: np.ndarray + """""" + if not isinstance(X, np.ndarray): + raise ValueError(f""`X` must be of type `np.ndarray` not {type(X)}"") + + q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) + return np.max(q.numpy(), axis=1) # Value of max action for each x in X + + +def classification_metrics(y_true: list, y_pred: list) -> dict: + """"""Computes metrics using y_true and y_pred. 
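+
+    Small illustrative example (added for clarity; the keys shown follow the dictionary
+    returned at the end of this function)::
+
+        stats = classification_metrics([0, 1, 1, 0], [0, 1, 0, 0])
+        print(stats['F1'], stats['Precision'], stats['Recall'])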
+ + :param y_true: True labels + :type y_true: np.ndarray + :param y_pred: Predicted labels, corresponding to y_true + :type y_pred: np.ndarray + + :return: Dictionairy containing Geometric Mean, F1, Precision, Recall, TP, TN, FP, FN + :rtype: dict + """""" + if not isinstance(y_true, (list, tuple, np.ndarray)): + raise ValueError(f""`y_true` must be of type `list` not {type(y_true)}"") + if not isinstance(y_pred, (list, tuple, np.ndarray)): + raise ValueError(f""`y_pred` must be of type `list` not {type(y_pred)}"") + if len(y_true) != len(y_pred): + raise ValueError(""`X` and `y` must be of same length."") + + + #G_mean = np.sqrt(recall * specificity) # Geometric mean of recall and specificity + F1 = f1_score(y_true, y_pred, average='macro') # Default F-measure + recall = recall_score(y_true,y_pred,average='macro') + precision = precision_score(y_true,y_pred,average='macro') + return {""F1"": F1, ""Precision"": precision, ""Recall"": recall} + + +def plot_pr_curve(network, X_test: np.ndarray, y_test: np.ndarray, + X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover + """"""Plots PR curve of X_test and y_test of given network. + Optionally plots PR curve of X_train and y_train. + Average precision is shown in the legend. + + :param network: The network to use to calculate the PR curve + :type network: (Q)Network + :param X_test: X data, input to network + :type X_test: np.ndarray + :param y_test: True labels for `X_test` + :type y_test: np.ndarray + :param X_train: Optional X data to plot validation PR curve + :type X_train: np.ndarray + :param y_train: True labels for `X_val` + :type y_train: np.ndarray + + :return: None + :rtype: NoneType + """""" + plt.plot((0, 1), (1, 0), color=""black"", linestyle=""--"", label=""Baseline"") + # TODO: Consider changing baseline + + if X_train is not None and y_train is not None: + y_val_score = decision_function(network, X_train) + val_precision, val_recall, _ = precision_recall_curve(y_train, y_val_score) + val_AP = average_precision_score(y_train, y_val_score) + plt.plot(val_recall, val_precision, label=f""Train AP: {val_AP:.3f}"") + + y_test_score = decision_function(network, X_test) + test_precision, test_recall, _ = precision_recall_curve(y_test, y_test_score) + test_AP = average_precision_score(y_test, y_test_score) + + plt.plot(test_recall, test_precision, label=f""Test AP: {test_AP:.3f}"") + plt.xlim((-0.05, 1.05)) + plt.ylim((-0.05, 1.05)) + plt.xlabel(""Recall"") + plt.ylabel(""Precision"") + plt.title(""PR Curve"") + plt.gca().set_aspect(""equal"", adjustable=""box"") + plt.legend(loc=""lower left"") + plt.grid(True) + plt.show() + + +def plot_roc_curve(network, X_test: np.ndarray, y_test: np.ndarray, + X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover + """"""Plots ROC curve of X_test and y_test of given network. + Optionally plots ROC curve of X_train and y_train. + Average precision is shown in the legend. 
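+
+    Sketch of a typical call (illustrative; `model` stands for an already trained TrainDDQN or
+    TrainDQN wrapper from this package, and a matplotlib display backend is assumed)::
+
+        network = model.get_network()
+        plot_roc_curve(network, X_test, y_test, X_train, y_train)  # train curve is optional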
+ + :param network: The network to use to calculate the PR curve + :type network: (Q)Network + :param X_test: X data, input to network + :type X_test: np.ndarray + :param y_test: True labels for `X_test` + :type y_test: np.ndarray + :param X_train: Optional X data to plot validation PR curve + :type X_train: np.ndarray + :param y_train: True labels for `X_val` + :type y_train: np.ndarray + + :return: None + :rtype: NoneType + """""" + plt.plot((0, 1), (0, 1), color=""black"", linestyle=""--"", label=""Baseline"") + # TODO: Consider changing baseline + + if X_train is not None and y_train is not None: + y_train_score = decision_function(network, X_train) + fpr_train, tpr_train, _ = roc_curve(y_train, y_train_score) + plt.plot(fpr_train, tpr_train, label=f""Train AUROC: {auc(fpr_train, tpr_train):.2f}"") + + y_test_score = decision_function(network, X_test) + fpr_test, tpr_test, _ = roc_curve(y_test, y_test_score) + + plt.plot(fpr_test, tpr_test, label=f""Test AUROC: {auc(fpr_test, tpr_test):.2f}"") + plt.xlim((-0.05, 1.05)) + plt.ylim((-0.05, 1.05)) + plt.xlabel(""False Positive Rate"") + plt.ylabel(""True Positive Rate"") + plt.title(""ROC Curve"") + plt.gca().set_aspect(""equal"", adjustable=""box"") + plt.legend(loc=""lower right"") + plt.grid(True) + plt.show() + + +def plot_confusion_matrix(TP: int, FN: int, FP: int, TN: int)" +"-> None: # pragma: no cover + """"""Plots confusion matric of given TP, FN, FP, TN. + + :param TP: True Positive + :type TP: int + :param FN: False Negative + :type FN: int + :param FP: False Positive + :type FP: int + :param TN: True Nega" +"per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_save_path,log_dir=logFilePath) + model.compile_model(X_train,y_train,layers) + model.q_net.summary() + model.train(xval,yval) + network = model.get_network() + predictedytrain=network_predictions(network,np.array(xtrain)) + + predictedytest = network_predictions(network,np.array(xtest)) + + if ""DDQN"" == algorithm: + start = time.time() + modelName = ""DDQN"" + model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update,log_dir=logFilePath) + + model.compile_model(X_train,y_train,layers) + model.q_net.summary() + model.train(xval,yval) + network = model.get_network() + predictedytrain=network_predictions(network,np.array(xtrain)) + + predictedytest = network_predictions(network,np.array(xtest)) + score = objClf.get_score(self.scoreParam,ytest,predictedytest) + score = round(score,2) + + return (network,self.rl_config,score,algorithm,-1,-1,-1) + except Exception as inst: + self.log.info( '\\n-----> RL Failed!!!.'+str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + + + import os +import pickle +from datetime import datetime + +import numpy as np +import tensorflow as tf +from reinforcement.environments.classifierenv import ClassifierEnv +from reinforcement.metrics import (classification_metrics, decision_function, + network_predictions, plot_pr_curve, plot_roc_curve) +from reinforcement.utils import imbalance_ratio +from tensorflow import data +from tensorflow.keras.optimizers import Adam +from 
tf_agents.agents.dqn.dqn_agent import DdqnAgent +from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver +from tf_agents.environments.tf_py_environment import TFPyEnvironment +from tf_agents.networks.sequential import Sequential +from tf_agents.policies.random_tf_policy import RandomTFPolicy +from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\ + TFUniformReplayBuffer +from tf_agents.utils import common + + + +class TrainDDQN(): + """"""Wrapper for DDQN training, validation, saving etc."""""" + + def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int, + model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None, + collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0, + progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None: + """""" + Wrapper to make training easier. + Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial + + :param episodes: Number of training episodes + :type episodes: int + :param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts + :type warmup_steps: int + :param learning_rate: Learning Rate for the Adam Optimizer + :type learning_rate: float + :param gamma: Discount factor for the Q-values + :type gamma: float + :param min_epsilon: Lowest and final value for epsilon + :type min_epsilon: float + :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon` + :type decay_episodes: int + :param model_path: Location to save the trained model + :type model_path: str + :param log_dir: Location to save the logs, usefull for TensorBoard + :type log_dir: str + :param batch_size: Number of samples in minibatch to train on each step + :type batch_size: int + :param memory_length: Maximum size of the Replay Buffer + :type memory_length: int + :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode + :type collect_steps_per_episode: int + :param collect_every: Step interval to collect data during training + :type collect_every: int + :param val_every: Validate the model every X episodes using the `collect_metrics()` function + :type val_every: int + :param target_update_period: Update the target Q-network every X episodes + :type target_update_period: int + :param target_update_tau: Parameter for softening the `target_update_period` + :type target_update_tau: float + :param progressbar: Enable or disable the progressbar for collecting data and training + :type progressbar: bool + + :return: None + :rtype: NoneType + """""" + self.episodes = episodes # Total episodes + self.warmup_steps = warmup_steps # Amount of warmup steps before training + self.batch_size = batch_size # Batch size of Replay Memory + self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode + self.collect_every = collect_every # Step interval to collect data during training + self.learning_rate = learning_rate # Learning Rate + self.gamma = gamma # Discount factor + self.min_epsilon = min_epsilon # Minimal chance of choosing random action + self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON` + self.target_update_period = target_update_period # Period for soft updates + self.target_update_tau = target_update_tau + self.progressbar = 
progressbar # Enable or disable the progressbar for collecting data and training + self.n_step_update = n_step_update + self.gradient_clipping = gradient_clipping # Clip the loss + self.compiled = False + NOW = ""DDQN"" #datetime.now().strftime(""%Y%m%d_%H%M%S"") + + if memory_length is not None: + self.memory_length = memory_length # Max Replay Memory length + else: + self.memory_length = warmup_steps + + if val_every is not None: + self.val_every = val_every # Validate the policy every `val_every` episodes + else: + self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50 + + if model_path is not None: + #if os.path.exists(model_path + ""/"" + NOW + "".pkl""): + # os.remove(model_path + ""/"" + NOW + "".pkl"") + self.model_path = model_path + ""/"" + NOW + "".pkl"" + else: + self.model_path = ""./models/"" + NOW + "".pkl"" + + if log_dir is None: + log_dir = ""./logs/"" + NOW + self.writer = tf.summary.create_file_writer(log_dir) + + def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None: + """"""Initializes the neural networks, DDQN-agent, collect policies and replay buffer. + + :param X_train: Training data for the model. + :type X_train: np.ndarray + :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class. + :param y_train: np.ndarray + :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer. + :type layers: list + :param imb_ratio: The imbalance ratio of the data. + :type imb_ratio: float + :param loss_fn: Callable loss function + :type loss_fn: tf.compat.v1.losses + + :return: None + :rtype: NoneType + """""" + if imb_ratio is None: + imb_ratio = imbalance_ratio(y_train) + + self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio)) + self.global_episode = tf.Variable(0, name=""global_episode"", dtype=np.int64, trainable=False) # Global train episode counter + + # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339 + epsilon_decay = tf.compat.v1.train.polynomial_decay( + 1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon) + + self.q_net = Sequential(layers, self.train_env.observation_spec()) + + self.agent = DdqnAgent(self.train_env.time_step_spec(), + self.train_env.action_spec(), + q_network=self.q_net, + optimizer=Adam(learning_rate=self.learning_rate), + td_errors_loss_fn=loss_fn, + train_step_counter=self.global_episode, + target_update_period=self.target_update_period, + target_update_tau=self.target_update_tau, + gamma=self.gamma, + epsilon_greedy=epsilon_decay, + n_step_update=self.n_step_update, + gradient_clipping=self.gradient_clipping) + self.agent.initialize() + + self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec()) + self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec, + batch_size=self.train_env.batch_size, + max_length=self.memory_length) + + self.warmup_driver = DynamicStepDriver(self.train_env, + self.random_policy, + observers=[self.replay_buffer.add_batch], + num_steps=self.warmup_steps) # Uses a random policy + + self.collect_driver = DynamicStepDriver(self.train_env, + self.agent.collect_policy, + observers=[self.replay_buffer.add_batch], + num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent + + self.agent.train = common.function(self.agent.train) # Optimalization + 
self.warmup_driver.run = common.function(self.warmup_driver.run) + self.collect_driver.run = common.function(self.collect_driver.run) + + self.compiled = True + + def train(self, *args) -> None: + """"""Starts the training of the model. Includes warmup period, metrics collection and model saving. + + :param *args: All arguments will be passed to `collect_metrics()`. + This can be usefull to pass callables, testing environments or validation data. + Overwrite the TrainDDQN.collect_metrics() function to use your own *args. + :type *args: Any + + :return: None + :rtype: NoneType, last step is saving the model as a side-effect + """""" + assert self.compiled, ""Model must be compiled with model.compile_model(X_train, y_train, layers) before training."" + + # Warmup period, fill memory with random actions + if self.progressbar: + print(f""\\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\\033[0m"") + + self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size)) + + if self.progressbar: + print(f""\\033[92m{self.replay_buffer.num_frames():_} frames collected!\\033[0m"") + + dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1, + num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE) + iterator = iter(dataset) + + def _train(): + experiences, _ = next(iterator) + return self.agent.train(experiences).loss + _train = common.function(_train) # Optimalization + + ts = None + policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size) + self.collect_metrics(*args) # Initial collection for step 0 + + for _ in range(self.episodes): + if not self.global_episode % self.collect_every: + # Collect a few steps using collect_policy and save to `replay_buffer` + if self.collect_steps_per_episode != 0: + ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state) + + + # Sample a batch of data from `replay_buffer` and update the agent's network + train_loss = _train() + + if not self.global_episode % self.val_every: + with self.writer.as_default(): + tf.summary.scalar(""train_loss"", train_loss, step=self.global_episode) + + self.collect_metrics(*args) + + + def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None): + """"""Collects metrics using the trained Q-network. + + :param X_val: Features of validation data, same shape as X_train + :type X_val: np.ndarray + :param y_val: Labels of validation data, same shape as y_train + :type y" +"_val: np.ndarray + :param save_best: Saving the best model of all validation runs based on given metric: + Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN} + This improves stability since the model at the last episode is not guaranteed to be the best model. 
+ :type save_best: str + """""" + y_pred = network_predictions(self.agent._target_q_network, X_val) + stats = classification_metrics(y_val, y_pred) + avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X + + if save_best is not None: + if not hasattr(self, ""best_score""): # If no best model yet + self.best_score = 0.0 + + if stats.get(save_best) >= self.best_score: # Overwrite best model + self.save_network() # Saving directly to avoid shallow copy without trained weights + self.best_score = stats.get(save_best) + + with self.writer.as_default(): + tf.summary.scalar(""AverageQ"", avgQ, step=self.global_episode) # Average Q-value for this epoch + for k, v in stats.items(): + tf.summary.scalar(k, v, step=self.global_episode) + + def evaluate(self,X_train,y_train, X_test, y_test): + """""" + Final evaluation of trained Q-network with X_test and y_test. + Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place. + + :param X_test: Features of test data, same shape as X_train + :type X_test: np.ndarray + :param y_test: Labels of test data, same shape as y_train + :type y_test: np.ndarray + :param X_train: Features of train data + :type X_train: np.ndarray + :param y_train: Labels of train data + :type y_train: np.ndarray + """""" + #if hasattr(self, ""best_score""): + # print(f""\\033[92mBest score: {self.best_score:6f}!\\033[0m"") + # network = self.load_network(self.model_path) # Load best saved model + #else: + # network = self.agent._target_q_network # Load latest target model + + #network = self.load_network(self.model_path) + #if (X_train is not None) and (y_train is not None): + # plot_pr_curve(network, X_test, y_test, X_train, y_train) + # plot_roc_curve(network, X_test, y_test, X_train, y_train) + + y_pred = network_predictions(self.agent._target_q_network, X_test) + return classification_metrics(y_test, y_pred) + + def get_network(self): + #network = self.load_network(self.model_path) + return self.agent._target_q_network + + def save_network(self, filename_rl): #usnish + """"""Saves Q-network as pickle to `model_path`."""""" + with open(self.filename_rl, ""wb"") as f: # Save Q-network as pickle + pickle.dump(self.agent._target_q_network, f) + + @staticmethod + def load_network(fp: str): + """"""Static method to load Q-network pickle from given filepath. + + :param fp: Filepath to the saved pickle of the network + :type fp: str + + :returns: The network-object loaded from a pickle file. 
+ :rtype: tensorflow.keras.models.Model + """""" + with open(fp, ""rb"") as f: # Load the Q-network + network = pickle.load(f) + return network + import os +import pickle +from datetime import datetime + +import numpy as np +import tensorflow as tf +from reinforcement.environments.classifierenv import ClassifierEnv +from reinforcement.metrics import (classification_metrics, decision_function, + network_predictions, plot_pr_curve, plot_roc_curve) +from reinforcement.utils import imbalance_ratio +from tensorflow import data +from tensorflow.keras.optimizers import Adam +#from tf_agents.agents.dqn.dqn_agent import DdqnAgent +from tf_agents.agents import DqnAgent +from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver +from tf_agents.environments.tf_py_environment import TFPyEnvironment +from tf_agents.networks.sequential import Sequential +from tf_agents.policies.random_tf_policy import RandomTFPolicy +from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\ + TFUniformReplayBuffer +from tf_agents.utils import common + + +class TrainDQN(): + """"""Wrapper for DDQN training, validation, saving etc."""""" + + def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int, + model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None, + collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0, + progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None: + """""" + Wrapper to make training easier. + Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial + + :param episodes: Number of training episodes + :type episodes: int + :param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts + :type warmup_steps: int + :param learning_rate: Learning Rate for the Adam Optimizer + :type learning_rate: float + :param gamma: Discount factor for the Q-values + :type gamma: float + :param min_epsilon: Lowest and final value for epsilon + :type min_epsilon: float + :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon` + :type decay_episodes: int + :param model_path: Location to save the trained model + :type model_path: str + :param log_dir: Location to save the logs, usefull for TensorBoard + :type log_dir: str + :param batch_size: Number of samples in minibatch to train on each step + :type batch_size: int + :param memory_length: Maximum size of the Replay Buffer + :type memory_length: int + :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode + :type collect_steps_per_episode: int + :param collect_every: Step interval to collect data during training + :type collect_every: int + :param val_every: Validate the model every X episodes using the `collect_metrics()` function + :type val_every: int + :param target_update_period: Update the target Q-network every X episodes + :type target_update_period: int + :param target_update_tau: Parameter for softening the `target_update_period` + :type target_update_tau: float + :param progressbar: Enable or disable the progressbar for collecting data and training + :type progressbar: bool + + :return: None + :rtype: NoneType + """""" + self.episodes = episodes # Total episodes + self.warmup_steps = warmup_steps # Amount of warmup steps before training + self.batch_size = batch_size # Batch size of 
Replay Memory + self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode + self.collect_every = collect_every # Step interval to collect data during training + self.learning_rate = learning_rate # Learning Rate + self.gamma = gamma # Discount factor + self.min_epsilon = min_epsilon # Minimal chance of choosing random action + self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON` + self.target_update_period = target_update_period # Period for soft updates + self.target_update_tau = target_update_tau + self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training + self.n_step_update = n_step_update + self.gradient_clipping = gradient_clipping # Clip the loss + self.compiled = False + NOW = ""DQN"" #datetime.now().strftime(""%Y%m%d_%H%M%S"") + + if memory_length is not None: + self.memory_length = memory_length # Max Replay Memory length + else: + self.memory_length = warmup_steps + + if val_every is not None: + self.val_every = val_every # Validate the policy every `val_every` episodes + else: + self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50 + + if model_path is not None: + #if os.path.exists(model_path + ""/"" + NOW + "".pkl""): + # os.remove(model_path + ""/"" + NOW + "".pkl"") + self.model_path = model_path + ""/"" + NOW + "".pkl"" + else: + self.model_path = ""./models/"" + NOW + "".pkl"" + + if log_dir is None: + log_dir = ""./logs/"" + NOW + self.writer = tf.summary.create_file_writer(log_dir) + + def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None: + """"""Initializes the neural networks, DDQN-agent, collect policies and replay buffer. + + :param X_train: Training data for the model. + :type X_train: np.ndarray + :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class. + :param y_train: np.ndarray + :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer. + :type layers: list + :param imb_ratio: The imbalance ratio of the data. 
+ :type imb_ratio: float + :param loss_fn: Callable loss function + :type loss_fn: tf.compat.v1.losses + + :return: None + :rtype: NoneType + """""" + if imb_ratio is None: + imb_ratio = imbalance_ratio(y_train) + + self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio)) + self.global_episode = tf.Variable(0, name=""global_episode"", dtype=np.int64, trainable=False) # Global train episode counter + + # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339 + epsilon_decay = tf.compat.v1.train.polynomial_decay( + 1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon) + + self.q_net = Sequential(layers, self.train_env.observation_spec()) + + self.agent = DqnAgent(self.train_env.time_step_spec(), + self.train_env.action_spec(), + q_network=self.q_net, + optimizer=Adam(learning_rate=self.learning_rate), + td_errors_loss_fn=loss_fn, + train_step_counter=self.global_episode, + target_update_period=self.target_update_period, + target_update_tau=self.target_update_tau, + gamma=self.gamma, + epsilon_greedy=epsilon_decay, + n_step_update=self.n_step_update, + gradient_clipping=self.gradient_clipping) + self.agent.initialize() + + self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec()) + self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec, + batch_size=self.train_env.batch_size, + max_length=self.memory_length) + + self.warmup_driver = DynamicStepDriver(self.train_env, + self.random_policy, + observers=[self.replay_buffer.add_batch], + num_steps=self.warmup_steps) # Uses a random policy + + self.collect_driver = DynamicStepDriver(self.train_env, + self.agent.collect_policy, + observers=[self.replay_buffer.add_batch], + num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent + + self.agent.train = common.function(self.agent.train) # Optimalization + self.warmup_driver.run = common.function(self.warmup_driver.run) + self.collect_driver.run = common.function(self.collect_driver.run) + + self.compiled = True + + def train(self, *args) -> None: + """"""Starts the training of the model. Includes warmup period, metrics collection and model saving. + + :param *args: All arguments will be passed to `collect_metrics()`. + This can be usefull to pass callables, testing environments or validation data. + Overwrite the TrainDQN.collect_metrics() function to use your own *args. + :type *args: Any + + :return: None + :rtype: NoneType, last step is saving the model as a side-effect + """""" + assert self.compiled, ""Model must be compiled with model.compile_model(X_train, y_train, layers) before training."" + + # Warmup period, fill memory with random actions + if self.progressbar: + print(f""\\033[92mCollecting data for {self.warmup_steps:_} steps... 
This might take a few minutes...\\033[0m"") + + self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size)) + + if self.progressbar: + print(f""\\033[92m{self.replay_buffer.num_frames():_} frames collected!\\033[0m"") + + dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1, + num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE) + iterator = iter(dataset) + + def _train" +"(): + experiences, _ = next(iterator) + return self.agent.train(experiences).loss + _train = common.function(_train) # Optimalization + + ts = None + policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size) + print('Before Collect Metrics') + self.collect_metrics(*args) # Initial collection for step 0 + print('After Collect Metrics') + + for _ in range(self.episodes): + if not self.global_episode % self.collect_every: + # Collect a few steps using collect_policy and save to `replay_buffer` + if self.collect_steps_per_episode != 0: + ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state) + + + # Sample a batch of data from `replay_buffer` and update the agent's network + train_loss = _train() + + if not self.global_episode % self.val_every: + with self.writer.as_default(): + tf.summary.scalar(""train_loss"", train_loss, step=self.global_episode) + + self.collect_metrics(*args) + + + def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None): + """"""Collects metrics using the trained Q-network. + + :param X_val: Features of validation data, same shape as X_train + :type X_val: np.ndarray + :param y_val: Labels of validation data, same shape as y_train + :type y_val: np.ndarra + :param save_best: Saving the best model of all validation runs based on given metric: + Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN} + This improves stability since the model at the last episode is not guaranteed to be the best model. + :type save_best: str + """""" + y_pred = network_predictions(self.agent._target_q_network, X_val) + print('classification_metrics') + stats = classification_metrics(y_val, y_pred) + print('Before AVGQ') + avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X + print('After AVGQ') + if save_best is not None: + if not hasattr(self, ""best_score""): # If no best model yet + self.best_score = 0.0 + + if stats.get(save_best) >= self.best_score: # Overwrite best model + self.save_network() # Saving directly to avoid shallow copy without trained weights + self.best_score = stats.get(save_best) + + with self.writer.as_default(): + tf.summary.scalar(""AverageQ"", avgQ, step=self.global_episode) # Average Q-value for this epoch + for k, v in stats.items(): + tf.summary.scalar(k, v, step=self.global_episode) + + def evaluate(self, X_test, y_test): + """""" + Final evaluation of trained Q-network with X_test and y_test. + Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place. 
+ + :param X_test: Features of test data, same shape as X_train + :type X_test: np.ndarray + :param y_test: Labels of test data, same shape as y_train + :type y_test: np.ndarray + :param X_train: Features of train data + :type X_train: np.ndarray + :param y_train: Labels of train data + :type y_train: np.ndarray + """""" + #if hasattr(self, ""best_score""): + # print(f""\\033[92mBest score: {self.best_score:6f}!\\033[0m"") + # network = self.load_network(self.model_path) # Load best saved model + #else: + # network = self.agent._target_q_network # Load latest target model + + #network = self.load_network(self.model_path) + #if (X_train is not None) and (y_train is not None): + # plot_pr_curve(network, X_test, y_test, X_train, y_train) + # plot_roc_curve(network, X_test, y_test, X_train, y_train) + + y_pred = network_predictions(self.agent._target_q_network, X_test) + return classification_metrics(y_test, y_pred) + + def save_network(self): + print('save_network') + """"""Saves Q-network as pickle to `model_path`."""""" + with open(self.model_path, ""wb"") as f: # Save Q-network as pickle + pickle.dump(self.agent._target_q_network, f) + + def get_network(self): + """"""Static method to load Q-network pickle from given filepath. + + :param fp: Filepath to the saved pickle of the network + :type fp: str + + :returns: The network-object loaded from a pickle file. + :rtype: tensorflow.keras.models.Model + """""" + return self.agent._target_q_network + import numpy as np +from tf_agents.environments.py_environment import PyEnvironment +from tf_agents.specs.array_spec import ArraySpec, BoundedArraySpec +from tf_agents.trajectories import time_step as ts + + +class ClassifierEnv(PyEnvironment): + """""" + Custom `PyEnvironment` environment for imbalanced classification. + Based on https://www.tensorflow.org/agents/tutorials/2_environments_tutorial + """""" + + def __init__(self, X_train: np.ndarray, y_train: np.ndarray, imb_ratio: float): + """"""Initialization of environment with X_train and y_train. + + :param X_train: Features shaped: [samples, ..., ] + :type X_train: np.ndarray + :param y_train: Labels shaped: [samples] + :type y_train: np.ndarray + :param imb_ratio: Imbalance ratio of the data + :type imb_ratio: float + + :returns: None + :rtype: NoneType + + """""" + #print('1') + self._action_spec = BoundedArraySpec(shape=(), dtype=np.int32, minimum=0, maximum=(len(np.unique(y_train)) - 1), name=""action"") + #print(y_train) + self._observation_spec = ArraySpec(shape=X_train.shape[1:], dtype=X_train.dtype, name=""observation"") + #print('3') + self._episode_ended = False + + self.X_train = X_train + self.y_train = y_train + self.imb_ratio = imb_ratio # Imbalance ratio: 0 < imb_ratio < 1 + self.id = np.arange(self.X_train.shape[0]) # List of IDs to connect X and y data + + self.episode_step = 0 # Episode step, resets every episode + self._state = self.X_train[self.id[self.episode_step]] + + def action_spec(self): + """""" + Definition of the discrete actionspace. + 1 for the positive/minority class, 0 for the negative/majority class. + """""" + return self._action_spec + + def observation_spec(self): + """"""Definition of the continous statespace e.g. 
the observations in typical RL environments."""""" + return self._observation_spec + + def _reset(self): + """"""Shuffles data and returns the first state of the shuffled data to begin training on new episode."""""" + np.random.shuffle(self.id) # Shuffle the X and y data + self.episode_step = 0 # Reset episode step counter at the end of every episode + self._state = self.X_train[self.id[self.episode_step]] + self._episode_ended = False # Reset terminal condition + + return ts.restart(self._state) + + def _step(self, action: int): + """""" + Take one step in the environment. + If the action is correct, the environment will either return 1 or `imb_ratio` depending on the current class. + If the action is incorrect, the environment will either return -1 or -`imb_ratio` depending on the current class. + """""" + if self._episode_ended: + # The last action ended the episode. Ignore the current action and start a new episode + return self.reset() + + env_action = self.y_train[self.id[self.episode_step]] # The label of the current state + self.episode_step += 1 + + if action == env_action: # Correct action + if env_action: # Minority + reward = 1 # True Positive + else: # Majority + reward = self.imb_ratio # True Negative + + else: # Incorrect action + if env_action: # Minority + reward = -1 # False Negative + self._episode_ended = True # Stop episode when minority class is misclassified + else: # Majority + reward = -self.imb_ratio # False Positive + + if self.episode_step == self.X_train.shape[0] - 1: # If last step in data + self._episode_ended = True + + self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint + + if self._episode_ended: + return ts.termination(self._state, reward) + else: + return ts.transition(self._state, reward) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' +from sklearn.preprocessing import MinMaxScaler, LabelEncoder + +import numpy as np + +import logging +logging.getLogger('tensorflow').disabled = True + +import aif360 +from aif360.datasets import StandardDataset +from aif360.algorithms.preprocessing.reweighing import Reweighing +from aif360.algorithms.preprocessing import DisparateImpactRemover + +class DebiasingManager: + + def __init__(self): + self.data = '' + + # ------------------------------- Debiasing Changes ------------------------------- + def get_attributes(self, data, selected_attr=None): + unprivileged_groups = [] + privileged_groups = [] + if selected_attr == None: + selected_attr = data.protected_attribute_names + + for attr in selected_attr: + idx = data.protected_attribute_names.index(attr) + privileged_groups.append({attr:data.privileged_protected_attributes[idx]}) + unprivileged_groups.append({attr:data.unprivileged_protected_attributes[idx]}) + return privileged_groups, unprivileged_groups + # ------------------------------- ------------------------------- + + + def Bias_Mitigate(self, dataFrame, protected_feature, privileged_className, target_feature, algorithm): + + # log = logging.getLogger('eion') + # log.propagate = False + + data_encoded = dataFrame.copy() + categorical_names = {} + encoders = {} + + dataFrame = dataFrame.replace('Unknown', 'NA') + dataFrame = dataFrame.replace(np.nan, 'NA') + try: + # Label-Encoding + for feature in dataFrame.columns: + le = LabelEncoder() + le.fit(data_encoded[feature]) + data_encoded[feature] = le.transform(data_encoded[feature]) + categorical_names[feature] = le.classes_ + encoders[feature] = le + + privileged_class = np.where(categorical_names[protected_feature] == privileged_className)[0] + target_feature_count = len(data_encoded[target_feature].value_counts()) + # Check if it's BinaryLabel + if target_feature_count == 2: + binaryLabelDataset = aif360.datasets.BinaryLabelDataset( + favorable_label='1', + unfavorable_label='0', + df=data_encoded, + label_names=[target_feature], + protected_attribute_names=[protected_feature]) + data_orig = binaryLabelDataset + + # Check if it's Non-BinaryLabel + if target_feature_count > 2: + data_orig = StandardDataset(data_encoded, + label_name=target_feature, + favorable_classes=[1], + protected_attribute_names=[protected_feature], + privileged_classes=[privileged_class]) + + if algorithm == 'DIR': + DIR = DisparateImpactRemover(repair_level=0.9) + data_transf_train = DIR.fit_transform(data_orig) + # log.info('Status:-|... DIR applied on input dataset') + else: + privileged_groups, unprivileged_groups = self.get_attributes(data_orig, selected_attr=[protected_feature]) + RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) + data_transf_train = RW.fit_transform(data_orig) + # log.info('Status:-|... 
Reweighing applied on input dataset') + + transf_dataFrame = data_transf_train.convert_to_dataframe()[0] + + data_decoded = transf_dataFrame.copy().astype('int') + for column in data_decoded.columns: + data_decoded[column] = encoders[column].inverse_transform(data_decoded[column]) + + debiased_dataFrame = data_decoded + + except Exception as e: + print(e) + debiased_dataFrame = dataFrame + + return debiased_dataFrame + #!/usr/bin/env python +""""""Django's command-line utility for administrative tasks."""""" +import os +import sys + + +def main(): + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + ""Couldn't import Django. Are you sure it's installed and "" + ""available on your PYTHONPATH environment variable? Did you "" + ""forget to activate a virtual environment?"" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() + """"""mpgWebApp URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en" +"/3.0/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +"""""" +from django.contrib import admin +from django.urls import path +from django.urls import include, re_path +from appfe.api import inferenceApis +from django.urls import path, re_path +urlpatterns = [ + #path('predict', inferenceApis.apipredict,name='PredictAPI'), + path('predict', inferenceApis.apipredict,name='PredictAPI'), + path('spredict',inferenceApis.apispredict,name='SecurePredictAPI'), + path('monitoring', inferenceApis.apiinputdrift,name='MonitoringAPI'), + path('performance', inferenceApis.apioutputdrift,name='Performance'), + path('xplain', inferenceApis.apixplain,name='Xplain'), + path('features',inferenceApis.apifeatures,name='Features'), + path('uploadfile',inferenceApis.uploadfile,name='uploadfile'), + path('retrain',inferenceApis.retrain,name='retrain'), + path('trainstatus',inferenceApis.trainstatus,name='trainstatus'), + path('publish',inferenceApis.publish,name='publish'), + path('geteda',inferenceApis.geteda,name='geteda'), + path('pattern_anomaly_settings',inferenceApis.apiPatternAnomalySettings,name='PatternAnomalySettings'), + path('pattern_anomaly_predict',inferenceApis.apiPatternAnomalyPredict,name='PatternAnomalyPredict') +] + + +#df=pd.read_csv(""C:\\Project\\Analytics\\Deployment\\germancredit_9\\germancreditdata.csv"") +# +#bool_cols = [col for col in df if np.isin(df[col].dropna().unique(), [0, 1]).all()] +# +#bool_cols + from django.shortcuts import render +from django.http import HttpResponse +from appbe.dataPath import DEPLOY_LOCATION +from rest_framework import status +from django.db.models import Max, F +import os,sys +import time +import json +import re +import pandas as pd +from rest_framework.permissions import IsAuthenticated +from django.views.decorators.csrf import csrf_exempt +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appfe.modelTraining.models import usecasedetails +from 
appfe.modelTraining.models import Existusecases +import subprocess +from pathlib import Path + +user_records = {} +@csrf_exempt +def geteda(request): + if request.method == 'POST': + if request.content_type == 'application/json': + output = {} + try: + data=request.body.decode('utf-8') + data = json.loads(data) + file_id = data['fileid'] + edaOptions = 'All' + if 'options' in data: + edaOptions = data['options'] + dataFile = os.path.join(DATA_FILE_PATH,file_id) + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile) + if 'dataoverview' in edaOptions.lower() or 'all' in edaOptions.lower(): + dataDistributionDF = eda_obj.dataDistribution() + dataDistributionJson = dataDistributionDF.to_json(orient = 'records') + output['DataOverview'] = json.loads(dataDistributionJson) + if 'top10records' in edaOptions.lower() or 'all' in edaOptions.lower(): + top10df = eda_obj.getTopRows(10) + top10dfJson = top10df.to_json(orient = 'records') + output['Top10Records'] = json.loads(top10dfJson) + if 'datadistribution' in edaOptions.lower() or 'all' in edaOptions.lower(): + distributionJson = eda_obj.getDistribution() + output['DataDistribution'] = distributionJson + if ""featureimportance"" in edaOptions.lower() or 'all' in edaOptions.lower(): + pca_map = eda_obj.getPCATop10Features() + pca_details = pca_map + pca_df=pd.DataFrame() + if len(pca_details) > 0: + pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(2) + pca_json = pca_df.to_json(orient=""records"") + output['FeatureImportance'] = json.loads(pca_json) + else: + pca_json = 'Error During feature importance processing' + output['FeatureImportance'] = pca_json + if ""correlationanalysis"" in edaOptions.lower() or 'all' in edaOptions.lower(): + corr_mat = eda_obj.getCorrelationMatrix() + if not corr_mat.empty: + corr_mat = corr_mat.to_json(orient=""columns"") + output['CorrelationAnalysis'] = json.loads(corr_mat) + else: + output['CorrelationAnalysis'] = 'Error during correlation analysis' + if ""unsupervisedclustering"" in edaOptions.lower() or 'all' in edaOptions.lower(): + clusteringDetails,hopkins_val = eda_obj.getClusterDetails() + output['UnsupervisedClustering'] = clusteringDetails + output['HopkinsValue'] = hopkins_val + + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""Success"",""output"":output}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + +@csrf_exempt +def publish(request): + usecaseid = request.GET[""usecaseid""] + currentVersion = request.GET[""version""] + if request.method == 'POST': + if request.content_type == 'application/json': + try: + from appbe.models import publishmodel + status,msg,url = publishmodel(request,usecaseid,currentVersion,Existusecases,usecasedetails) + return HttpResponse(json.dumps({""status"":status,""msg"":msg,""url"":url}),content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""model training exception""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +@csrf_exempt +def trainstatus(request): + usecaseid = request.GET[""usecaseid""] + currentVersion = request.GET[""version""] + if request.method == 
'POST':
+        if request.content_type == 'application/json':
+            try:
+                data = request.body.decode('utf-8')
+                data = json.loads(data)
+                trainingid = int(data['trainingid'])
+                model = Existusecases.objects.get(id=trainingid)
+                if model.Status.lower() == 'success':
+                    return HttpResponse(json.dumps({""status"":""success"",""trainingStatus"":""Trained"",""usecaseid"":str(usecaseid),""version"":str(model.Version)}),content_type=""application/json"")
+                else:
+                    from appbe.training import checkversionrunningstatus
+                    status = checkversionrunningstatus(trainingid,usecasedetails,Existusecases)
+                    if status.lower() == 'success':
+                        return HttpResponse(json.dumps({""status"":""success"",""trainingStatus"":""Trained"",""usecaseid"":str(usecaseid),""version"":str(model.Version)}),content_type=""application/json"")
+                    else:
+                        return HttpResponse(json.dumps({""status"":""success"",""trainingStatus"":status,""usecaseid"":str(usecaseid),""version"":str(model.Version)}),content_type=""application/json"")
+            except Exception as e:
+                print(e)
+                return HttpResponse(json.dumps({""status"":""error"",""msg"":""model training exception""}),content_type=""application/json"")
+        else:
+            return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"")
+    else:
+        msg = help_text(request,usecaseid,currentVersion)
+        return HttpResponse(msg,content_type=""text/plain"")
+@csrf_exempt
+def retrain(request):
+    usecaseid = request.GET[""usecaseid""]
+    currentVersion = request.GET[""version""]
+    if request.method == 'POST':
+        if request.content_type == 'application/json':
+            try:
+                data = request.body.decode('utf-8')
+                data = json.loads(data)
+                file_id = data['fileid']
+                p = usecasedetails.objects.get(usecaseid=usecaseid)
+                s1 = Existusecases.objects.filter(ModelName=p).annotate(maxver=Max('ModelName__existusecases__Version'))
+                config_list = s1.filter(Version=F('maxver'))
+                if config_list.count() > 0:
+                    Version = config_list[0].Version
+                    Version = Version + 1
+                    model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
+                    indexVal = 0
+                    configfile = str(model[indexVal].ConfigPath)
+                    with open(configfile, ""r"") as f:
+                        configSettings = f.read()
+                    configSettingsJson = json.loads(configSettings)
+                    configSettingsJson['basic']['modelVersion'] = str(Version)
+                    dataFile = configSettingsJson['basic']['dataLocation']
+                    if os.path.isfile(dataFile):
+                        # merge the newly uploaded records with the existing training data
+                        data = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
+                        dataFile = os.path.join(DATA_FILE_PATH,file_id)
+                        data2 = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
+                        data = pd.concat([data, data2],ignore_index=True)
+                        data.to_csv(dataFile,index=False)
+                    dataFile = os.path.join(DATA_FILE_PATH,file_id)
+                    configSettingsJson['basic']['dataLocation'] = str(dataFile)
+                    updatedConfigSettings = json.dumps(configSettingsJson)
+                    filetimestamp = str(int(time.time()))
+                    outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json')
+                    config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json')
+                    with open(config_json_filename, ""w"") as fpWrite:
+                        fpWrite.write(updatedConfigSettings)
+                    ps = Existusecases(DataFilePath=str(dataFile), DeployPath='', Status='Not Trained',ConfigPath=str(config_json_filename), Version=Version, ModelName=p,TrainOuputLocation=str(outputfile))
+                    ps.save()
+                    scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
+                    outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','training','-c',config_json_filename])
+                    ps.Status = 'Running'
+                    ps.trainingPID = outputStr.pid
+                    ps.save()
+                    return HttpResponse(json.dumps({""status"":""success"",""trainingid"":str(ps.id),""version"":str(ps.Version),""usecaseid"":usecaseid}),content_type=""application/json"")
+                    '''
+                    outputStr = outputStr.decode('utf-8')
+                    outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
+                    outputStr = outputStr.strip()
+                    resultJsonObj = json.loads(outputStr)
+                    ps.Status = resultJsonObj['status']
+                    if resultJsonObj['status'] == 'SUCCESS':
+                        ps.modelType = resultJsonObj['data']['ModelType']
+                        ps.DeployPath = str(resultJsonObj['data']['deployLocation'])
+                        if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection', 'timeSeriesAnomalyDetection']: #task 11997
+                            ps.ProblemType = 'unsupervised'
+                        else:
+                            ps.ProblemType = 'supervised'
+                        ps.save()
+                    '''
+                else:
+                    return HttpResponse(json.dumps({""status"":""error"",""msg"":'Existing trained model not found'}),content_type=""application/json"")
+            except Exception as e:
+                print(e)
+                return HttpResponse(json.dumps({""status"":""error"",""msg"":""model training exception""}),content_type=""application/json"")
+        else:
+            return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"")
+    else:
+        msg = help_text(request,usecaseid,currentVersion)
+        return HttpResponse(msg,content_type=""text/plain"")
+@csrf_exempt
+def uploadfile(request):
+    try:
+        if 'file' not in request.FILES:
+            msg = 'No file part in the request'
+            return HttpResponse(json.dumps({""status"":""error"",""msg"":msg}),content_type=""application/json"")
+        else:
+            file = request.FILES['file']
+            if file.size > 31457280:
+                # reject oversized uploads instead of silently continuing
+                msg = 'Upload limit is 30 MB only'
+                return HttpResponse(json.dumps({""status"":""error"",""msg"":msg}),content_type=""application/json"")
+            ext = str(file).split('.')[-1]
+            if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
+                filetimestamp = str(int(time.time()))
+                file_id = 'AION_' + filetimestamp+'.'+ext
+                dataFile = os.path.join(DATA_FILE_PATH,file_id)
+                with open(dataFile, 'wb+') as destination:
+                    for chunk in file.chunks():
+                        destination.write(chunk)
+                return HttpResponse(json.dumps({""status"":""success"",""fileid"":file_id}),content_type=""application/json"")
+            else:
+                return HttpResponse(json.dumps({""status"":""error"",""msg"":""File extension not supported""}),content_type=""application/json"")
+    except Exception as e:
+        print(e)
+        return HttpResponse(json.dumps({""status"":""error"",""msg"":""File upload exception""}),content_type=""application/json"")
+def help_text(request,usecaseid,version):
+    hosturl = request.get_host()
+    url = 'http://'+hosturl+'/api/'
+    msg = """"""
+Request_Type: Post
+Content_Type: application/json
+For Prediction URL: {url}predict?usecaseid={usecaseid}&version={version}
+For Explanations URL: {url}xplain?usecaseid={usecaseid}&version={version}
+For Input Drift URL: {url}monitoring?usecaseid={usecaseid}&version={version}
+For Output Drift URL: {url}performance?usecaseid={usecaseid}&version={version}
+BODY: Data in json format
+"""""".format(url=url,usecaseid=usecaseid,version=version)
+    return msg
+
+@csrf_exempt
+def apispredict(request):
+    usecaseid = request.GET[""usecaseid""]
+    version = request.GET[""version""]
+    if request.method == 'POST':
+        if request.content_type == 'application/json':
+            model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
+            isdir = os.path.isdir(model_path)
+            if isdir:
+                try:
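+                    # The API handlers below (apispredict, apipredict, apiinputdrift, apioutputdrift, apixplain)
+                    # all follow the same pattern: run the script generated at deploy time (aion_spredict.py,
+                    # aion_predict.py, aion_ipdrift.py, ...) in a subprocess and return the JSON that follows
+                    # the 'predictions:' / 'drift:' marker printed on its stdout.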
data=request.body.decode('utf-8') + predict_path = os.path.join(model_path,'aion_spredict.py') + outputStr = subprocess.check_output([sys.executable,predict_path,data]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") + +@csrf_exempt +def apipredict(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + #print(request.content_type) + if request.method == 'POST': + if request.content_type in ['application/json','multipart/form-data']: + model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data = '' + msg = 'Bad request' + if 'file' not in request.FILES: + data=request.body.decode('utf-8') + else: + file = request.FILES['file'] + if file.size > 31457280: + msg = 'Upload limit is 30 MB only' + + else: + ext = str(file).split('.')[-1] + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: + urlData = file.read() + import io + rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) + data = rawData.to_json(orient='records') + else: + msg = 'Extension not supported' + + if data != '': + predict_path = os.path.join(model_path,'aion_predict.py') + outputStr = subprocess.check_output([sys.executable,predict_path,data]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":msg}),content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +@csrf_exempt +def apiinputdrift(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.method == 'POST': + if request.content_type == 'application/json': + model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data=request.body.decode('utf-8') + predict_path = os.path.join(model_path,'aion_ipdrift.py') + outputStr = subprocess.check_output([sys.executable,predict_path,data]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + except Exception as e: + print(e) + return 
HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +@csrf_exempt +def apioutputdrift(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.method == 'POST': + if request.content_type == 'application/json': + model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data=request.body.decode('utf-8') + predict_path = os.path.join(model_path,'aion_opdrift.py') + outputStr = subprocess.check_output([sys.executable,predict_path,data]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +@csrf_exempt +def apixplain(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.method == 'POST': + if request.content_type == 'application/json': + model_path = (Path(DEPLOY_LOCATION)/usecaseid)/str(version) + if model_path.is_dir(): + try: + with open( (model_path/'etc')/'display.json', 'r') as f: + disp_data = json.load(f) + is_explainable = not disp_data.get('textFeatures') + except: + is_explainable = True + try: + if not is_explainable: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""explain api is not supported when text features are used for training""}),content_type=""application/json"") + data=request.body.decode('utf-8') + predict_path = model_path/'aion_xai.py' + outputStr = subprocess.check_output([sys.executable,predict_path,'local',data]) #BugId:13304 + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +#@api_view(['POST','GET']) +def apifeatures(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.content_type == 'application/json': + model_path = 
os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data=request.body.decode('utf-8') + predict_path = os.path.join(model_path,'featureslist.py') + outputStr = subprocess.check_output([sys.executable,predict_path,data]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) + resp = outputStr.strip() + return HttpResponse(resp,content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong UseCaseID or Version""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Wrong Content Type""}),content_type=""application/json"") + +@csrf_exempt +def apiPatternAnomalySettings(request): + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.content_type == 'application/json': + model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data=request.body.decode('utf-8') + data = json.loads(data) + groupswitching = data['groupswitching'] + transitionprobability = data['transitionprobability'] + transitionsequence = data['transitionsequence'] + sequencethreshold = data['sequencethreshold'] + filename = os.path.join(model_path,'clickstream.json') + print(filename) + data = {} + data['groupswitching'] = groupswitching + data['transitionprobability'] = transitionprobability + data['transitionsequence'] = transitionsequence + data['sequencethreshold'] = sequencethreshold + updatedConfig = json.dumps(data) + with open(filename, ""w"") as fpWrite: + fpWrite.write(updatedConfig) + fpWrite.close() + return HttpResponse(json.dumps({""status"":'Success'}),content_type=""application/json"") + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") +#@api_view(['POST']) +@csrf_exempt +def apiPatternAnomalyPredict(request): + import pandas as pd + usecaseid = request.GET[""usecaseid""] + version = request.GET[""version""] + if request.content_type == 'application/json': + model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version)) + isdir = os.path.isdir(model_path) + if isdir: + try: + data=request.body.decode('utf-8') + data = json.loads(data) + anomaly = False + remarks = '' + clusterid = -1 + configfilename = os.path.join(model_path,'datadetails.json') + filename = os.path.join(model_path,'clickstream.json') + clusterfilename = os.path.join(model_path,'stateClustering.csv') + probfilename = os.path.join(model_path,'stateTransitionProbability.csv') + dfclus = pd.read_csv(clusterfilename) + dfprod = pd.read_csv(probfilename) + f = open(configfilename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + activity = configSettingsJson['activity'] + sessionid = configSettingsJson['sessionid'] + f = open(filename, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + groupswitching = configSettingsJson['groupswitching'] + 
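+                # Threshold semantics as used below: 'groupswitching' caps how many cluster hops are allowed in a
+                # session, 'transitionprobability' is the minimum state-transition probability before a transition
+                # is flagged, 'transitionsequence' is the window length of recent transitions, and
+                # 'sequencethreshold' is the minimum average probability over that window.
+                # Example request body (field names are illustrative; the real keys come from datadetails.json):
+                #   {""SessionID"": ""s-101"", ""Activity"": ""checkout""}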
page_threshold = configSettingsJson['transitionprobability'] + chain_count = configSettingsJson['transitionsequence'] + chain_probability = configSettingsJson['sequencethreshold'] + currentactivity = data[activity] + if bool(user_records): + sessionid = data[sessionid] + if sessionid != user_records['SessionID']: + user_records['SessionID'] = sessionid + prevactivity = '' + user_records['probarry'] = [] + " +" user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + else: + prevactivity = user_records['Activity'] + user_records['Activity'] = currentactivity + pageswitch = True + if prevactivity == currentactivity or prevactivity == '': + probability = 0 + pageswitch = False + remarks = '' + else: + user_records['pageclicks'] += 1 + df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] + if df1.empty: + remarks = 'Anomaly Detected - User in unusual state' + anomaly = True + clusterid = -1 + probability = 0 + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + avg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + else: + probability = df1['Probability'].iloc[0] + user_records['probarry'].append(probability) + n=int(chain_count) + num_list = user_records['probarry'][-n:] + davg = sum(num_list)/len(num_list) + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + remarks = '' + if user_records['prevclusterid'] != -1: + if probability == 0 and user_records['prevclusterid'] != clusterid: + user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 + if user_records['pageclicks'] == 1: + remarks = 'Anomaly Detected - Frequent Cluster Hopping' + anomaly = True + else: + remarks = 'Cluster Hopping Detected' + user_records['pageclicks'] = 0 + if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: + remarks = 'Anomaly Detected - Multiple Cluster Hopping' + anomaly = True + elif probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + if pageswitch == True: + if probability == 0: + remarks = 'Anomaly Detected - Unusual State Transition Detected' + anomaly = True + elif probability <= float(page_threshold): + remarks = 'Anomaly Detected - In-frequent State Transition Detected' + anomaly = True + else: + remarks = '' + if davg < float(chain_probability): + if anomaly == False: + remarks = 'Anomaly Detected - In-frequent Pattern Detected' + anomaly = True + else: + user_records['SessionID'] = data[sessionid] + user_records['Activity'] = data[activity] + user_records['probability'] = 0 + user_records['probarry'] = [] + user_records['chainprobability'] = 0 + user_records['prevclusterid'] = -1 + user_records['NoOfClusterHopping'] = 0 + user_records['pageclicks'] = 1 + for index, row in dfclus.iterrows(): + clusterlist = row[""clusterlist""] + if currentactivity in clusterlist: + clusterid = row[""clusterid""] + user_records['prevclusterid'] = clusterid + outputStr = {'status':'SUCCESS','data':{'Anomaly':str(anomaly),'Remarks':str(remarks)}} + return HttpResponse(json.dumps(outputStr),content_type=""application/json"") 
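+                # NOTE: user_records is a single module-level dict, so the activity chain is tracked for one
+                # active session at a time; a request with a new session id resets the tracked state. The
+                # response shape is {'status': 'SUCCESS', 'data': {'Anomaly': '<bool>', 'Remarks': '<reason>'}}.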
+ + except Exception as e: + print(e) + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + return HttpResponse(json.dumps({""status"":""error"",""msg"":""Bad Request""}),content_type=""application/json"") + else: + msg = help_text(request,usecaseid,version) + return HttpResponse(msg,content_type=""text/plain"") from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import json +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getversion +AION_VERSION = getversion() +import os +import time +import subprocess +import sys +import re +import pandas as pd +def mltesting(request): + from appbe.pages import mltesting_page + context = mltesting_page(request) + context['selected']='mltesting' + context['version'] = AION_VERSION + return render(request, 'mltesting.html',context) + +def ABtest(request): + try: + if request.method == ""POST"": + + models = request.POST[""model""] + data = request.POST[""data""] + #context['version'] = AION_VERSION + if(os.path.isfile(models) and os.path.isfile(data)): + + AlgorithmNames={'LogisticRegression':'Logistic Regression','SGDClassifier':'Stochastic Gradient Descent','GaussianNB':'Naive Bayes','SVC':'Support Vector Machine','KNeighborsClassifier':'K Nearest Neighbors','DecisionTreeClassifier':'Decision Tree','RandomForestClassifier':'Random Forest','GradientBoostingClassifier':'Gradient Boosting','XGBClassifier':'Extreme Gradient Boosting (XGBoost)','DecisionTreeRegressor':'Decision Tree','LinearRegression':'Linear Regression','Lasso':'Lasso','Ridge':'Ridge','RandomForestRegressor':'Random Forest','XGBRegressor':'Extreme Gradient Boosting (XGBoost)'} + filetimestamp = str(int(time.time())) + mltestjson = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json') + with open(mltestjson, 'r+') as f: + mltest = json.load(f) + f.close() + + with open(request.session['MLTestResult'], 'r+') as f: + mltestresult = json.load(f) + f.close() + models = mltestresult['models'] + datapath = mltestresult['datap'] + featurs = mltestresult['feature'] + featurs = featurs.split("","") + tar = mltestresult['target'] + tar = tar.split("","") + # models,datap,Problemtype,targ,feature,Problem,Parameters,Accuracy + # models,datap,Problemtype,targ,feature,Problem,Parameters,Accuracy + mltest['basic']['modelName'] = 'MLtest_'+ str(filetimestamp) + mltest['basic']['modelVersion'] = ""1"" + mltest['basic']['dataLocation'] = mltestresult['datap'] + mltest['basic']['deployLocation'] = DEPLOY_LOCATION + mltest['basic']['trainingFeatures'] = mltestresult['feature'] + mltest['basic']['targetFeature'] = mltestresult['target'] + mltest['advance']['profiler']['featureDict']=[] + temp = {} + Problemtype = mltestresult['Problemtype'] + if Problemtype == 'Classification': + Accuracyscore1 = mltestresult['Score'] + Accuracyscore = float(Accuracyscore1)*100 + temp['ScoringCriteria'] = 'Accuracy' + else: + R2_Score = round(float(mltestresult['Score']),2) + temp['ScoringCriteria'] = 'R2' + baselineparam = mltestresult['Params'] + temp['algorithm'] = [] + if request.session[""AionProblem""] == 'Samebaseline': + baselineprob = AlgorithmNames[mltestresult['ProblemName']] + temp['algorithm'].append(baselineprob) + + else: + baselineprob = request.session[""AionProblem""] + temp['algorithm'] = 
baselineprob.split("","") + #print(baselineprob) + temp['ProblemType'] = Problemtype + #temp['algorithm'] = ['K Nearest Neighbors'] + problemtyp = mltest['basic']['analysisType'] + scoring = mltest['basic']['scoringCriteria'] + for i in list(scoring.keys()): + for x in list(mltest['basic']['scoringCriteria'][i].keys()): + mltest['basic']['scoringCriteria'][i][x] = 'False' + if temp['ProblemType'].lower() in [""classification"",""regression"",]: + mltest['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True' + for i in list(problemtyp.keys()): + mltest['basic']['analysisType'][i]='False' + + algorithm = mltest['basic']['algorithms'] + for i in list(algorithm.keys()): + for x in list(mltest['basic']['algorithms'][i].keys()): + + mltest['basic']['algorithms'][i][x] = 'False' + + mltest['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True' + for X in temp['algorithm']: + mltest['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][X] = 'True' + mltest = json.dumps(mltest) + config_json_filename = os.path.join(CONFIG_FILE_PATH, 'MLtest' + filetimestamp + '.json') + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(mltest) + fpWrite.close() + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','training','-c',config_json_filename]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + #print(decoded_data) + if decoded_data['data']['ScoreType'] == 'R2': + decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2)) + if decoded_data['data']['ScoreType'].lower() == 'accuracy': + decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2)) + #print(decoded_data) + #print('123',Accuracyscore) + if Problemtype == 'Classification': + + if Accuracyscore >= float(decoded_data['data']['BestScore']) : + context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION} + else: + context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':Accuracyscore,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION} + else: + if R2_Score >= float(decoded_data['data']['BestScore']) : + context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','regression':'regression','regsuccess':'regsuccess','selected':'mltesting'} + else: + context = {'modelname': 
models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':R2_Score,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'regression':'regression','regsuccess':'regsuccess','selected':'mltesting','version':AION_VERSION} + else: + context= {'error':'Error - Model file or Data file does not exist','abtesting':'abtesting','selected':'mltesting'} + context['version'] = AION_VERSION + return render(request, 'mltesting.html', context) + except Exception as e: + print(e) + context= {'error':'Error - Fail to perform A/B Testing','abtesting':'abtesting','selected':'mltesting'} + context['version'] = AION_VERSION + return render(request, 'mltesting.html', context) + +def UQTesting(request): + try: + if request.method == ""POST"": + models = request.POST['modeluq'] + datap = request.POST['datauq'] + if(os.path.isfile(models) and os.path.isfile(datap)): + df = pd.read_csv(datap) + trainfea = df.columns.tolist() + + featurs = request.POST.getlist('Traininguq') + feature = "","".join(featurs) + + # features = ['PetalLengthCm','PetalWidthCm'] + targ = request.POST['Targetuq'] + tar =[targ] + + from bin.aion_uncertainties import aion_uq + outputStr = aion_uq(models,datap,feature,tar) + print(outputStr) + uq_test = json.loads(outputStr) + + #print(""=============="") + #print(uq_test) + #print(""=============="") + Problemtype= uq_test['Problem'] + msg = uq_test['msg'] + if Problemtype == 'Regression': + + # Confidence_Interval_Plot = uq_test['Confidence Interval Plot'] + # #print(Confidence_Interval_Plot) + # if Confidence_Interval_Plot != '': + # string = base64.b64encode(open(Confidence_Interval_Plot, ""rb"").read()) + " +"# Confidence_Interval_Plot = 'data:image/png;base64,' + urllib.parse.quote(string) + + # PICP_Plot = uq_test['PICP Plot'] + # if PICP_Plot != '': + # string = base64.b64encode(open(PICP_Plot, ""rb"").read()) + # PICP_Plot = 'data:image/png;base64,' + urllib.parse.quote(string) + + # Confidence_Plot = uq_test['Confidence Plot'] + # if Confidence_Plot != '': + # string = base64.b64encode(open(Confidence_Plot, ""rb"").read()) + # Confidence_Plot = 'data:image/png;base64,' + urllib.parse.quote(string) + if msg == ""Good"": + context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Green':'Green','selected':'mllite','version':AION_VERSION} + elif msg == ""Bad"": + context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Red':'Red','selected':'mllite','version':AION_VERSION} + else: + context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'orange':'orange','selected':'mllite','version':AION_VERSION} + elif Problemtype == 'Classification': + + # df3 = pd.DataFrame.from_dict(uq_test,orient='index') + df3 = pd.DataFrame.from_dict(uq_test, orient='index', columns=['value']) + df3 = df3.rename_axis('Attributes').reset_index() + # risk_plot = uq_test['Risk Plot'] + # if risk_plot != '': + # string = base64.b64encode(open(risk_plot, ""rb"").read()) + # risk_plot = 'data:image/png;base64,' + urllib.parse.quote(string) + + # reliability_plot = 
uq_test['Reliability Plot'] + # if reliability_plot != '': + # string = base64.b64encode(open(reliability_plot, ""rb"").read()) + # reliability_plot = 'data:image/png;base64,' + urllib.parse.quote(string) + + df3['Attributes'] = df3['Attributes'].str.replace(r'_', ' ') + # df3.loc[(df3.Attributes == 'Model_total_confidence_score'),'Attributes']='Model Total Confidence' + # df3.loc[(df3.Attributes == 'Expected_Calibration_Error'),'Attributes']='Expected Calibration Error' + df3 = df3.iloc[4:, :] + report = df3.to_html(index=False) + + if msg == ""Good"": + context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mltesting','selected':'mllite','version':AION_VERSION} + elif msg == ""Bad"": + context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mltesting','selected':'mllite','version':AION_VERSION} + else: + context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mltesting','selected':'mllite','version':AION_VERSION} + elif Problemtype == 'None': + #print('hi') + context={'Uqerror':'Uqerror','errormsg':""Error:""+str(msg),'version':AION_VERSION} + else: + context= {'error':'Please enter valid inputs','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION} + return render(request, 'mltesting.html', context) + except Exception as e: + print(""uqregression error: "",e) + context= {'error':'Error - Fail to perform Uncertainty Quantification ','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION} + return render(request, 'mltesting.html', context) from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +from appbe.pages import getusercasestatus +from appbe.pages import getversion +AION_VERSION = getversion() +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +import os +from django.db.models import Max, F +import pandas as pd +from appbe.publish import check_input_data +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe import installPackage +import json +from appbe import compute +from appbe.training import checkModelUnderTraining +import logging +def opentraininglogs(request, id,currentVersion): + from appbe.pages import usecases_page + try: + from appbe import installPackage + modelID = installPackage.getMIDFromUseCaseVersion(id,currentVersion,usecasedetails,Existusecases) + p = Existusecases.objects.get(id=modelID) + configpath = str(p.ConfigPath) + file_exists = os.path.exists(configpath) + if not file_exists: + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['errorMsg'] = 'Error in model launching: Some of the files are missing' + return render(request,action,context) + usecasename = p.ModelName.UsecaseName + Version = p.Version + request.session['ModelName'] = p.ModelName.id + request.session['UseCaseName'] = usecasename + 
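+        # Cache the launched model's identifiers in the session; downstream views (uploaddata, training status,
+        # visualizer) read these keys to locate the use case, its config and its training logs.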
request.session['usecaseid'] = p.ModelName.usecaseid + request.session['ModelVersion'] = p.Version + request.session['deploypath'] = str(p.DeployPath) + request.session['config_json'] = configpath + usename = request.session['usecaseid'].replace("" "", ""_"") + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log') + request.session['finalstate'] = 3 + request.session['ModelStatus'] = p.Status + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + problemtypes = configSettings['basic']['analysisType'] + #print(problemtypes.keys()) + problem_type = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type.lower() in ['videoforecasting','imageclassification','objectdetection','document','llmfinetuning']: + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + if problem_type.lower() == 'llmfinetuning': + request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension'] + else: + request.session['datalocation'] = str(p.DataFilePath) + request.session['datatype'] = 'Normal' + if 'fileSettings' in configSettings['basic'].keys(): + fileSettings = configSettings['basic']['fileSettings'] + if 'delimiters' in fileSettings.keys(): + delimiters = configSettings['basic']['fileSettings']['delimiters'] + textqualifier = configSettings['basic']['fileSettings']['textqualifier'] + request.session['delimiter'] = delimiters + request.session['textqualifier'] = textqualifier + else: + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + from appfe.modelTraining.views import uploaddata + return uploaddata(request) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Failed to launch model. 
Please train the model first before launching.','selected': 'prediction','version':AION_VERSION}) + +def retrain(request, id,currentVersion): + from appbe.aion_config import eda_setting + from appbe.pages import usecases_page + from appbe.aion_config import settings + usecasetab = settings() + try: + p = usecasedetails.objects.get(id=id) + s1 = Existusecases.objects.filter(ModelName=id).annotate(maxver=Max('ModelName__existusecases__Version')) + config_list = s1.filter(Version=F('maxver')) + if config_list.count() > 0: + Version = config_list[0].Version + Version = Version + 1 + else: + Version = 1 + + usecasename = p.UsecaseName + request.session['ModelName'] = p.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.usecaseid + request.session['ModelVersion'] = Version + request.session['ModelStatus'] = 'Not Trained' + request.session['finalstate'] = 0 + usecase = usecasedetails.objects.all().order_by('-id') + + # Retraing settings changes + # -------- S T A R T -------- + model = Existusecases.objects.filter(ModelName=p,Version=currentVersion) + + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + + if(model.count() > 0): + indexVal = 0 + configfile = str(model[indexVal].ConfigPath) + f = open(configfile, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + dataFile = configSettings['basic']['dataLocation'] + if configSettings['basic']['folderSettings']['fileType'] == 'Object': + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + return objectlabeldone(request) + else: + request.session['datalocation'] = str(configSettings['basic']['dataLocation']) + request.session['datatype'] = 'Normal' + if 'fileSettings' in configSettings['basic'].keys(): + fileSettings = configSettings['basic']['fileSettings'] + if 'delimiters' in fileSettings.keys(): + delimiters = configSettings['basic']['fileSettings']['delimiters'] + textqualifier = configSettings['basic']['fileSettings']['textqualifier'] + request.session['delimiter'] = delimiters + request.session['textqualifier'] = textqualifier + else: + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + + df = pd.read_csv(dataFile, encoding='utf8',nrows=10,encoding_errors= 'replace') + records = df.shape[0] + df1 = check_input_data(usecasename) + if df1.shape[0] > 0: + df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace') + df = df.append(df1, ignore_index=True) + df = df.reset_index(drop=True) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + df.to_csv(dataFile, index=False) + print(df.shape[0]) + request.session['datalocation'] = str(dataFile) + request.session['NoOfRecords'] = records + request.session['IsRetraining'] = 'Yes' + + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + + # from AION import ux_eda + # eda_obj = ux_eda(dataFile) + # featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature = eda_obj.getFeatures() + featuresList = df.columns.tolist() + + numberoffeatures = len(featuresList) + from appfe.modelTraining.views import getimpfeatures + imp_features = getimpfeatures(dataFile,numberoffeatures) + + check_df = 
pd.read_csv(dataFile,encoding='utf8',encoding_errors= 'replace') + + # EDA Performance change + # ---------------------------- + sample_size = int(eda_setting()) + + # dflength = len(eda_obj.getdata()) + dflength = len(check_df) + + if dflength > sample_size: + samplePercentage = int((sample_size/dflength) * 100) + samplePercentval = samplePercentage / 100 + showRecommended = True + # ---------------------------- + + + statusmsg = 'Data loaded Successfully for Retraining.' + computeinfrastructure = compute.readComputeConfig() + # ---------------------------- + + selected_use_case = request.session['UseCaseName'] + ModelVersion = Version + ModelStatus = 'Not Trained' + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + else: + nouc = 1 + + # Retraing settings changes + # -------- S T A R T -------- + # return render(request, 'usecases.html', {'usecasedetail': usecase,'nouc':nouc,'models': models, 'selectedusecase': usecasename, + # 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, + # 'ModelVersion': ModelVersion, 'selected': 'usecase'}) + ps = Existusecases(DataFilePath=request.session['datalocation'], DeployPath='', Status='Not Trained',ConfigPath=configfile, Version=Version, ModelName=p," +"TrainOuputLocation='') + ps.save() + if(model.count() > 0): + context = {'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'tab': 'tabconfigure','data': df_json,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','exploratory':False, 'status_msg': statusmsg,'computeinfrastructure':computeinfrastructure,'IsRetrainingModel':True,'imp_features':imp_features,'numberoffeatures':numberoffeatures, 'dataSetPath': dataFile,'usecasetab':usecasetab,'finalstate':request.session['finalstate'],'version':AION_VERSION} + else: + context = {'tab': 'tabconfigure','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'Modelretrain':request.session['ModelVersion'],'finalstate':request.session['finalstate'],'version':AION_VERSION} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except Exception as e: + print(e) + checkModelUnderTraining(request,usecasedetails,Existusecases) + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + #print(context) + context['version'] = AION_VERSION + context['Status'] = 'Error' + context['Msg'] = 'Error in retraining usecase. 
Check log file for more details' + return render(request,action,context) + +def launchmodel(request, id,version): + from appbe.pages import usecases_page + try: + modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases) + p = Existusecases.objects.get(id=modelID) + configpath = str(p.ConfigPath) + file_exists = os.path.exists(configpath) + if not file_exists: + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['errorMsg'] = 'Error in model launching: Some of the files are missing' + return render(request,action,context) + usecasename = p.ModelName.UsecaseName + Version = p.Version + request.session['ModelName'] = p.ModelName.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.ModelName.usecaseid + request.session['ModelVersion'] = p.Version + request.session['deploypath'] = str(p.DeployPath) + request.session['config_json'] = configpath + usename = request.session['usecaseid'].replace("" "", ""_"") + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log') + request.session['finalstate'] = 3 + request.session['ModelStatus'] = p.Status + updatedConfigFile = request.session['config_json'] + + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + problemtypes = configSettings['basic']['analysisType'] + #print(problemtypes.keys()) + problem_type = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type == 'videoForecasting' or problem_type == 'imageClassification' or problem_type == 'objectDetection': + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + elif configSettings['basic']['folderSettings']['fileType'] == 'Document': + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + else: + request.session['datalocation'] = str(p.DataFilePath) + request.session['datatype'] = 'Normal' + if 'fileSettings' in configSettings['basic'].keys(): + fileSettings = configSettings['basic']['fileSettings'] + if 'delimiters' in fileSettings.keys(): + delimiters = configSettings['basic']['fileSettings']['delimiters'] + textqualifier = configSettings['basic']['fileSettings']['textqualifier'] + request.session['delimiter'] = delimiters + request.session['textqualifier'] = textqualifier + else: + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + + from appfe.modelTraining.prediction_views import Prediction + return Prediction(request) + except Exception as e: + print(e) + return render(request, 'prediction.html',{'error': 'Failed to launch model. 
Please train the model first before launching.','selected': 'prediction','version':AION_VERSION}) + +def modxplain(request, id,version): + from appbe.pages import usecases_page + log = logging.getLogger('log_ux') + modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases) + p = Existusecases.objects.get(id=modelID) + configpath = str(p.ConfigPath) + usecasename = p.ModelName.UsecaseName + Version = p.Version + request.session['ModelName'] = p.ModelName.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.ModelName.usecaseid + request.session['ModelVersion'] = p.Version + request.session['deploypath'] = str(p.DeployPath) + request.session['config_json'] = configpath + usename = request.session['usecaseid'].replace("" "", ""_"") + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log') + request.session['finalstate'] = 3 + request.session['ModelStatus'] = p.Status + file_exists = os.path.exists(configpath) + if not file_exists: + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['errorMsg'] = 'Error in model launching: Some of the files are missing' + log.info('modxplain:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0 ' + 'sec' + ':' + 'Error:Error in model launching: Some of the files are missing') + return render(request,action,context) + + usecasename = p.ModelName.UsecaseName + Version = p.Version + request.session['ModelName'] = p.ModelName.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.ModelName.usecaseid + request.session['ModelVersion'] = p.Version + request.session['deploypath'] = str(p.DeployPath) + request.session['config_json'] = configpath + usename = request.session['usecaseid'].replace("" "", ""_"") + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log') + request.session['finalstate'] = 3 + request.session['ModelStatus'] = p.Status + from appfe.modelTraining import visualizer_views as v + return v.xplain(request) + +def moddrift(request, id,version): + from appbe.pages import usecases_page + modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases) + p = Existusecases.objects.get(id=modelID) + configpath = str(p.ConfigPath) + file_exists = os.path.exists(configpath) + if not file_exists: + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['errorMsg'] = 'Error in model launching: Some of the files are missing' + return render(request,action,context) + usecasename = p.ModelName.UsecaseName + Version = p.Version + request.session['ModelName'] = p.ModelName.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.ModelName.usecaseid + request.session['ModelVersion'] = p.Version + request.session['deploypath'] = str(p.DeployPath) + request.session['config_json'] = configpath + usename = request.session['usecaseid'].replace("" "", ""_"") + request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log') + request.session['finalstate'] = 3 + request.session['ModelStatus'] = p.Status + + f = open( configpath, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + 
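+    # The original training data location is recovered from the saved config and stashed in the session so that
+    # inputdrift() can surface it (as 'trainingDataLocation') alongside the monitoring/performance service URLs.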
trainingdataloc = configSettingsJson['basic']['dataLocation'] + + request.session['datalocation']= trainingdataloc + + return inputdrift(request) + +def inputdrift(request): + log = logging.getLogger('log_ux') + from appbe.aion_config import settings + usecasetab = settings() + from appbe import service_url + try: + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + + if ModelStatus != 'SUCCESS': + context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab} + log.info('Error Please train the model first or launch an existing trained model') + + else: + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemtypes = configSettingsJson['basic']['analysisType'] + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + problem = problem_type + ser_url = service_url.read_monitoring_service_url_params(request) + iterName = request.session['usecaseid'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion) + pser_url = service_url.read_performance_service_url_params(request) + pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion) + if problem.lower() not in ['classification','regression']: + context = {'error': 'Input drift only available for classification and regression problems', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'version':AION_VERSION, + 'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab} + else: + context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'version':AION_VERSION, + 'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab} + return render(request, 'inputdrif.html', context) + except Exception as e: + print(e) + log.info('inputdrift; Error: Failed to perform drift analysis'+str(e)) + return render(request, 'inputdrif.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'error':'Fail to do inputdrift analysis','usecasetab':usecasetab}) + import os,sys +import re +import logging +from django.http import HttpRequest, HttpResponse +from django.conf import settings +from django.shortcuts import render +from appbe.pages import getversion +import plotly.graph_objects as go +import plotly.figure_factory as ff +from django.shortcuts import render +from plotly.subplots import make_subplots +from django.contrib.sessions.models import Session +from sklearn.metrics import confusion_matrix +from IPython.core.display import HTML +from IPython.core import display +from django.template import Context, loader 
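+# Visualizer / explainability views: xplain() below renders businessview.html and, on POST, dispatches to
+# modelxplain() or predictionxplain() depending on the submitted action.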
+import pandas as pd +import numpy as np +import io +import urllib, base64 +from natsort import natsorted +import matplotlib.pyplot as plt +import plotly.express as px +import json +from IPython.core.display import display, HTML +from appbe import compute +import base64 +import warnings +warnings.filterwarnings('ignore') +import subprocess +from appbe import installPackage +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +from utils.file_ops import read_df_compressed +from appbe.dataPath import LOG_LOCATION +from appbe.log_ut import logg +import time + +AION_VERSION = getversion() +def getusercasestatus(request): + if 'UseCaseName' in request.session: + selected_use_case = request.session['UseCaseName'] + else: + selected_use_case = 'Not Defined' + + if 'ModelVersion' in request.session: + ModelVersion = request.session['ModelVersion'] + else: + ModelVersion = 0 + + if 'ModelStatus' in request.session: + ModelStatus = request.session['ModelStatus'] + else: + ModelStatus = 'Not Trained' + return selected_use_case,ModelVersion,ModelStatus + +def xplain(request): + log = logging.getLogger('log_ux') + computeinfrastructure = compute.readComputeConfig() + from appbe.aion_config import settings + usecasetab = settings() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + if request.method == 'GET': + try: + if ModelStatus != 'SUCCESS': + log.info('xplain :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error" +":Please train the model first or launch an existing trained model') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first or launch an existing trained model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + else: + if 'ModelVersion' not in request.session: + log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:Please train the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif request.session['ModelVersion'] == 0: + log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif 'ModelStatus' not in request.session: + log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif 
request.session['ModelStatus'] != 'SUCCESS': + log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemType = 'classification' + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + + if problemType.lower() != 'classification' and problemType.lower() != 'regression': + log.info('xplain:' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error:Explainable AI only available for classification and regression problem') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + displaypath = os.path.join( request.session['deploypath'],'etc','display.json') + with open(displaypath) as file: + config = json.load(file) + file.close() + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + df = pd.read_csv(dataFilePath,nrows=10) + df.rename(columns=lambda x: x.strip(), inplace=True) + df = df[inputFeaturesList] + inputFieldsDict = df.to_dict(orient='index')[5] + inputFields = [] + inputFields.append(inputFieldsDict) + targetfeatures = targetFeature.split("","") + ##### Bug 20649 starts + featureDict = configSettingsJson['advance']['profiler']['featureDict'] + catFeatureList = [] + for feature in featureDict: + if feature['type'] == 'categorical': + catFeatureList.append(feature['feature']) + for feature in targetfeatures: + if feature in catFeatureList: + catFeatureList.remove(feature) + fairness_error = """" if len(catFeatureList)>0 else ""Fairness metrics is not applicable as categorical feature(s) is not present."" + ##### Bug 20649 ends + context = {""fairness_error"":fairness_error,""catFeatureList"":catFeatureList,'selected_use_case':selected_use_case,'configSettings':configSettingsJson,'targetfeatures':targetfeatures,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','inputFields':inputFields,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'problemType':problemType} + + return render(request, 'businessview.html', context) + except Exception as e: + log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Unexpected error occur, '+str(e)) + print(e) + return 
render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Unexpected error occur','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + if request.method == 'POST': + if request.POST.get(""submit"") == 'modelxplain': + return modelxplain(request) + if request.POST.get(""submit"") == 'xplainprediction': + return predictionxplain(request) +def modelxplain(request): + log = logging.getLogger('log_ux') + computeinfrastructure = compute.readComputeConfig() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + from appbe.aion_config import settings + usecasetab = settings() + t1 = time.time() + if 'UseCaseName' not in request.session: + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please create the use case first, trained the model and then visualize the data','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + else: + if 'ModelVersion' not in request.session: + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif request.session['ModelVersion'] == 0: + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif 'ModelStatus' not in request.session: + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + elif request.session['ModelStatus'] != 'SUCCESS': + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + try: + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + updatedConfigFile = request.session['config_json'] + f = 
open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemType = 'classification' + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + if problemType.lower() != 'classification' and problemType.lower() != 'regression': + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Explainable AI only available for classification and regression problem') + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + + displaypath = os.path.join( request.session['deploypath'],'etc','display.json') + with open(displaypath) as file: + config = json.load(file) + file.close() + targetFeature = configSettingsJson['basic']['targetFeature'] + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + status, df = read_df_compressed(config['postprocessedData'], nrows=10) + df.rename(columns=lambda x: x.strip(), inplace=True) + if targetFeature in df.columns: + df.drop( targetFeature, axis=1, inplace=True) + inputFieldsDict = df.to_dict(orient='index')[5] + inputFields = [] + inputFields.append(inputFieldsDict) + + if 'nrows' in config: + nrows = config['nrows'] + else: + nrows = 'Not Available' + + if 'ncols' in config: + ncols = config['ncols'] + else: + ncols = 'Not Available' + + if 'targetFeature' in config: + targetFeature = config['targetFeature'] + else: + targetFeature = '' + + labelMaps = config['labelMaps'] + modelfeatures = configSettingsJson['basic']['trainingFeatures'].split(',') + mfcount = len(modelfeatures) + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + df_proprocessed = pd.read_csv(dataFilePath,nrows=1000) + df_proprocessed.rename(columns=lambda x: x.strip(), inplace=True) + if 'targetFeature' != '': + target_classes = df_proprocessed[targetFeature].unique() + numberofclasses = len(target_classes) + else: + target_classes = [] + numberofclasses = 'Not Available' + dataPoints = df_proprocessed.shape[0] + df_proprocessed = df_proprocessed.head(5) + df_proprocessed = df_proprocessed.to_json(orient=""records"") + df_proprocessed = json.loads(df_proprocessed) + expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py') + outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global']) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + ale_json = json.loads(str(outputStr)) + ale_json = ale_json['data'] + ale_view = ale_json['data'] + sentences = ale_json['sentences'] + scoreMessage = '' + feature_importance = ale_json['feature_importance'] + dfimp = pd.DataFrame.from_dict(feature_importance) + dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index() + yaxis_data = dfimp['values'].tolist() + xaxis_data = dfimp['labels'].tolist() + cfig = go.Figure() + cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance')) + cfig" +".update_layout(barmode='stack',xaxis_title='Features') + bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000) + 
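+ # Illustrative note (hypothetical values, structure inferred from the keys used above):
+ # ale_json['feature_importance'] is expected to look like
+ #     {'labels': ['feature_a', 'feature_b'], 'values': [0.42, 0.31]}
+ # It is loaded into a DataFrame, sorted by 'values' and rendered as a Plotly bar chart
+ # whose HTML fragment is passed to businessview.html through the bargraph context key.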
dftoprecords = dfimp.head(2) + topTwoFeatures = dfimp['labels'].tolist() + topFeaturesMsg = [] + for i in range(0,len(dfimp)): + value = round(dfimp.loc[i, ""values""],2)*100 + value = round(value,2) + tvalue = str(dfimp.loc[i, ""labels""])+' contributing to '+ str(value)+'%' + topFeaturesMsg.append(tvalue) + most_influencedfeature = ale_json['most_influencedfeature'] + interceppoint = ale_json['interceptionpoint'] + anchorjson = ale_json['anchorjson'] + t2 = time.time() + context = {'ale_view':ale_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','sentences':sentences,""bargraph"":bargraph,'inputFields':inputFields,'nrows':nrows,'ncols':ncols,'targetFeature':targetFeature,'dataPoints':dataPoints,'target_classes':target_classes,'datarows':df_proprocessed,'numberofclasses':numberofclasses,'modelfeatures':modelfeatures,'problemType':problemType,'mfcount':mfcount,'topTwoFeatures':topTwoFeatures,'topFeaturesMsg':topFeaturesMsg,'most_influencedfeature':most_influencedfeature,'interceppoint':interceppoint,'achors':anchorjson,'labelMaps':labelMaps,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION} + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + return render(request, 'businessview.html', context) + except Exception as Inst: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(Inst) + log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to Xplain Model, '+str(Inst)) + log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error1':'Failed to Xplain Model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}) + +def predictionxplain(request): + log = logging.getLogger('log_ux') + from appbe.aion_config import settings + usecasetab = settings() + computeinfrastructure = compute.readComputeConfig() + selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request) + try: + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + t1 = time.time() + displaypath = os.path.join( request.session['deploypath'],'etc','display.json') + with open(displaypath) as file: + config = json.load(file) + file.close() + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + + inputFieldsDict = {} + problemType = 'classification' + for key in configSettingsJson['basic']['analysisType']: + if configSettingsJson['basic']['analysisType'][key] == 'True': + problemType = key + break + if problemType.lower() == 'timeseriesforecasting': #task 11997 + 
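+ # For time series forecasting only the requested number of forecasts is needed;
+ # the branches below collect recommender ids or a sample row of training features instead.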
inputFieldsDict['noofforecasts'] = request.POST.get('noofforecasts') + elif problemType == 'RecommenderSystem': + inputFieldsDict['uid'] = request.POST.get('uid') + inputFieldsDict['iid'] = request.POST.get('iid') + inputFieldsDict['rating'] = request.POST.get('rating') + else: + for feature in inputFeaturesList: + try: + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + df = pd.read_csv(dataFilePath,nrows=10) + df.rename(columns=lambda x: x.strip(), inplace=True) + df = df[inputFeaturesList] + inputFieldsDict = df.to_dict(orient='index')[5] + except: + inputFieldsDict[feature] = request.POST.get(feature) + + for key, value in inputFieldsDict.items(): + if value == 'nan': + inputFieldsDict[key] = '' + + inputFieldsJson = json.dumps(inputFieldsDict) + + expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py') + #print(inputFieldsJson) + outputStr = subprocess.check_output([sys.executable,expainableAIPath,'local',inputFieldsJson]) + + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + #print(outputStr) + + predict_dict = json.loads(str(outputStr)) + if (predict_dict['status'] == 'SUCCESS'): + predict_dict = predict_dict['data'] + prediction = predict_dict['prediction'] + anchor = predict_dict['anchor'] + precision = predict_dict['precision'] + coverage = round(predict_dict['coverage'],2) + confidence = '95%' + forceplot_view = predict_dict['forceplot'] + multidecisionplot_view = predict_dict['multidecisionplot'] + waterfallplot_view = predict_dict['waterfallplot'] #Task12581 + else: + context={'tab':'tabconfigure','error':'Failed to xplain','selected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion} + log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to xplain') + return render(request,'businessview.html',context) + + inputFields = [] + inputFields.append(inputFieldsDict) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context={'tab' : 'predict','inputFields':inputFields,'prediction':prediction,'reason':anchor, 'precision': precision,'coverage':coverage,'confidence':confidence,'forceplot_view':forceplot_view,'multidecisionplot_view':multidecisionplot_view,'waterfallplot_view':waterfallplot_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected' : 'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION} + t2= time.time() + log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + return render(request, 'businessview.html', context = context) + except Exception as inst: + print(inst) + log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0'+ 'sec' + ' : ' + 'Error : Failed to Xplain Prediction, ' + str(inst)) + context={'tab' : 'tabconfigure','error' : 'Failed to Xplain Prediction','selected' : 
'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion} + return render(request,'businessview.html',context) + from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import json +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getversion +from appbe.aion_config import running_setting +from appbe.training import checkModelUnderTraining +from appbe.training import calculate_total_activities +from appbe.training import check_unsupported_col +from appbe.training import check_granularity +from appbe.training import checkversionrunningstatus +from appbe.training import getModelStatus +from appbe.training import changeModelStatus +from appbe.training import calculate_total_interations +from appbe.pages import getusercasestatus +from utils.file_ops import read_df_compressed +import plotly.graph_objects as go +from appbe.pages import getMLModels +from appfe.modelTraining.models import usecasedetails +from appbe.training import getStatusCount +from appfe.modelTraining.models import Existusecases +import os,sys +import urllib, base64 +import subprocess +import time +import re +import numpy as np +import pandas as pd +from pathlib import Path +import importlib +from appbe.log_ut import logg +from appbe import compute +import logging +AION_VERSION = getversion() +LOG_FILE_NAME = 'model_training_logs.log' +LOG_FOLDER = 'log' + + + +def getPerformanceMatrix(deploypath,output_json): + displaypath = os.path.join(deploypath,'etc','display.json') + model_perf = [] + try: + with open(displaypath) as file: + config = json.load(file) + file.close() + except Exception as e: + print(e) + import glob + resultJsonObj = json.loads(output_json) + if (resultJsonObj['data']['ModelType'] == 'anomalydetection' and resultJsonObj['data']['BestScore'] != 0) or resultJsonObj['data']['ModelType'].lower() == 'timeseriesanomalydetection': #task 11997 + if resultJsonObj['data']['BestModel'].lower() == 'autoencoder' or resultJsonObj['data']['BestModel'].lower() == 'dbscan' : + try: + anomaly_plot_files = glob.glob(os.path.normpath(os.path.join(deploypath,'output','anomaly_plot','*.png'))) + for plot in anomaly_plot_files: + if(os.path.isfile(plot)): + string = base64.b64encode(open(plot, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + model_perf.append(image_64) + else: + model_perf.append('nograph') + except Exception as e: + print(""Anomaly plot exe error: \\n"",e) + + else: + predictfile = os.path.join(deploypath,'data','predicteddata.csv') + if(os.path.isfile(predictfile)): + df = pd.read_csv(predictfile) + outliers=df.loc[df['predict']==-1] + outlier_index=list(outliers.index) + normals=df.loc[df['predict']!=-1] + normals_index=list(normals.index) + featuresList = df.columns.values.tolist() + #print(featuresList) + if 'predict' in featuresList: + featuresList.remove('predict') + if 'score' in featuresList: + featuresList.remove('score') + if len(featuresList) == 1: + xdata = df[featuresList[0]] + ydata = df['score'] + fig = go.Figure() + fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[normals_index], y=df['score'].iloc[normals_index],mode='markers',name='Normal')) + 
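+ # Rows flagged by the model with predict == -1 are plotted next as a separate
+ # Predicted Outliers trace so they stand out against the normal points in this
+ # score-vs-feature scatter.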
fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df['score'].iloc[outlier_index],mode='markers',name='Predicted Outliers')) + fig.update_layout(xaxis_title=featuresList[0],yaxis_title=""Score"") + frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100) + model_perf.append(frgraph) + if len(featuresList) == 2: + fig = go.Figure() + df = df.reset_index() + fig.add_trace(go.Scatter(x=df[featuresList[0]], y=df[featuresList[1]],mode='markers',name='Normal Points')) + fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df[featuresList[1]].iloc[outlier_index],mode='markers',name='Predicted Outliers')) + fig.update_xaxes(title_text=featuresList[0]) + fig.update_yaxes(title_text=featuresList[1]) + fig.update_layout(xaxis_title=featuresList[0],yaxis_title=featuresList[1]) + frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100) + model_perf.append(frgraph) + if len(featuresList) > 2: + from sklearn.decomposition import PCA + pca = PCA(2) + pca.fit(df) + res=pd.DataFrame(pca.transform(df)) + Z = np.array(res) + fig = go.Figure() + fig.add_trace(go.Scatter(x=res[0], y=res[1],mode='markers',name='Normal Points')) + fig.add_trace(go.Scatter(x=res.iloc[outlier_index,0], y=res.iloc[outlier_index,1" +"],mode='markers',name='Predicted Outliers')) + fig.update_xaxes(title_text=""Principal Component 1"") + fig.update_yaxes(title_text=""Principal Component 2"") + frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100) + model_perf.append(frgraph) + return (model_perf) + + if config['problemType'].lower() == 'classification' or config['problemType'].lower() == 'anomaly_detection' or config['problemType'].lower() == 'timeseriesanomalydetection': + displaypath = os.path.join(deploypath,'log','img') + import glob + for img in glob.glob(displaypath+""/*.png""): + string = base64.b64encode(open(img, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + model_perf.append(image_64) + #print(model_perf) + elif config['problemType'].lower() == 'regression' or config['problemType'].lower() == 'recommendersystem' or \\ + config['problemType'].lower() == 'text similarity': + dataPath = config['predictedData'] + readstatus,predict_df=read_df_compressed(dataPath) + regfig = go.Figure() + regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['actual'], + mode='lines', + name='Actual')) + regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['predict'], + mode='lines', + name='Predict')) + + frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100) + rfgraph = '' + model_perf.append(frgraph) + elif config['problemType'].lower() == 'clustering': + dataPath = config['predictedData'] + readstatus,predict_df=read_df_compressed(dataPath) + distinctCount = len(predict_df['predict'].unique()) + clusterlist = predict_df['predict'].unique() + color = ['green','blue','red','orange','green','blue','red','orange'] + fig = go.Figure() + for cluster in clusterlist: + df_cluster = predict_df[predict_df['predict'] == cluster] + modelFeatures = config['modelFeatures'] + X1= df_cluster[modelFeatures[0]].tolist() + X2= df_cluster[modelFeatures[1]].tolist() + fig.add_trace(go.Scatter(x=X1, y=X2,mode='markers',name='cluster '+str(cluster))) + fig.update_layout(title=""Cluster Graph"",xaxis_title=modelFeatures[0],yaxis_title=modelFeatures[1],) + frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100) + 
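+ # With more than two model features the data is reduced to two principal components
+ # purely for plotting; the resulting normal vs predicted outlier scatter is appended
+ # to model_perf as an HTML fragment like the other graphs.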
model_perf.append(frgraph) + + elif config['problemType'].lower() == 'timeseriesforecasting': #task 11997 + dataPath = config['predictedData'] + predict_df = pd.read_csv(dataPath) + modelFeatures = config['modelFeatures'] + for feature in modelFeatures: + feature_name = feature + '_actual' + prediction = feature + '_pred' + if feature_name in predict_df.columns: + regfig = go.Figure() + regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[feature_name], + mode='lines', + name=feature)) + regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[prediction], + mode='lines', + name='Predict')) + frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100) + model_perf.append(frgraph) + return (model_perf) + + +def stoptraining(request): + request.session['ModelStatus'] = 'Terminated' + request.session.save() + changeModelStatus(Existusecases,request.session['modelid'],'Terminated','NA','NA') + return HttpResponse('Terminated') + +def kill_child_proc_rec(ppid): + import psutil + for process in psutil.process_iter(): + _ppid = process.ppid() + if _ppid == ppid: + _pid = process.pid + kill_child_proc_rec(_pid) + print(f'Terminating: {_pid}') + if sys.platform == 'win32': + process.terminate() + else: + os.system('kill -9 {0}'.format(_pid)) +def getDataFileCountAndSize(basicConfig): + import glob + path = basicConfig['dataLocation'] + radiovalue = basicConfig['folderSettings']['fileExtension'] + filesCount = 0 + filesSize = 0 + files = [] + + for filename in glob.iglob(os.path.join(path, ""**/*."" + radiovalue), recursive=True): + files.append(filename) + if radiovalue == 'doc': + for filename in glob.iglob(os.path.join(path, ""**/*."" + 'docx'), recursive=True): + files.append(filename) + for filename in files: + #for filename in glob.iglob(os.path.join(path, ""**/*."" + radiovalue), recursive=True): + filesCount = filesCount + 1 + get_size = os.path.getsize(filename) + filesSize = round(filesSize + get_size, 1) + if filesSize > 1048576: + size = round((filesSize / (1024 * 1024)), 1) + filesSize = str(size) + ' M' + elif filesSize > 1024: + size = round((filesSize /1024), 1) + filesSize = str(size) + ' K' + else: + filesSize = str(filesSize) + ' B' + + return filesCount,filesSize +# task 4343 Abort training +def read_log_file( config_file): + outputStr = 'aion_learner_status:{""status"":""Fail"",""message"":""Log file not found""}' + if Path(config_file).exists(): + with open(config_file, 'r', encoding='utf-8') as f: + config = json.load(f) + deployPath = Path(config['basic']['deployLocation']) + log_file = deployPath/config['basic']['modelName'].replace(' ', '_')/config['basic']['modelVersion']/LOG_FOLDER/LOG_FILE_NAME + if log_file.exists(): + with open(log_file, 'r', encoding='utf-8') as f: + outputStr = f.read() + return outputStr + +def checkVectorDBPackage(embeddedDB): + errorStatus = 'False' + if embeddedDB.lower() == 'vectordb': + status = importlib.util.find_spec('chromadb') + if not status: + errorStatus = 'True' + return errorStatus +def getModelSize(configSettings,model): + modelSize = 'NA' + if 'modelSize' in configSettings['basic']: + selectedModelSize = configSettings['basic']['modelSize']['llmFineTuning'][model] + for k in selectedModelSize.keys(): + if configSettings['basic']['modelSize']['llmFineTuning'][model][k] == 'True': + modelSize = k + break + return modelSize +def llmmodelevaluate(request): + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + usecasename = 
request.session['usecaseid'].replace("" "", ""_"") + from appbe.prediction import get_instance + hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(ModelVersion)) + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + usecaseconfigfile = request.session['config_json'] + f = open(usecaseconfigfile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + f.close() + configSettings = json.loads(configSettingsData) + problem_type = '' + modelSize = '' + problemtypes = configSettings['basic']['analysisType'] + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + mlmodels ='' + algorihtms = configSettings['basic']['algorithms'][problem_type] + for k in algorihtms.keys(): + if configSettings['basic']['algorithms'][problem_type][k] == 'True': + if mlmodels != '': + mlmodels += ', ' + mlmodels += k + if 'modelSize' in configSettings['basic']: + selectedModelSize = configSettings['basic']['modelSize']['llmFineTuning'][mlmodels] + for k in selectedModelSize.keys(): + if configSettings['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True': + modelSize = k + break + eval = '' + if configSettings['basic']['folderSettings']['fileType'] == 'LLM_Document': + eval = 'doc' + elif configSettings['basic']['folderSettings']['fileType'] == 'LLM_Code': + eval = 'code' + #print(sys.executable, scriptPath,hypervisor,instanceid,f'{mlmodels}-{modelSize}',selected_use_case+'_'+str(ModelVersion),eval) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmbenchmarking','-hv',hypervisor,'-i',instanceid,'-md',f'{mlmodels}-{modelSize}','-uc',usecasename+'_'+str(ModelVersion),'-e',eval]) + return trainmodel(request) +def trainresult(request): + from appbe.aion_config import settings + usecasetab = settings() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + usecasename = request.session['usecaseid'].replace("" "", ""_"") + log = logging.getLogger('log_ux') + computeinfrastructure = compute.readComputeConfig() + trainmodel =request.POST.get('trainmodel') + + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + configSettings = json.loads(configSettingsData) + total_steps = calculate_total_activities(configSettings) + request.session['total_steps'] = total_steps + p = usecasedetails.objects.get(usecaseid=request.session['usecaseid']) + usecaseindex = p.id #bugid:14163 + if trainmodel == 'Train Model': + try: + if configSettings['basic']['analysisType']['survivalAnalysis'] != 'True' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True': + configSettings['advance']['testPercentage'] = int(request.POST.get('TrainTestPercentage',0)) #Unnati + configSettings['advance']['categoryBalancingMethod'] = request.POST.get('BalancingMethod','NA') + if configSettings['basic']['analysisType']['llmFineTuning'] == 'True': + configSettings['basic']['vmRunning'] = request.POST.get('vmRunning','KeepRunning') + if configSettings['basic']['analysisType']['similarityIdentification'] == 'True': + dbs = configSettings['basic']['preprocessing']['similarityIdentification'] + for dbt in dbs.keys(): + configSettings['basic']['preprocessing']['similarityIdentification'][dbt]='False' + configSettings['basic']['preprocessing']['similarityIdentification'][request.POST.get('contentdb')] = 'True' + errorStatus = 
checkVectorDBPackage(request.POST.get('contentdb')) + if errorStatus.lower() == 'true': + return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''}) + if configSettings['basic']['analysisType']['contextualSearch'] == 'True': + dbs = configSettings['basic']['preprocessing']['contextualSearch'] + for dbt in dbs.keys(): + configSettings['basic']['preprocessing']['contextualSearch'][dbt]='False' + configSettings['basic']['preprocessing']['contextualSearch'][request.POST.get('contentdb')] = 'True' + errorStatus = checkVectorDBPackage(request.POST.get('contentdb')) + if errorStatus.lower() == 'true': + return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''}) + updatedConfigSettingsJson = json.dumps(configSettings) + f.seek(0) + f.write(updatedConfigSettingsJson) + f.truncate() + f.close() + # output_json = aion_train_model(updatedConfigFile) + request.session['noflines'] = 0 + request.session['ModelStatus'] = 'Running' + request.session.save() + changeModelStatus(Existusecases,request.session['modelid'],'Running','NA','NA') + #print(configSettings['basic']['distributedLearning']) + #sys.exit() + import timeit + startTime = timeit.default_timer() + process_killed = False + if computeinfrastructure['computeInfrastructure'].lower() == 'aws' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True': + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + #print(scriptPath,updatedConfigFile) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','awstraining','-c',updatedConfigFile]) + elif computeinfrastructure['computeInfrastructure'].lower() in ['aws','gcp']: + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmtuning','-c',updatedConfigFile]) + else: + if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettings['basic']['analysisType']['multiModalLearning'] == 'True': + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_gluon.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath, updatedConfigFile]) + elif configSettings['basic']['onlineLearning'] == 'True': + scriptPath = os.path.norm" +"path(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','onlinetraining','-c',updatedConfigFile]) + elif configSettings['basic']['distributedLearning'] == 'True': + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','distributedtraining','-c',updatedConfigFile]) + else: + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + cmd = [sys.executable, scriptPath,'-m','training','-c',updatedConfigFile] # task 4343 abort training + training_proc = 
subprocess.Popen( cmd) + + outputStr = '' + while training_proc.poll() == None: + if getModelStatus(Existusecases,request.session['modelid']) == 'Terminated': + kill_child_proc_rec(training_proc.pid) + training_proc.kill() + process_killed = True + time.sleep(1) + if process_killed: + outputStr = 'aion_learner_status:{""status"":""Fail"",""message"":""Terminated by user""}' + else: + outputStr = read_log_file( updatedConfigFile) + usename = request.session['UseCaseName'].replace("" "", ""_"") + outputfile = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'etc','output.json') + if os.path.isfile(outputfile): + f1 = open(outputfile, ""r+"", encoding=""utf-8"") + outputStr = f1.read() + f1.close() + else: + if not isinstance( outputStr, str): + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + resultJsonObj = json.loads(outputStr) + #print(resultJsonObj) + odataFile = request.session['outputfilepath'] + with open(odataFile, 'w') as json_file: + json.dump(resultJsonObj, json_file) + json_file.close() + + model = Existusecases.objects.get(id=request.session['modelid']) + + request.session['ModelStatus'] = resultJsonObj['status'] + ModelStatus = request.session['ModelStatus'] + model.Status = resultJsonObj['status'] + + training_error = '' + if resultJsonObj['status'] == 'SUCCESS': + model.modelType = resultJsonObj['data']['ModelType'] + model.DeployPath = str(resultJsonObj['data']['deployLocation']) + if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection', 'timeSeriesAnomalyDetection']: #task 11997 + model.ProblemType = 'unsupervised' + else: + model.ProblemType = 'supervised' + else: + training_error = resultJsonObj['message'] + model.save() + problemtypes = configSettings['basic']['analysisType'] + #print(problemtypes.keys()) + problem_typ = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_typ = k + break + modeltyp = problem_typ + listofmodels = '' + problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings) + if mlmodels != '': + listofmodels += str(mlmodels) + if dlmodels != '': + listofmodels += listofmodels+' '+str(dlmodels) + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Algorithms',listofmodels) + + # ----------------------------------------------------------------------------- # + if (problem_type == 'classification' or problem_type == 'regression'): + if len(mlmodels.split(',')) == 1: + trainingTime = timeit.default_timer() - startTime + trainingTime = round(trainingTime/60) + + # calculate the size of uploaded dataset + filePath = configSettings['basic']['dataLocation'] + sz = os.path.getsize(filePath) + fileSizeMB = sz / (1024 * 1024) + filesize = str(fileSizeMB) + "" MB"" + + featuresCount = str(len(configSettings['basic']['trainingFeatures'].split(','))) + + modelname = mlmodels.split(',')[0] + + + fileSizeMBLimit = 0 + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config') + if(os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"", encoding=""utf-8"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'fileSizeMBLimit=' in line: + fileSizeMBLimit = int(line.split('=',1)[1]) + + # append the new entry into config only if size of uploaded dataset meets the 
threshold + if fileSizeMB > fileSizeMBLimit: + _val = updateRunConfig(trainingTime, filesize, featuresCount, modelname, problem_type) + # ----------------------------------------------------------------------------- # + + + if resultJsonObj['status'] == 'SUCCESS': + #from appbe import telemetry + + request.session['deploypath'] = str(resultJsonObj['data']['deployLocation']) + from appbe.trainresult import ParseResults + result, survical_images = ParseResults(outputStr) + model_perf = getPerformanceMatrix(request.session['deploypath'],outputStr) + #telemetry.telemetry_data('Training Successfully Done',selected_use_case+'_'+str(ModelVersion),str(listofmodels)) + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Success') + request.session['currentstate'] = 3 + request.session['finalstate'] = 4 + request.session.save() + file_path = request.session['logfilepath'] + my_file = open(file_path, 'r',encoding=""utf8"") + file_content = my_file.read() + my_file.close() + matched_lines = [line.replace('Status:-', '') for line in file_content.split('\\n') if ""Status:-"" in line] + matched_status_lines = matched_lines[::-1] + + matched_status_lines = matched_status_lines[0] + matched_status_lines = matched_status_lines.split('...') + matched_status_lines = matched_status_lines[1] + + no_lines = len(matched_lines) + if 'noflines' not in request.session: + request.session['noflines'] = 0 + request.session['noflines'] = request.session['noflines'] + 1 + if request.session['ModelStatus'] != 'SUCCESS': + numberoflines = request.session['noflines'] + if numberoflines > no_lines: + numberoflines = no_lines + request.session['noflines'] = no_lines + matched_lines = matched_lines[0:numberoflines] + shortlogs = getStatusCount(matched_lines,request.session['total_steps']) + temp = {} + temp['modelName'] = request.session['UseCaseName'] + temp['modelVersion'] = request.session['ModelVersion'] + config = {} + config['modelName'] = request.session['UseCaseName'] + config['modelVersion'] = request.session['ModelVersion'] + config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature'] + config['sequenceFeatures'] = configSettings['basic']['indexFeature'] + config['FeaturesList'] = configSettings['basic']['trainingFeatures'] + config['unimportantFeatures'] = '' + config['targetFeature'] = configSettings['basic']['targetFeature'] + modelCondition = '' + problemtypes = configSettings['basic']['analysisType'] + + problem_type = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + problem_type,dproblemType,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings) + configSettings['basic']['problem_type'] = problem_type + configSettings['basic']['dproblem_type'] = dproblemType + if mlmodels != '': + configSettings['basic']['mllearner'] = 'enable' + if dlmodels != '': + configSettings['basic']['dllearner'] = 'enable' + if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True': + configSettings['basic']['selected_ML_Models'] = 'AutoGluon' + configSettings['basic']['mllearner'] = 'enable' + else: + configSettings['basic']['selected_ML_Models'] = mlmodels + configSettings['basic']['selected_DL_Models'] = dlmodels + configSettings['basic']['smodel_size'] = smodelsize + if 'noOfRecords' in configSettings['basic']: + records = configSettings['basic']['noOfRecords'] + else: + from appbe.train_output import getDataSetRecordsCount + records = 
getDataSetRecordsCount(configSettings['basic']['dataLocation']) + filesCount = 0 + filesSize = 0 + #print(configSettings['basic']['analysisType']['llmFineTuning'].lower()) + #print(configSettings['basic']['folderSettings']['fileType'].lower()) + if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document','llm_code']: + filesCount,filesSize = getDataFileCountAndSize(configSettings['basic']) + noofIteration = calculate_total_interations(configSettings) + features = configSettings['basic']['trainingFeatures'].split(',') + noOfTrainingFeatures = len(features) + configSettings['basic']['problem_type']=problem_type + featuretype = configSettings['advance']['profiler']['featureDict'] + if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels): + selectedmodel = 'modelcomparision' + else: + selectedmodel = "" "" + + + user_provided_data_type = {} + text_type='' + for feat_conf in featuretype: + colm = feat_conf.get('feature', '') + if feat_conf['type'] == ""text"": + text_type=""text"" + break + contentdb = '' + if problem_type.lower() in ['similarityidentification','contextualsearch']: + if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true': + contentdb = 'CSV' + elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true': + contentdb = 'VectorDB' + + context = {'tab': 'trainresult','filesCount':filesCount,'filesSize':filesSize, 'result': result, 'selectedmodel': selectedmodel, 'advconfig': configSettings, 'shortlogs':shortlogs, + 'selected_use_case': selected_use_case, 'noOfRecords': records,'noOfTrainingFeatures':noOfTrainingFeatures, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336 + 'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'model_perf': model_perf,'logs':matched_status_lines, 'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename} + context['version'] = AION_VERSION + return render(request, 'training.html', context) + else: + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Error') + request.session['currentstate'] = 3 + request.session['finalstate'] = 4 + #from appbe import telemetry + if process_killed: + errorMsg = 'Terminated by user' + else: + errorMsg = 'Model Training Error (check log file for more details)' + contentdb = '' + if problem_type.lower() in ['similarityidentification','contextualsearch']: + if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true': + contentdb = 'CSV' + elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true': + contentdb = 'VectorDB' + + + + #telemetry.telemetry_data('Training Error',selected_use_case+'_'+str(ModelVersion),str(listofmodels)) + context = {'tab': 'trainresult', 'error': errorMsg,'selected_use_case': selected_use_case,'contentdb':contentdb, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336 + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], + 'selected': 
'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename} + context['version'] = AION_VERSION + return render(request, 'training.html', context) + except Exception as e: + log.info('Training Fail:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Training fail '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + print(e) + return render(request, 'training.html', {'error': 'Model Training Error','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasename':usecasename,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''}) + + else: + modelCondition = '' + problemtypes = configSettings['basic']['analysisType'] + + problem_type = """" + for k in problemtypes.keys(): + if configSettings['basic']['analysisType'][k] == 'True': + problem_type = k + break + problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings) + configSettings['basic']['problem_type'] = problem_type + configSettings['basic']['dproblem" +"_type'] = dproblem_type + if mlmodels != '': + configSettings['basic']['mllearner'] = 'enable' + if dlmodels != '': + configSettings['basic']['dllearner'] = 'enable' + if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True': + configSettings['basic']['selected_ML_Models'] = 'AutoGluon' + configSettings['basic']['mllearner'] = 'enable' + else: + configSettings['basic']['selected_ML_Models'] = mlmodels + configSettings['basic']['selected_DL_Models'] = dlmodels + + if 'noofRecords' in configSettings['basic']: + records = configSettings['basic']['noofRecords'] + else: + from appbe.train_output import getDataSetRecordsCount + records = getDataSetRecordsCount(configSettings['basic']['dataLocation']) + filesCount = 0 + filesSize = 0 + print(configSettings['basic']['analysisType']['llmFineTuning'].lower()) + print(configSettings['basic']['folderSettings']['fileType'].lower()) + if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \\ + configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document', 'llm_code']: + filesCount, filesSize = getDataFileCountAndSize(configSettings['basic']) + noofIteration = calculate_total_interations(configSettings) + features = configSettings['basic']['trainingFeatures'].split(',') + noOfTrainingFeatures = len(features) + configSettings['basic']['problem_type']=problem_type + + context = { 'advconfig': configSettings,'filesCount':filesCount,'filesSize':filesSize, + 'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion,'noofIteration':noofIteration,'usecasename':usecasename, + 'modelCondition':modelCondition, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures} + context['version'] = AION_VERSION + return render(request, 'training.html',context) + +def getTrainingTime(filePath, no_of_features): + #filePath = 'C:\\\\MyFolder\\AION\\\\AION Datasets\\\\Heavy Datasets\\\\class_1MRows_26Cols.csv' + returnVal = '0_0' + if(os.path.isfile(filePath)): + trainingTime = 0 + + neartrainingTime = 0 # It's used to store the closest Training-Time + nearsampleSize = 0 # It's used to store the closest Sample-Size + 
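+ # Worked example of the estimate computed below (numbers are illustrative only):
+ # when no training_runs.json entry falls within the accepted size variance (default 10%),
+ # the nearest run is extrapolated linearly, e.g. a nearest run of 100 MB in 20 Mins and an
+ # uploaded file of 150 MB gives ratio = 50% and estimate = 20 + (50 * 20)/100 = 30,
+ # returned as '30_extrapolation'; an in-range match is returned as '<minutes>_match'.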
leastSizeDifference = 0 # It's used to find the possible minimum difference between the dataset's actual size and Sample-Size in JSON file + inRange = 0 # It's used to identify if Extrapolation is needed or not + + fileSizeMBLimit = 0 # It's used to check/limit the size of uploaded dataset + acceptSizeVariance = 10 # It's used to cover the variance in sample-size + featuresThreshold = 50 # It's used to set the boundary/binary-classification of records-typte + + # ------------------------------------------------------------------------------------------------------------ # + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config') + if(os.path.isfile(configfilepath)): + file = open(configfilepath, ""r"") + read = file.read() + file.close() + for line in read.splitlines(): + if 'fileSizeMBLimit=' in line: + fileSizeMBLimit = int(line.split('=',1)[1]) + if 'acceptSizeVariance=' in line: + acceptSizeVariance = int(line.split('=',1)[1]) + if 'featuresThreshold=' in line: + featuresThreshold = int(line.split('=',1)[1]) + + # get the size of uploaded dataset/file (in MB) + sz = os.path.getsize(filePath) + fileSizeMB = sz / (1024 * 1024) + + # check if uploaded dataset/file is bigger than defined threshold or not. If yes, only than go to calculate the tentative training-time + if(fileSizeMB > fileSizeMBLimit): + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json') + try: + if(os.path.isfile(configfilepath)): + # Opening JSON file + f = open(configfilepath) + # returns JSON object as a dictionary + data = json.load(f) + + # Iterating through the json list + for run in data['runs']: + sampleSize = run['sampleSize'].replace("" MB"","""") + sampleSize = int(float(sampleSize)) + + features = int(run['features']) + + # match records under 10% (+ or -) of variance + sizeDifference = fileSizeMB - sampleSize + if (sizeDifference < 0): + sizeDifference = sizeDifference * -1 + + if (leastSizeDifference == 0): + leastSizeDifference = sizeDifference + + # ------------------------------------------------------------------------------------------------ # + if (no_of_features <= featuresThreshold): + if ((sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features <= featuresThreshold): + acceptSizeVariance = (sizeDifference * 100)/fileSizeMB + trainingTime = run['trainingTime'].replace("" Mins"","""") + trainingTime = int(trainingTime) + returnVal = str(trainingTime) + '_match' + inRange = 1 + + # get the nearest value of sampleSize (which can be used for extrapolation) from the JSON file + if (sizeDifference <= leastSizeDifference and features <= featuresThreshold): + nearsampleSize = sampleSize + leastSizeDifference = sizeDifference + neartrainingTime = run['trainingTime'].replace("" Mins"","""") + neartrainingTime = int(neartrainingTime) + # ------------------------------------------------------------------------------------------------ # + + # ------------------------------------------------------------------------------------------------ # + if (no_of_features > featuresThreshold): + if ((sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features > featuresThreshold): + acceptSizeVariance = (sizeDifference * 100)/fileSizeMB + trainingTime = run['trainingTime'].replace("" Mins"","""") + trainingTime = int(trainingTime) + returnVal = str(trainingTime) + '_match' + inRange = 1 + + # get the nearest value of sampleSize (which can be used for extrapolation) from the JSON file + if 
(sizeDifference <= leastSizeDifference and features > featuresThreshold): + nearsampleSize = sampleSize + leastSizeDifference = sizeDifference + neartrainingTime = run['trainingTime'].replace("" Mins"","""") + neartrainingTime = int(neartrainingTime) + # ------------------------------------------------------------------------------------------------ # + + # When there is no record (sample-size) matched with 10% of variance then go for the extrapolation + if (inRange == 0): + sizeDifference = fileSizeMB - nearsampleSize + ratio = (sizeDifference * 100)/nearsampleSize + trainingTime = neartrainingTime + ((ratio * neartrainingTime)/100) + trainingTime = int(trainingTime) + returnVal = str(trainingTime) + '_extrapolation' + + # Closing file + f.close() + + except Exception as inst: + pass + + return returnVal + +def getllmmodelscore(usecaseid,model): + DB_TABLE = 'llm_benchmarking' + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = str(Path(DATA_DIR)/'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + usecaseidcond = f'usecaseid=""{usecaseid}""' + helptxt ='' + msg = '' + #print(usecaseidcond) + if sqlite_obj.table_exists(DB_TABLE): + usecasemodelscore = sqlite_obj.read_data(DB_TABLE,usecaseidcond) + status = '' + finetunedscore = 'NA' + foundationscore = 'NA' + benchmarkdataset = 'NA' + modelfunctionscore = {'CodeLLaMA-2-7B':'33%','CodeLLaMA-2-13B':'36%','LLaMA-2-7B':'16.8%','LLaMA-2-13B':'20.1%','LLaMA-2-70B':'31.0%','LLaMA-2-Chat-7B':'76%','LLaMA-2-Chat-13B':'79.2%','LLaMA-2-Chat-70B':'84.2%','Falcon-7B':'NA','Falcon-40B':'NA'} + foundationscore = modelfunctionscore.get(model,'NA') + scoretype='NA' + for x in usecasemodelscore: + #print(x) + keys = sqlite_obj.column_names(DB_TABLE) + #print(keys) + status = x[keys.index('state')] + if status.lower() in ['success','finished']: + result_type = x[keys.index('result_type')] + result = eval(x[keys.index('result')]) + scoretype = list(result.keys())[0] + if scoretype.lower() == 'hellaswag': + benchmarkdataset = 'HellaSwag' + helptxt = 'HellaSwag is a challenge dataset for evaluating commonsense Natural Language Inferencing. It consists of ~70k multiple choice questions with four answer choices about what might happen next. The correct answer is the (real) sentence for the next event; the three incorrect answers are adversarial generated and human verified.' 
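+ # Illustrative only (hypothetical values): a stored result such as
+ #     {'hellaswag': {'acc': 0.7521}}
+ # is reported by the parsing below as benchmark dataset HellaSwag, score type
+ # Accuracy and a fine-tuned score of 75.21%.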
+ else: + benchmarkdataset = 'HumanEval' + if result_type == 'dict': + sub_result = list(result.values())[0] + scoretype = list(sub_result.keys())[0] + if scoretype == 'acc': + scoretype = 'Accuracy' + finetunedscore = str(round((float(list(sub_result.values())[0])*100),2)) + finetunedscore = f'{finetunedscore}%' + else: + finetunedscore = str(round((float(list(result.values())[0])*100),2)) + elif status.lower() == 'error': + msg = x[keys.index('result')] + + evaluation = {'status':status,'msg':msg,'benchmarkdataset':benchmarkdataset,'scoreType':scoretype,'finetunedscore':str(finetunedscore),'foundationscore':foundationscore,'helptxt':helptxt} + else: + evaluation = {'status':'','scoreType':'','benchmarkdataset':'','finetunedscore':'','foundationscore':'','helptxt':''} + #print(evaluation) + return evaluation +def trainmodel(request): + from appbe.aion_config import settings + usecasetab = settings() + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + usecasename = request.session['usecaseid'].replace("" "", ""_"") + try: + checkModelUnderTraining(request,usecasedetails,Existusecases) + computeinfrastructure = compute.readComputeConfig() + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + total_steps = calculate_total_activities(configSettingsJson) + warning = check_unsupported_col(configSettingsJson) + time_series_warning = check_granularity(configSettingsJson) + noofIteration = calculate_total_interations(configSettingsJson) + request.session['total_steps'] = total_steps + p = usecasedetails.objects.get(usecaseid=request.session['usecaseid']) + usecaseid = p.id + + modelCondition = '' + + problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson) + configSettingsJson['basic']['problem_type'] = problem_type + configSettingsJson['basic']['dproblem_type'] = dproblem_type + if mlmodels != '': + configSettingsJson['basic']['mllearner'] = 'enable' + if dlmodels != '': + configSettingsJson['basic']['dllearner'] = 'enable' + if configSettingsJson['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettingsJson['basic']['analysisType']['multiModalLearning'] == 'True': + configSettingsJson['basic']['selected_ML_Models'] = 'AutoGluon' + configSettingsJson['basic']['mllearner'] = 'enable' + else: + configSettingsJson['basic']['selected_ML_Models'] = mlmodels + configSettingsJson['basic']['selected_DL_Models'] = dlmodels + configSettingsJson['basic']['smodel_size'] = smodelsize + + + + + # ---------------------------------------------------------------------- # + cal_trainingTime = 0. 
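+ # Defaults for the hints shown on the training page; they are refined below only for
+ # untrained classification/regression use cases with a single selected ML algorithm,
+ # where getTrainingTime() returns either a matched or an extrapolated estimate and
+ # classification additionally sets the data imbalance flag.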
+ is_extrapolation = 'No' + is_DataImbalance = 'No' + if (request.session['ModelStatus'] == 'Not Trained' and (problem_type == 'classification' or problem_type == 'regression')): + + # + if ( problem_type == 'classification' ): + is_DataImbalance = 'Yes' + # + + if len(mlmodels.split(',')) == 1: + filePath = configSettingsJson['basic']['dataLocation'] + no_of_features = len(configSettingsJson['basic']['trainingFeatures'].split(',')) + + returnVal = getTrainingTime(filePath, no_of_features) + cal_trainingTime = int(returnVal.split('_')[0]) + + if (returnVal.split('_')[1] == 'extrapolation'): + is_extrapolation = 'Yes' + # ---------------------------------------------------------------------- # + + + features = configSettingsJson['basic']['trainingFeatures'].split(',') + if configSettingsJson['basic']['targetFeature'] in features: + features.remove(configSettingsJson['basic']['targetFeature']) + noOfTrainingFeatures = len(features) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + if 'noOfRecords' in configSettingsJson['basic']: + records = configSettingsJson['basic']['noOfRecords'] + else: + from appbe.train_output import getDataSetRecordsCount + records = getDataSetRecordsCount(configSettingsJson['basic']['dataLocation']) + filesCount = 0 + filesSize = 0 + try: + if configSettingsJson['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \\ + configSettingsJson['basic']['folderSettings']['fileType'].lower()" +"in ['llm_document', 'llm_code']: + filesCount, filesSize = getDataFileCountAndSize(configSettingsJson['basic']) + except: + pass + if request.session['finalstate'] <= 3: + request.session['finalstate'] = 3 + request.session['currentstate'] = 3 + if request.session['ModelStatus'].lower() == 'running': + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + status = checkversionrunningstatus(model.id,usecasedetails,Existusecases) + request.session['ModelStatus'] = status + request.session.save() + + if request.session['ModelStatus'] == 'SUCCESS': + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + model_perf = getPerformanceMatrix(request.session['deploypath'],training_output) + from appbe.trainresult import ParseResults + result, survical_images = ParseResults(training_output) + file_path = request.session['logfilepath'] + my_file = open(file_path, 'r',encoding=""utf-8"") + file_content = my_file.read() + my_file.close() + matched_lines = [line.replace('Status:-', '') for line in file_content.split('\\n') if ""Status:-"" in line] + matched_status_lines = matched_lines[::-1] + + matched_status_lines = matched_status_lines[0] + matched_status_lines = matched_status_lines.split('...') + matched_status_lines = matched_status_lines[1] + + no_lines = len(matched_lines) + if 'noflines' not in request.session: + request.session['noflines'] = 0 + request.session['noflines'] = request.session['noflines'] + 1 + if request.session['ModelStatus'] != 'SUCCESS': + numberoflines = request.session['noflines'] + if numberoflines > no_lines: + numberoflines = no_lines + request.session['noflines'] = no_lines + matched_lines = matched_lines[0:numberoflines] + shortlogs = 
getStatusCount(matched_lines,request.session['total_steps']) + + featuretype = configSettingsJson['advance']['profiler']['featureDict'] + + + user_provided_data_type = {} + text_type='' + for feat_conf in featuretype: + colm = feat_conf.get('feature', '') + if feat_conf['type'] == ""text"": + text_type=""text"" + break + + configSettingsJson['basic']['problem_type']= problem_type + configSettingsJson['basic']['selected_ML_Models']= mlmodels + if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels): + selectedmodel = 'modelcomparision' + else: + selectedmodel = "" "" + contentdb = '' + finetunedeval = {} + if problem_type.lower() in ['similarityidentification','contextualsearch']: + if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true': + contentdb = 'CSV' + elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true': + contentdb = 'VectorDB' + if problem_type.lower() == 'llmfinetuning': + modelSize = getModelSize(configSettingsJson,mlmodels) + usecasename = request.session['usecaseid'].replace("" "", ""_"") + finetunedeval = getllmmodelscore(f'{usecasename}_{ModelVersion}',f'{mlmodels}-{modelSize}') + + context = {'result': result,'filesCount':filesCount,'filesSize':filesSize, 'text_type':text_type,'selectedmodel':selectedmodel, 'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename, + 'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'warning':warning,'time_series_warning':time_series_warning, + 'modelCondition':modelCondition,'ModelVersion': ModelVersion,'shortlogs':shortlogs,'logs':matched_status_lines,'currentstate': request.session['currentstate'],'finalstate': request.session['finalstate'], 'model_perf': model_perf,'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb,'finetunedeval':finetunedeval} + else: + contentdb = '' + if problem_type.lower() in ['similarityidentification','contextualsearch']: + if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true': + contentdb = 'CSV' + elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true': + status = importlib.util.find_spec('chromadb') + if not status: + contentdb = 'CSV' + else: + contentdb = 'VectorDB' + else: + status = importlib.util.find_spec('chromadb') + if not status: + contentdb = 'CSV' + else: + contentdb = 'VectorDB' + configSettingsJson['basic']['problem_type']=problem_type + context = {'cal_trainingTime':cal_trainingTime,'filesCount':filesCount,'filesSize':filesSize, 'is_extrapolation': is_extrapolation,'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename, + 'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus, 'warning':warning,'time_series_warning':time_series_warning,'is_DataImbalance' : is_DataImbalance, + 'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'], + 'modelCondition':modelCondition,'finalstate': request.session['finalstate'], 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'contentdb':contentdb} + return render(request, 'training.html', 
context) + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + context = { 'error': 'Model Training Error','selected_use_case': selected_use_case,'contentdb':'','usecasename':usecasename, + 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + return render(request, 'training.html', context) from django.test import TestCase + +# Create your tests here. + from django import forms +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases + +class usecasedetailsForm(forms.ModelForm): + class Meta: + model = usecasedetails + fields = ""__all__"" + +class ExistusecasesForm(forms.ModelForm): + class Meta: + model = Existusecases + fields = ""__all__"" from django.contrib import admin + +# Register your models here. + from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getversion +from appbe.aion_config import running_setting +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +from appbe import compute +AION_VERSION = getversion() +def basicconfig(request): + try: + from appbe.aion_config import settings + usecasetab = settings() + from appbe import basic_Config as bc + request.session['defaultfilepath'] = DEFAULT_FILE_PATH + request.session['configfilepath'] = CONFIG_FILE_PATH + request.session['deploylocation'] = DEPLOY_LOCATION + computeinfrastructure = compute.readComputeConfig() + submit_button = request.POST.get('upload_submit') + ModelVersion = request.session['ModelVersion'] + ruuningSetting = running_setting() + selected_use_case = request.session['UseCaseName'] + ModelStatus = request.session['ModelStatus'] + #print(request.method) + # Retraing settings changes + if request.method == 'POST' and request.session['finalstate'] == 0: + context = bc.gotoconf(request) + else: + context = bc.openbasicconf(request) + context['computeinfrastructure'] = computeinfrastructure + context['version'] = AION_VERSION + context['usecasetab'] = usecasetab + return render(request, 'basicconfig.html', context) + except Exception as e: + print(e) + import sys,os + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return render(request, 'basicconfig.html', {'error':'Fail to retreive basic config file inputs','version':AION_VERSION,'usecasetab':usecasetab}) + +def savebasicconfig(request): + from appbe import basic_Config as bc + from appbe import advance_Config as ac + from appfe.modelTraining.train_views import trainmodel + try: + if request.method != 'GET': + status,msg,context =bc.save(request) + else: + status = 'pass' + msg = '' + except Exception as e: + print(e) + + if status.lower()!='error': + if request.method == 'GET': + context = ac.basicconfignex(request) + computeinfrastructure = compute.readComputeConfig() + context['computeinfrastructure'] = computeinfrastructure + context['version'] = AION_VERSION + context['currentstate'] = 1 + return render(request, 
'advancedconfig.html', context) + elif request.POST.get('BasicSubmit') == 'GotoAdvance': + context = ac.basicconfignex(request) + computeinfrastructure = compute.readComputeConfig() + context['computeinfrastructure'] = computeinfrastructure + context['version'] = AION_VERSION + context['currentstate'] = 1 + return render(request, 'advancedconfig.html', context) + else: + return trainmodel(request) + else: + context = bc.openbasicconf(request) + computeinfrastructure = compute.readComputeConfig() + context['computeinfrastructure'] = computeinfrastructure + context['config_error']= msg + context['version'] = AION_VERSION + return render(request, 'basicconfig.html', context) + from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import time +from django.template import loader +from django import template +from django.views.decorators.csrf import csrf_exempt +from os import walk +from plotly.subplots import make_subplots +import plotly.graph_objects as go +from appbe import help_Text as ht +import random +from appbe import service_url +from appbe import compute +from appbe import installPackage +from appbe.pages import getusercasestatus +from appbe import images_analysis as ia +from django.db.models import Max, F +from appbe.aion_config import settings +from appbe.aion_config import get_graviton_data +from appbe.aion_config import get_llm_data +from appbe.aion_config import get_edafeatures +from appbe.training import calculate_total_activities +from appbe.training import calculate_total_interations +from appbe.training import checkModelUnderTraining +from appbe.training import checkversionrunningstatus +from appbe.training import changeModelStatus +from appbe.training import getStatusCount +from appbe.training import getModelStatus +from appbe.training import check_unsupported_col +from appbe.publish import chech_publish_info +from appbe.publish import check_input_data +import uuid +import numpy as np +from appbe.aion_config import kafka_setting +from appbe.aion_config import running_setting +from appbe.validatecsv import csv_validator +from appbe.aion_config import addKafkaModel +from appbe.aion_config import getrunningstatus +from appbe.aion_config import aion_service +from appbe.pages import getversion +from appbe.s3bucketsDB import get_s3_bucket +from appbe.s3bucketsDB import read_s3_bucket +from appbe.gcsbucketsDB import get_gcs_bucket +from appbe.gcsbucketsDB import read_gcs_bucket +from appbe.azureStorageDB import get_azureStorage +from appbe.azureStorageDB import read_azureStorage +from appbe.dataIngestion import getcommonfields +from appbe.dataIngestion import ingestDataFromFile +from appbe.dataIngestion import delimitedsetting +import pdfplumber +from docx import Document + + +from appbe.trainresult import ParseResults +import pandas as pd +import numpy as np +import re +import xml.etree.ElementTree as ET +import json +import glob +from appbe import dataPath +from pathlib import Path +import urllib, base64 +import os +from os.path import expanduser +import platform +import time +import sys +import csv +import subprocess +import base64 +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.forms import usecasedetailsForm +from appfe.modelTraining.models import Existusecases +from django.shortcuts import get_list_or_404, get_object_or_404 +from pandas import json_normalize +from django.contrib.sessions.models import Session +import logging +from appbe.dataPath import 
DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from utils.file_ops import read_df_compressed +from appbe.dataPath import LOG_LOCATION +from appbe.log_ut import logg + + +LOG_FILE_NAME = 'model_training_logs.log' +LOG_FOLDER = 'log' + +if os.path.isdir(DATA_FILE_PATH) == False: + os.makedirs(DATA_FILE_PATH) + +if os.path.isdir(CONFIG_FILE_PATH) == False: + os.makedirs(CONFIG_FILE_PATH) + +if os.path.isdir(DEPLOY_LOCATION) == False: + os.makedirs(DEPLOY_LOCATION" +") + + +# EION_SCRIPT_PATH = 'C:\\\\Project\\\\Analytics\\\\eion\\\\eion\\\\eion.py' +PYTHON_PATH = 'python.exe' +AION_VERSION = getversion() +usecasetab = settings() +#AION_VERSION +# MainPage +logg_obj = logg(LOG_LOCATION) +log = logg_obj.create_log(AION_VERSION) +def index(request): + from appbe.pages import index_page + status,context,action = index_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + +def localsetings(request): + from appbe.pages import get_usecase_page + try: + compute.updatelocalsetings(request) + time.sleep(2) + request.session['IsRetraining'] = 'No' + #print(1) + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + #print(2) + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fail to update localsetings','version':AION_VERSION}) + +def computetoAWS(request): + from appbe.pages import get_usecase_page + try: + compute.updateToComputeSettings(request) + time.sleep(2) + #print(1) + request.session['IsRetraining'] = 'No' + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fail to update ComputeSettings','version':AION_VERSION}) +def licensekey(request): + try: + command = request.POST['licensesubmit'] + if command.lower() == 'generatelicense': + userkey = request.POST['userkey'] + from records import pushrecords + msg = pushrecords.generateLicenseKey(userkey) + context = {'msg':msg} + context['selected'] = 'License' + print(context) + return render(request,'licenseexpired.html',context) + + else: + licensekey = request.POST['licensekey'] + from records import pushrecords + pushrecords.updateLicense(licensekey) + from appbe.pages import get_usecase_page + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fails in loading the page','version':AION_VERSION}) +def help(request): + context = {'selected': 'userguide', 'usecasetab': usecasetab} + context['version'] = AION_VERSION + return render(request, 'help.html', context) + +def mlac_userguide(request): + context = {'selected': 'mlac_userguide', 'usecasetab': usecasetab} + context['version'] = AION_VERSION + return render(request, 'help.html', context) + +def AionProblem(request): + if request.method == ""POST"": + AionProblem = request.POST[""Algorithm""] + request.session[""AionProblem""] = AionProblem + return HttpResponse(AionProblem) + +def features(request): + if request.method == ""POST"": + + typedata = request.POST['datatype'] + + if typedata == ""datapath"": + datapath 
= request.POST['datap'] + if(os.path.isfile(datapath) and os.path.isfile(datapath)): + df = pd.read_csv(datapath) + modelfeature = df.columns.tolist() + + modelfeatures = json.dumps(modelfeature) + return HttpResponse(modelfeatures) + else: + return HttpResponse(json.dumps(""Data path does not exist ""), content_type=""application/error"") + elif typedata == ""scriptpath"": + scriptPath = request.POST['scriptp'] + #print(scriptPath) + f = open(scriptPath, ""r"") + pythoncode = f.read() + f.close() + ldict = {} + exec(pythoncode, globals(), ldict) + df = ldict['dfpy'] + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + df.to_csv(dataFile, index=False) + modelfeature = df.columns.tolist() + output = {'features':modelfeature,'datafile':dataFile} + output = json.dumps(output) + + # return render(request,'prediction.html',{'modelfeatures':modelfeatures,'test':'test'}) + return HttpResponse(output) + +def mllite(request): + from appbe.pages import mllite_page + context = mllite_page(request) + context['version'] = AION_VERSION + return render(request, 'ConvertOnnx.html',context) + +def usecasefilter(request): + from appbe import mlstyles as mls + selectedoption = request.GET[""selectedoption""] + context = mls.Aiusecases(request,selectedoption) + context['listtype'] = selectedoption + context['version'] = AION_VERSION + return render(request, 'aiUseCases.html',context) +def AIusecases(request): + from appbe import mlstyles as mls + context = mls.Aiusecases(request,'Implemented') + context['listtype'] = 'Implemented' + context['version'] = AION_VERSION + return render(request, 'aiUseCases.html',context) + +def mlstyles(request): + from appbe import mlstyles as mls + context = mls.mlstyles(request) + context['selected'] = 'DataOperations' + context['version'] = AION_VERSION + return render(request, 'mlstyles.html',context) + +def mlpredict(request): + from appbe import mlstyles as mls + context, button_flag = mls.mlpredict(request) + context['selected'] = 'DataOperations' + context['version'] = AION_VERSION + if button_flag in ['prediction','predictsingle']: + return render(request, 'mlstyles.html', context) + else: + return context + +def mltrain(request): + from appbe import mlstyles as mls + context, button_flag = mls.mltrain(request) + context['selected'] = 'DataOperations' + context['version'] = AION_VERSION + if button_flag == 'training': + return render(request, 'mlstyles.html', context) + else: + return context + +def getdatasetname(request): + try: + from appbe.dataPath import DATA_DIR + from appbe.sqliteUtility import sqlite_db + file_path = os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + temp_data = sqlite_obj.read_data('dataingest') + + data = [] + for x in temp_data: + data_dict = {} + data_dict['datasetname'] = x[1] + data.append(data_dict) + + except Exception as e: + print(e) + data = [] + return HttpResponse(json.dumps(data)) + +def outputdrift(request): + try: + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + if ModelStatus != 'SUCCESS': + context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure} + + else: + updatedConfigFile = request.session['config_json'] + f = 
open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemtypes = configSettingsJson['basic']['analysisType'] + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + problem = problem_type + ser_url = service_url.read_performance_service_url_params() + iterName = request.session['UseCaseName'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = ser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion) + if problem.lower() not in ['classification','regression']: + context = {'error': 'Output drift only available for classification and regression problems type', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, + 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab} + else: + context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'usecasetab':usecasetab, + 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'ser_url':ser_url,'trainingDataLocation':request.session['datalocation']} + + return render(request, 'outputdrif.html', context) + except: + return render(request, 'outputdrif.html', {'error':'Fail to do outputdrift analysis','usecasetab':usecasetab}) + + +# -------------------------------- Graviton-Integration Changes S T A R T -------------------------------- +def getuserdata(request): + import requests + data = [] + try: + graviton_url,graviton_userid = get_graviton_data() + gravitonURL = graviton_url + gravitonUserId = graviton_userid + + # url = 'https://xenius.azurewebsites.net/api/dataservices?userid='+ + url = gravitonURL + 'dataservices?userid=' + gravitonUserId + print(url) + response = requests.get(url) + statuscode = response.status_code + print(statuscode) + if statuscode == 200: + json_dictionary = json.loads(response.content) + data = json_dictionary['result'] + print(data) + except Exception as e: + print(e) + data = [] + data_json = json.dumps(data) + return HttpResponse(data_json) + + +def getdataservice(request): + import requests + data = [] + dataServiceId = request.GET.get('DataServiceId') + try: + graviton_url,graviton_userid = get_graviton_data() + gravitonURL = graviton_url + gravitonUserId = graviton_userid + + # url = 'https://xenius.azurewebsites.net/api/getmetadata?userid=1&dataserviceid='+str(dataServiceId) + url = gravitonURL + 'getmetadata?userid=' + gravitonUserId +'&dataserviceid='+str(dataServiceId) + response = requests.get(url) + statuscode = response.status_code + if statuscode == 200: + json_dictionary = json.loads(response.content) + data = json_dictionary['result'] + except Exception as e: + print(e) + data = [] + data_json = json.dumps(data) + return HttpResponse(data_json) +# ------------------------------------------------ E N D ------------------------------------------------- + + + +def getvalidateddata(request): + import requests + computeinfrastructure = compute.readComputeConfig() + taskid = request.POST.get('elixirdatataskid') + try: + url = 'http://'+elixir_ip+':'+elixir_port+'/api/get_validation_result?task_id='+str(taskid) + #print(url) + response = requests.get(url) + statuscode = response.status_code + if statuscode == 200: + json_dictionary = json.loads(response.content) + data = json_dictionary['Result'] + else: + data = [] + except 
Exception as e: + print(e) + data = [] + try: + df = pd.DataFrame.from_dict(data) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df.to_csv(dataFile, index=False) + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'computeinfrastructure':computeinfrastructure} + return render(request, 'upload.html', context) + except: + context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,""usecaseerror"":""Error in validating data!""} + return render(request, 'upload.html', context) + +def trigger_DAG(request): + from appfe.modelTraining import AirflowLib + response = AirflowLib.TriggerDag(""example_complex"", """") + return HttpResponse(response, content_type=""application/json"") + +def Airflow(request): + try: + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, + 'selected': 'monitoring', 'airflow': True} + return render(request, 'upload.html', context) + except: + return render(request, 'upload.html', {'error':'interrupted error'}) + + +def Results(request): + return render(request, 'modeltraning.html', context) + +def uploadnext(request): + return render(request, 'basicconfig.html', {'selected': 'modeltraning','version':AION_VERSION}) + +def basicconfignext(request): + from appbe import advance_Config as ac + context = ac.basicconfignex(request) +" +" computeinfrastructure = compute.readComputeConfig() + context['computeinfrastructure'] = computeinfrastructure + context['version'] = AION_VERSION + return render(request, 'advancedconfig.html', context) + + + + + +def updateRunConfig(_trainingTime, _filesize, _features, _modelname, _problem_type): + returnVal = 'Success' + try: + import psutil + memInGB = round(psutil.virtual_memory().total / (1024 * 1024 * 1024)) + _resource = str(memInGB) + "" GB"" + + _time = str(_trainingTime) + "" Mins"" + + new_record = { + ""sampleSize"": _filesize, + ""features"": _features, + ""algorithm"": _modelname, + ""machineResource"": _resource, + ""trainingTime"": _time, + ""problemtype"": _problem_type + } + + configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json') + if(os.path.isfile(configfilepath)): + with open(configfilepath,'r+') as file: + # load existing data into a dict. + file_data = json.load(file) + # join new_record with file_data inside runs + file_data[""runs""].append(new_record) + # sets file's current position at offset. + file.seek(0) + # convert back to json. 
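+        # Reviewer note: this read-modify-write appends one run record to training_runs.json,
+        # whose shape is assumed to be {'runs': [ {...}, ... ]}. file.seek(0) rewinds the handle
+        # so json.dump() overwrites the file from the start; because a record is only ever
+        # appended, the rewritten document is never shorter than the old one, so the absence of
+        # file.truncate() cannot leave stale bytes at the end of the file.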
+ json.dump(file_data, file, indent = 4) + + except Exception as inst: + returnVal = 'Fail' + pass + + return returnVal + + +def objectlabeldone(request): + try: + computeinfrastructure = compute.readComputeConfig() + request.session['datatype'] = 'Object' + request.session['csvfullpath'] = request.session['objectLabelFileName'] + df = pd.read_csv(request.session['csvfullpath']) + df1 = df.groupby(['Label']).agg({""File"":{""count"",""nunique""}}) + df1.columns = df1.columns.droplevel(0) + df1 = df1.reset_index() + class_count = [] + for i in range(len(df1)): + dct = {} + dct['Label'] = df1.loc[i, ""Label""] + dct['TotalAnnotations'] = df1.loc[i, ""count""] + dct['Images'] = df1.loc[i, ""nunique""] + class_count.append(dct) + #orxml_file in glob.glob(request.session['datalocation'] + '/*.xml'): + status_msg = 'Successfully Done' + wordcloudpic = '' + bargraph = '' + firstFile = pd.DataFrame() + #print(class_count) + context = {'tab': 'upload','firstFile':firstFile,'dataa': class_count,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure} + return render(request, 'upload.html', context) + except: + context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,""usecaseerror"":""Error in labeling object!""} + return render(request, 'upload.html', context) + + +def ObjLabelDiscard(request): + return redirect(reverse('objectlabelling')) + +def ObjLabelAdd(request,id): + angle = request.GET.get(""angle"") + gid = request.GET.get(""gid"") + xMin = min(int(request.GET.get(""xMin"")),int(request.GET.get(""xMax""))) + xMax =max(int(request.GET.get(""xMin"")),int(request.GET.get(""xMax""))) + yMin = min(int(request.GET.get(""yMin"")),int(request.GET.get(""yMax""))) + yMax = max(int(request.GET.get(""yMin"")),int(request.GET.get(""yMax""))) + height = request.GET.get(""height"") + width = request.GET.get(""width"") + #print(""=====> ""+str(angle) +"" ""+ str(gid) +"" ""+ str(xMin) + "" "" + str(xMax) + "" "" +str(yMin) +"" ""+ str(yMax)+"" ""+str(width)) + # with open(""out.csv"", 'w') as f: + # # writer = csv.writer(f) + # # writer.writerow([angle, id, gid, xMin, xMax, yMin, yMax]) + # f.write(angle +"" ""+ gid +"" ""+ xMin + "" "" + xMax + "" "" +yMin +"" ""+ yMax) + labels = request.session['labels'] + labels.append({""id"":id, ""name"":"""", ""xMin"":xMin, ""xMax"":xMax, ""yMin"":yMin, ""yMax"":yMax, ""height"":height,""width"":width, ""angle"":angle}) + request.session['labels'] = labels + return redirect(reverse('objectlabelling')) + +def imageeda(request): + try: + computeinfrastructure = compute.readComputeConfig() + request.session['datatype'] = 'Image' + filename = request.session['csvfullpath'] + os.remove(filename) + request.session['csvfullpath'] = request.session['LabelFileName'] + df = pd.read_csv(request.session['csvfullpath']) + eda_result = '' + duplicate_img = '' + color_plt = '' + df2 = df.groupby('Label', as_index=False)['File'].count().reset_index() + df_json = df2.to_json(orient=""records"") + df_json = json.loads(df_json) + cfig = go.Figure() + xaxis_data = df2['Label'].tolist() + yaxis_data = df2['File'].tolist() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data)) + cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520) + firstFile = df.groupby('Label').first().reset_index() + #firstFile['FilePath'] = firstFile['File'].apply(lambda x: 
os.path.join(request.session['datalocation'], x)) + images = [] + qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation']) + for i in range(len(firstFile)): + + filename = firstFile.loc[i, ""File""] + filePath = os.path.join(request.session['datalocation'], filename) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + firstFile.loc[i, ""Image""] = image_64 + firstFile.loc[i, ""Quality""] = qualityscore[filename] + status_msg = 'Successfully Done' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile, + 'status_msg': status_msg,'computeinfrastructure':computeinfrastructure} + return(context) + except: + context={'error':'Fail to load Eda result'} + return (context) + +def imagelabelling(request): + if (request.session['currentIndex']) == (request.session['endIndex']+1): + try: + context = imageeda(request) + return render(request, 'upload.html', context) + except: + context = {'error': 'Image labeling error'} + return render(request, 'upload.html', context) + else: + try: + df = pd.read_csv(request.session['csvfullpath']) + filePath = os.path.join(request.session['datalocation'],df[""File""].iloc[request.session['currentIndex']]) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df)} + return render(request, 'imagelabelling.html', context) + except: + context = {'error': 'Image labeling error'} + return render(request, 'upload.html', context) + +def objecteda(request): + request.session['datatype'] = 'Object' + filename = request.session['csvfullpath'] + try: + os.remove(filename) + except: + pass + try: + request.session['csvfullpath'] = request.session['LabelFileName'] + df = pd.read_csv(request.session['csvfullpath']) + df1 = df.groupby(['Label']).agg({""File"":{""count"",""nunique""}}) + df1.columns = df1.columns.droplevel(0) + df1 = df1.reset_index() + class_count = [] + for i in range(len(df1)): + dct = {} + dct['Label'] = df1.loc[i, ""Label""] + dct['TotalAnnotations'] = df1.loc[i, ""count""] + dct['Images'] = df1.loc[i, ""nunique""] + class_count.append(dct) + #orxml_file in glob.glob(request.session['datalocation'] + '/*.xml'): + status_msg = 'Successfully Done' + wordcloudpic = '' + bargraph = '' + firstFile = pd.DataFrame() + context = {'tab': 'upload','firstFile':firstFile,'dataa': class_count,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True} + return(context) + except: + context={'tab': 'upload','error':'Fail to load Eda result'} + return(context) + +def objectlabelling(request): + + if (request.session['currentIndex']) == (request.session['endIndex']+1): + try: + context = objecteda(request) + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except: + return render(request, 'upload.html', {'error':'objectlabelling error','version':AION_VERSION}) + else: + try: 
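+                # Reviewer sketch: this and the neighbouring labelling views inline each image by
+                # base64-encoding the file and quoting it into a data: URI for the template. A
+                # minimal standalone version of that pattern (the helper name _to_data_uri is
+                # hypothetical and is never called by the original code; base64 and urllib are
+                # already imported at module level):
+                def _to_data_uri(image_path):
+                    with open(image_path, 'rb') as img:
+                        encoded = base64.b64encode(img.read())
+                    # quote() makes the base64 payload safe to embed directly as an <img> source
+                    return 'data:image/png;base64,' + urllib.parse.quote(encoded)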
+ df = pd.read_csv(request.session['csvfullpath']) + filePath = os.path.join(request.session['datalocation'],df[""File""].iloc[request.session['currentIndex']]) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + bounds = [] + context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'selectedfile':df[""File""].iloc[request.session['currentIndex']]} + context['version'] = AION_VERSION + return render(request, 'objectlabelling.html',context) + except: + return render(request, 'objectlabelling.html',{'tab': 'upload','error':'Objectlabelling Error','version':AION_VERSION}) + + +def imagelabel(request,id): + request.session['labels'] = request.GET.get(""name"") + return redirect(reverse('imagelabelling')) +def objectlabel(request,id): + name = request.GET.get(""name"") + labels = request.session['labels'] + labels[int(id) - 1][""name""] = name + request.session['labels'] = labels + return redirect(reverse('objectlabelling')) + +def ObjLabelRemove(request,id): + index = int(id) - 1 + labels = request.session['labels'] + del labels[index] + for label in labels[index:]: + label[""id""] = str(int(label[""id""]) - 1) + request.session['labels'] = labels + return redirect(reverse('objectlabelling')) + +def ImgLabelNext(request): + df = pd.read_csv(request.session['csvfullpath']) + filePath = df[""File""].iloc[request.session['currentIndex']] + if request.session['labels'] != '': + dataFile = request.session['LabelFileName'] + #print(dataFile) + with open(dataFile,'a') as f: + f.write(filePath + "","" + + request.session['labels'] + ""\\n"") + f.close() + request.session['currentIndex'] = request.session['currentIndex']+1 + request.session['labels'] = '' + return redirect(reverse('imagelabelling')) + +def ObjLabelPrev(request): + df = pd.read_csv(request.session['csvfullpath']) + imagePath = df[""File""].iloc[request.session['currentIndex']] + request.session['currentIndex'] = request.session['currentIndex'] - 1 + process_marked_area_on_image(imagePath,request) + return redirect(reverse('objectlabelling')) + +def remove_labelling_from_csv(imagePath,request): + dataFile = request.session['LabelFileName'] + df = pd.read_csv(dataFile) + if not df.empty: + if imagePath in df.values: + df = df.set_index(""File"") + df = df.drop(imagePath, axis=0) + df.to_csv(dataFile, index=True) + +def process_marked_area_on_image(imagePath,request): + df = pd.read_csv(request.session['csvfullpath']) + dataFile = request.session['LabelFileName'] + remove_labelling_from_csv(imagePath,request) + write_coordinates_and_label_to_csv(imagePath,request) + if request.session['currentIndex'] < len(df): + image = df[""File""].iloc[request.session['currentIndex']] + request.session['labels'] = [] + with open(dataFile, 'r') as file: + reader = csv.reader(file) + for row in reader: + if row[0] == image: + labels = request.session['labels'] + labels.append({""id"":row[1], ""name"":row[9], ""xMin"": row[3], ""xMax"":row[4], ""yMin"":row[5], ""yMax"":row[6], ""height"":row[7],""width"":row[8], ""angle"":row[2]}) + request.session['labels'] = labels + labels = request.session['labels'] + return True + +def write_coordinates_and_label_to_csv(imagePath,request): + dataFile = request.session['LabelFileName'] + with open(dataFile, 'a') as f: + for label in request.session['labels']: + f.write(imagePath + "","" + + 
str(round(float(label[""id""]))) + "","" + + str(label[""angle""]) + "","" + + str(round(float(label[""x" +"Min""]))) + "","" + + str(round(float(label[""xMax""]))) + "","" + + str(round(float(label[""yMin""]))) + "","" + + str(round(float(label[""yMax""]))) + "","" + + str(round(float(label[""height""]))) + "","" + + str(round(float(label[""width""]))) + "","" + + label[""name""] + ""\\n"") + f.close() +def ObjLabelSelect(request): + selectedimage=request.GET.get('file') + df = pd.read_csv(request.session['csvfullpath']) + filePath = df[""File""].iloc[request.session['currentIndex']] + remove_labelling_from_csv(filePath,request) + dataFile = request.session['LabelFileName'] + with open(dataFile,'a') as f: + for label in request.session['labels']: + f.write(filePath + "","" + + str(round(float(label[""id""]))) + "","" + + str(label[""angle""]) + "","" + + str(round(float(label[""xMin""]))) + "","" + + str(round(float(label[""xMax""]))) + "","" + + str(round(float(label[""yMin""]))) + "","" + + str(round(float(label[""yMax""]))) + "","" + + str(round(float(label[""height""]))) + "","" + + str(round(float(label[""width""]))) + "","" + + label[""name""] + ""\\n"") + f.close() + currentIndex = 0 + for index,row in df.iterrows(): + #print(row['File']) + if row['File'] == selectedimage: + break + else: + currentIndex = currentIndex+1 + request.session['currentIndex'] = currentIndex + if request.session['currentIndex'] < len(df): + image = df[""File""].iloc[request.session['currentIndex']] + request.session['labels'] = [] + with open(dataFile, 'r') as file: + reader = csv.reader(file) + for row in reader: + if row[0] == image: + labels = request.session['labels'] + labels.append({""id"":row[1], ""name"":row[9], ""xMin"": row[3], ""xMax"":row[4], ""yMin"":row[5], ""yMax"":row[6], ""height"":row[7],""width"":row[8], ""angle"":row[2]}) + request.session['labels'] = labels + labels = request.session['labels'] + return redirect(reverse('objectlabelling')) + +def ObjLabelNext(request): + df = pd.read_csv(request.session['csvfullpath']) + filePath = df[""File""].iloc[request.session['currentIndex']] + remove_labelling_from_csv(filePath,request) + dataFile = request.session['LabelFileName'] + with open(dataFile,'a') as f: + for label in request.session['labels']: + f.write(filePath + "","" + + str(round(float(label[""id""]))) + "","" + + str(label[""angle""]) + "","" + + str(round(float(label[""xMin""]))) + "","" + + str(round(float(label[""xMax""]))) + "","" + + str(round(float(label[""yMin""]))) + "","" + + str(round(float(label[""yMax""]))) + "","" + + str(round(float(label[""height""]))) + "","" + + str(round(float(label[""width""]))) + "","" + + label[""name""] + ""\\n"") + f.close() + request.session['currentIndex'] = request.session['currentIndex']+1 + if request.session['currentIndex'] < len(df): + image = df[""File""].iloc[request.session['currentIndex']] + request.session['labels'] = [] + with open(dataFile, 'r') as file: + reader = csv.reader(file) + for row in reader: + if row[0] == image: + labels = request.session['labels'] + labels.append({""id"":row[1], ""name"":row[9], ""xMin"": row[3], ""xMax"":row[4], ""yMin"":row[5], ""yMax"":row[6], ""height"":row[7],""width"":row[8], ""angle"":row[2]}) + request.session['labels'] = labels + labels = request.session['labels'] + return redirect(reverse('objectlabelling')) + + + + + +def encryptedpackage(request): + from appbe.encryptedPackage import encrptpackage_command + from appbe.encryptedPackage import download_sclient + context = 
encrptpackage_command(request,Existusecases,usecasedetails) + context['version'] = AION_VERSION + try: + return download_sclient(request,context) #Task 9981 + except Exception as e: + print(e) + return render(request, 'usecases.html', context) + +def StationarySeasonalityTest(request): + + from appbe.stationarity_seasonality_check import StationarySeasonalityTest as sst + datapath = request.GET.get('datapath') + datetimefeature = request.GET.get('datefeature') + featurename = request.GET.get('targetfeature') + seasonality_status = request.GET.get('seasonality_status') + stationarity_status = request.GET.get('stationarity_status') + + df=pd.read_csv(datapath) + ss_obj=sst(df,featurename,datetimefeature) + result_dict=ss_obj.analysis(seasonality_status,stationarity_status) + + return HttpResponse(json.dumps(result_dict), content_type=""application/json"") + +def dataoverframe(df): + from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator + gfsg = GenericFeatureStatisticsGenerator() + proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': df}]) + protostr = base64.b64encode(proto.SerializeToString()).decode(""utf-8"") + return protostr + + +def getimpfeatures(dataFile, numberoffeatures): + imp_features = [] + if numberoffeatures > 20: + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile, optimize=1) + pca_map = eda_obj.getPCATop10Features() + imp_features = pca_map.index.values.tolist() + return imp_features + + +def uploaddata(request): + from appbe import exploratory_Analysis as ea + from appbe.aion_config import eda_setting + # context={'test':'test'} + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + try: + if selected_use_case == 'Not Defined': + context = {'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'tab': 'tabconfigure', + 'usecaseerror': 'Please create a new use case for training the model or select an existing use case for retraining', 'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage() + ,'usecasetab':usecasetab,'version':AION_VERSION} + return render(request, 'upload.html', context) + if 'ModelVersion' in request.session: + ModelVersion = request.session['ModelVersion'] + else: + ModelVersion = 0 + + if 'ModelStatus' in request.session: + ModelStatus = request.session['ModelStatus'] + else: + ModelStatus = 'Not Trained' + + if request.session['finalstate'] > 0: + if request.session['datatype'] in ['Video', 'Image','Document','Object']: + folderLocation = str(request.session['datalocation']) + dataFile = os.path.join(folderLocation, request.session['csvfullpath']) + df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace') + if df['Label'].isnull().sum() > 0: + if request.session['datatype'] == 'Document': + dataDf = pd.DataFrame() + dataDict = {} + keys = [""text""] + for key in keys: + dataDict[key] = [] + for i in range(len(df)): + filename = os.path.join(request.session['datalocation'],df.loc[i,""File""]) + with open(filename, ""r"",encoding=""utf-8"") as f: + dataDict[""text""].append(f.read()) + f.close() + dataDf = pd.DataFrame.from_dict(dataDict) + tcolumns=['text'] + wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns) + status_msg = 'Successfully Done' + request.session['currentstate'] = 0 + firstFile = pd.DataFrame() + context = {'tab': 
'upload','firstFile':firstFile,'validcsv': True,'singletextdetails':wordcloudpic,'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage() + ,'usecasetab':usecasetab,'version':AION_VERSION} + return render(request, 'upload.html', context) + eda_result = '' + duplicate_img = '' + color_plt = '' + df2 = df.groupby('Label', as_index=False)['File'].count().reset_index() + df_json = df2.to_json(orient=""records"") + df_json = json.loads(df_json) + cfig = go.Figure() + xaxis_data = df2['Label'].tolist() + yaxis_data = df2['File'].tolist() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data)) + cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520) + firstFile = df.groupby('Label').first().reset_index() + images = [] + if request.session['datatype'] == 'Image': + qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation']) + for i in range(len(firstFile)): + filename = firstFile.loc[i, ""File""] + filePath = os.path.join(request.session['datalocation'], filename) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + firstFile.loc[i, ""Image""] = image_64 + firstFile.loc[i, ""Quality""] = qualityscore[filename] + + elif request.session['datatype'] == 'Document': + dataDrift = '' + dataDf = pd.DataFrame() + dataDict = {} + keys = [""text"",""Label""] + for key in keys: + dataDict[key] = [] + for i in range(len(df)): + filename = os.path.join(request.session['datalocation'],df.loc[i,""File""]) + with open(filename, ""r"",encoding=""utf-8"") as f: + dataDict[""text""].append(f.read()) + f.close() + dataDict[""Label""].append(df.loc[i,""Label""]) + dataDf = pd.DataFrame.from_dict(dataDict) + wordcloudpic = ea.getCategoryWordCloud(dataDf) + status_msg = 'Successfully Done' + context = {'tab': 'upload','dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket() + ,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'version':AION_VERSION} + return render(request, 'upload.html', context) + + status_msg = 'Successfully Done' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + + context = {'tab': 'upload', 'featuregraph': bargraph, 'validcsv': True, 'firstFile': firstFile, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(), + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt,'azurestorage':get_azureStorage(), + 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure, + 'usecasetab':usecasetab,'version':AION_VERSION + } + return render(request, 'upload.html', context) + elif request.session['datatype'].lower() in ['llm_document', 'llm_code']: + request.session['currentstate'] = 0 + dataFile = request.session['csvfullpath'] + df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace') + filesCount = 0 + filesSize = 0 + 
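+                # Reviewer sketch: the loop below totals os.path.getsize() over every file listed
+                # in the ingestion CSV and then renders the sum as B / K / M. The same formatting,
+                # factored out for clarity (the name _human_readable_size is hypothetical and the
+                # helper is never called by the original code):
+                def _human_readable_size(total_bytes):
+                    if total_bytes > 1048576:            # more than 1 MiB
+                        return f'{round(total_bytes / (1024 * 1024), 1)} M'
+                    elif total_bytes > 1024:             # more than 1 KiB
+                        return f'{round(total_bytes / 1024, 1)} K'
+                    return f'{total_bytes} B'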
files = [] + for index, row in df.iterrows(): + filename = row['File'] + files.append(filename) + filesCount = filesCount + 1 + get_size = os.path.getsize(filename) + filesSize = round(filesSize + get_size, 1) + + if filesSize > 1048576: + size = round((filesSize / (1024 * 1024)), 1) + filesSize = str(size) + ' M' + elif filesSize > 1024: + size = round((filesSize /1024), 1) + filesSize = str(size) + ' K' + else: + filesSize = str(filesSize) + ' B' + files = pd.DataFrame(files, columns=['File']) + files.index = range(1, len(files) + 1) + files.reset_index(level=0, inplace=True) + files = files.to_json(orient=""records"") + files = json.loads(files) + from appbe.prediction import get_instance + hypervisor, instanceid,region,image = get_instance(selected_use_case + '_' + str(ModelVersion)) + if hypervisor != '': + computeinfrastructure['computeInfrastructure'] = hypervisor + else: + computeinfrastructure['computeInfrastructure'] = 'AWS' + context = {'tab': 'upload',""selected_use_case"":selected_use_case,""selectedPath"":request.session['datalocation'],""selectedfile"":request.session['fileExtension'],'csvgenerated': True,'filesCount':filesCount,'filesSize':filesSize,'files':files, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get" +"_gcs_bucket(), + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'azurestorage':get_azureStorage(), + 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'datatype':request.session['datatype'], + 'usecasetab':usecasetab,'version':AION_VERSION,""selectedfile"":request.session['fileExtension'],""selectedPath"":request.session['datalocation'] + } + return render(request, 'upload.html', context) + else: + dataFile = str(request.session['datalocation']) + check_df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace') + check_df.rename(columns=lambda x: x.strip(), inplace=True) + featuresList = check_df.columns.tolist() + + numberoffeatures = len(featuresList) + imp_features = getimpfeatures(dataFile, numberoffeatures) + + # check_df = pd.read_csv(dataFile) + # check_df.rename(columns=lambda x: x.strip(), inplace=True) + # ---------------------------- + + # EDA Performance change + # ---------------------------- + sample_size = int(eda_setting()) + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + + + #dflength = len(eda_obj.getdata()) + dflength = len(check_df) + if dflength > sample_size: + samplePercentage = round(float((sample_size/dflength) * 100),2) + samplePercentval = samplePercentage / 100 + showRecommended = True + # ---------------------------- + + # df_top = df.head(10) + df_top = check_df.head(10) + + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = '' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + + # EDA Subsampling changes + context = {'range':range(1,101),'samplePercentage':samplePercentage,'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'selected_use_case': selected_use_case,'data': df_json,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(), + 'currentstate': request.session['currentstate'], 'finalstate': 
request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'imp_features':imp_features,'numberoffeatures':numberoffeatures, + 'version':AION_VERSION, + 'selected': 'modeltraning','exploratory':False,'computeinfrastructure':computeinfrastructure} + else: + request.session['uploaddone'] = False + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + + context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'clusteringModels':clusteringModels, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(), + 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure + } + context['version'] = AION_VERSION + + return render(request, 'upload.html', context) + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + print(e) + return render(request, 'upload.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Fail to upload Data','usecasetab':usecasetab,'version':AION_VERSION}) + +def mlflowtracking(request): + import requests + response = requests.get(""http://localhost:5000/"") + #response = requests.get(url) + statuscode = response.status_code + data = [] + context = {'statuscode':statuscode} + context['version'] = AION_VERSION + return render(request, 'openmlflow.html', context) + + + +def readlogfile(request): + file_path = request.session['logfilepath'] + try: + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"") + configSettingsData = f.read() + configSettings = json.loads(configSettingsData) + f.close() + if os.path.exists(file_path): + my_file = open(file_path, 'r',encoding=""utf-8"") + file_content = my_file.read() + my_file.close() + matched_lines = [line.replace('Status:-', '') for line in file_content.split('\\n') if ""Status:-"" in line] + matched_status_lines = matched_lines[::-1] + if len(matched_status_lines) > 0: + no_lines = len(matched_lines) + if 'noflines' not in request.session: + request.session['noflines'] = 0 + request.session['noflines'] = request.session['noflines'] + 1 + if request.session['ModelStatus'] != 'SUCCESS': + numberoflines = request.session['noflines'] + if numberoflines > no_lines: + numberoflines = no_lines + request.session['noflines'] = no_lines + matched_lines = matched_lines[0:numberoflines] + + matched_status_lines = matched_status_lines[0] + output = getStatusCount(matched_lines,request.session['total_steps']) + matched_status_lines = matched_status_lines.split('...') + matched_status_lines = matched_status_lines[1] + output2=[] + output2.append(matched_status_lines) + from appbe import leaderboard + import pandas + result = leaderboard.get_leaderboard(file_content) + if result.empty==False: + result = result.to_html(classes='table',col_space='100px', index=False) + else: + result = 'Leaderboard is not available' + data_details = {'status':output2,'logs':output,'log_file':file_content,'leaderboard': result,'trainingstatus':request.session['ModelStatus']} + return HttpResponse(json.dumps(data_details), content_type=""application/json"") + else: 
+ matched_lines = [] + matched_lines.append('Initializing Training Engine') + data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines} + return HttpResponse(json.dumps(data_details), content_type=""application/json"") + else: + stepsdone = 0 + matched_lines = [] + if request.session['ModelStatus'] == 'Running': + matched_lines.append('Initializing Training Engine') + else: + matched_lines.append('Not Trained') + data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines} + return HttpResponse(json.dumps(data_details), content_type=""application/json"") + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + matched_lines = [] + if request.session['ModelStatus'] == 'Running': + stepsdone = 0 + matched_lines.append('Initializing Training Engine') + data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines} + return HttpResponse(json.dumps(data_details), content_type=""application/json"") + else: + matched_lines.append('Not Trained') + data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines,'leaderboard':matched_lines,'trainingstatus':matched_lines} + return HttpResponse(json.dumps(data_details), content_type=""application/json"") + + + +# EDA Visualization changes +# ---------------------------- +def getgraph(request): + from appbe import exploratory_Analysis as ea + output = ea.get_edaGraph(request) + return HttpResponse(output) +# ---------------------------- + + +# --- 12686:Data Distribution related Changes S T A R T --- +def getDataDistribution(request): + from appbe import exploratory_Analysis as ea + output = ea.get_DataDistribution(request) + return HttpResponse(output) +# ---------------------- E N D ---------------------- + + +def getDeepDiveData(request): + from appbe import exploratory_Analysis as ea + output = ea.get_DeepDiveData(request) + return HttpResponse(output) + + +# Fairness Metrics changes +# ---------------------------- +def getmetrics(request): + from appbe import exploratory_Analysis as ea + output = ea.get_fairmetrics(request) + return HttpResponse(output) +# ---------------------------- + + +def getdataimbalance(request): + d3_url = request.GET.get('d3_url') + mpld3_url = request.GET.get('mpld3_url') + + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + + df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8') + targetFeature = configSettingsJson['basic']['targetFeature'] + df1 = df[targetFeature].value_counts().to_frame() + + if (len(df1) < 1): + response = 'Data balancing detail is not available due to no class is found in target feature.' + elif (len(df1) > 30): + response = 'Data balancing detail is not available due to high number of classes in target feature.' 
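+            # Reviewer note: this else branch covers the case where the training log exists but no
+            # 'Status:-' lines have been written yet. It returns the same JSON shape the front-end
+            # poller expects ({'status', 'logs', 'log_file', 'leaderboard', 'trainingstatus'}),
+            # just filled with an 'Initializing Training Engine' placeholder, presumably so the
+            # progress page can keep polling without special-casing an empty log.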
+ else: + dfStyler = df1.style.set_properties(**{'text-align': 'right'}) + dfStyler.set_table_styles([dict(selector='th', props=[('text-align', 'right')])]) + valueCount = dfStyler.to_html() + + import matplotlib.pyplot as plt + import mpld3 + fig, ax = plt.subplots(figsize=[6.5,6]) + + df2 = df[targetFeature].value_counts().sort_values() + _ncol = 1 + _radius = 0.5 + + if (len(df1) > 10): + _radius = 0.4 + _ncol = 1 + else: + _radius = 0.6 + _ncol = 1 + + ax = df2.plot(kind = 'pie', ylabel='', title=targetFeature, labeldistance=None, radius=_radius, autopct='%1.0f%%') + ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol = _ncol) + # ax.legend(bbox_to_anchor=(1,1), bbox_transform=plt.gcf().transFigure) + + plt.subplots_adjust(left=0.02, bottom=0.05, right=0.9) + ax.get_yaxis().set_visible(False) + html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url) + response = valueCount + ' ' + html_graph + + return HttpResponse(response) + +def dotextSummarization(request): + from appbe.textSummarization import startSummarization + context = startSummarization(request,DEFAULT_FILE_PATH,CONFIG_FILE_PATH,DATA_FILE_PATH) + context['version'] = AION_VERSION + return render(request, 'summarization.html', context) +def openmodelevaluation(request,id): + deploypath = request.session['deploypath'] + if id == 1: + contentFile= os.path.join(deploypath,'log','boosting_overfit.html') + if id == 2: + contentFile= os.path.join(deploypath,'log','boosting_overfit_condition.html') + if id == 3: + contentFile= os.path.join(deploypath,'log','smc.html') + if id == 4: + contentFile= os.path.join(deploypath,'log','smc_condition.html') + if id == 5: + contentFile= os.path.join(deploypath,'log','mi.html') + if id == 6: + contentFile= os.path.join(deploypath,'log','mi_con.html') + try: + my_file = open(contentFile, 'r', encoding=""utf-8"") + file_content = my_file.read() + my_file.close() + context = {'content': file_content,'status':request.session['ModelStatus']} + context['version'] = AION_VERSION + return render(request, 'deepcheck.html', context, content_type=""text/html"") + except: + context = {'content': 'Not available'} + context['version'] = AION_VERSION + return render(request, 'deepcheck.html', context, content_type=""text/html"") +def downloadlogfile(request,id,currentVersion): + import mimetypes + from django.http import FileResponse + p = usecasedetails.objects.get(id=id) + model = Existusecases.objects.filter(ModelName=p,Version=currentVersion) + if model[0].DeployPath != 'NA': + file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log') + else: + file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log') + try: + + if os.path.exists(file_path): + my_file = open(file_path, 'r', encoding=""utf-8"") + file_content = my_file.read() + my_file.close() + mime_type, _ = mimetypes.guess_type(file_path) + response = HttpResponse(file_content, content_type=mime" +"_type)#bugid 12513 + # Set the HTTP header for sending to browser + filename = p.usecaseid+'.log' + response['Content-Disposition'] = ""attachment; filename=%s"" % filename + return response + else: + response = HttpResponse('File Not Found')#bugid 12513 + # Set the HTTP header for sending to browser + filename = p.usecaseid+'.log' + response['Content-Disposition'] = ""attachment; filename=%s"" % filename + return response + except Exception as e: + response = HttpResponse('File Not Found')#bugid 12513 + # Set the HTTP header for sending to browser + 
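+    # Reviewer note: the else branch below builds the data-imbalance view for a target with a
+    # manageable number of classes: a right-aligned pandas Styler table of value counts plus a
+    # matplotlib pie chart converted to HTML with mpld3.fig_to_html(). The d3_url / mpld3_url
+    # query parameters read above tell mpld3 which d3 and mpld3 JavaScript assets the embedded
+    # figure should load.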
filename = p.usecaseid+'.log' + response['Content-Disposition'] = ""attachment; filename=%s"" % filename + return response +def opendetailedlogs(request,id,currentVersion): + p = usecasedetails.objects.get(id=id) + model = Existusecases.objects.filter(ModelName=p,Version=currentVersion) + if model[0].DeployPath != 'NA': + file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log') + else: + file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log') + try: + + if os.path.exists(file_path): + my_file = open(file_path, 'r', encoding=""utf-8"") + file_content = my_file.read() + my_file.close() + context = {'content':file_content} + return HttpResponse(json.dumps(context),content_type=""application/json"") + else: + context = {'content':'Status not available'} + return HttpResponse(json.dumps(context),content_type=""application/json"") + except Exception as e: + print(e) + context = {'content':'Status not available'} + return HttpResponse(json.dumps(context),content_type=""application/json"") + + +def batchlearning(request): + from appbe.onlineLearning import startIncrementallearning + action,context = startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH) + context['version'] = AION_VERSION + return render(request,action,context) +def downlpredictreport(request): + predictionResults = request.POST.get('predictionResults') + predictionResults = pd.DataFrame.from_dict(eval(predictionResults)) + usename = request.session['UseCaseName'].replace("" "", ""_"") + '_' + str(request.session['ModelVersion']) + predictFileName = usename + '_prediction.xlsx' + from io import BytesIO as IO + excel_file = IO() + excel_writer = pd.ExcelWriter(excel_file, engine=""xlsxwriter"") + predictionResults.to_excel(excel_writer, sheet_name='Predictions') + workbook = excel_writer.book + #excel_writer.save() + excel_writer.close() + excel_file.seek(0) + response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + predictFileName + return response +# EDA Reports changes +# ---------------------------- +def downloadxplainreport(request): + from appbe.xplain import global_explain + status,msg,ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps = global_explain(request) + if status == 'Success': + usename = request.session['UseCaseName'].replace("" "", ""_"") + '_' + str(request.session['ModelVersion']) + predictFileName = usename + '_xplain.xlsx' + df = pd.DataFrame({'What kind of data does the system learn from?': ['This dataset is a dataset of measurements taken for '+str(numberofclasses)+' categories of '+str(targetFeature),'The '+str(numberofclasses)+' different categories of '+str(targetFeature)+' as per the data are:']}) + i = 1 + df1 = [] + for x in target_classes: + df1.append({'What kind of data does the system learn from?':' '+str(i)+':'+str(x)}) + i = i+1 + df1.append({'What kind of data does the system learn from?':'The total number of data points is '+str(dataPoints)}) + df = pd.concat([df, pd.DataFrame(df1)], ignore_index = True) + from io import BytesIO as IO + excel_file = IO() + excel_writer = pd.ExcelWriter(excel_file, engine=""xlsxwriter"") + df.to_excel(excel_writer, 
sheet_name='Dashboard',index=False) + pd.DataFrame(df_proprocessed).to_excel(excel_writer, sheet_name='Top 5 Rows',index=False) + df = pd.DataFrame({'What are the various features of the data used for model training?': ['The various features of the data are:']}) + i = 1 + df1 = [] + for x in modelfeatures: + df1.append({'What are the various features of the data used for model training?':' '+str(i)+': '+str(x)}) + i = i+1 + df = pd.concat( [df, pd.DataFrame( df1)], ignore_index = True) + df.to_excel(excel_writer, sheet_name='Features',index=False) + topFeaturesMsg = pd.DataFrame(topFeaturesMsg,columns=[""Feature Importance""]) + topFeaturesMsg.to_excel(excel_writer, sheet_name='Feature importance',index=False) + achors = pd.DataFrame(anchorjson) + achors.to_excel(excel_writer, sheet_name='Prediction',index=False) + workbook = excel_writer.book + #excel_writer.save() + excel_writer.close() + excel_file.seek(0) + response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + predictFileName + return response + else: + response = HttpResponse() + return response + +def gotoreport(request): + report_button = request.POST.get('trainmodel') + usename = request.session['UseCaseName'].replace("" "", ""_"") + '_' + str(request.session['ModelVersion']) + + if report_button == 'download_edafile': + from appbe.reports import downloadtrainingfile + edaFileName,excel_file = downloadtrainingfile(request,Existusecases) + response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + edaFileName + return response + + +def LoadBasicConfiguration(request): + try: + from appbe import exploratory_Analysis as ea + configFile = DEFAULT_FILE_PATH + 'eion_config.json' + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + + temp = {} + temp['ModelName'] = request.session['UseCaseName'] + temp['Version'] = request.session['ModelVersion'] + + dataLocation = str(request.session['datalocation']) + df = pd.read_csv(dataLocation, encoding='latin1') + featuresList = df.columns.values.tolist() + datetimeFeatures = [] + sequenceFeatures = [] + unimportantFeatures = [] + featuresRatio = {} + for i in featuresList: + check = ea.match_date_format(df[i]) + if check == True: + datetimeFeatures.append(i) + unimportantFeatures.append(i) + continue + seq_check = ea.check_seq_feature(df[i]) + if seq_check == True: + sequenceFeatures.append(i) + unimportantFeatures.append(i) + continue + ratio = ea.check_category(df[i]) + if ratio != 0: + featuresRatio[i] = ratio + else: + unimportantFeatures.append(i) + targetFeature = min(featuresRatio, key=featuresRatio.get) + unimportantFeatures.append(targetFeature) + config = {} + config['modelName'] = request.session['UseCaseName'] + config['modelVersion'] = request.session['ModelVersion'] + config['datetimeFeatures'] = datetimeFeatures + config['sequenceFeatures'] = sequenceFeatures + config['FeaturesList'] = featuresList + config['unimportantFeatures'] = unimportantFeatures + config['targetFeature'] = targetFeature + context = {'tab': 'configure', 'temp': temp, 'config': config} + context['version'] = AION_VERSION + return render(request, 'modeltraning.html', context) + except: + return render(request, 'modeltraning.html', {'error':'Fail to load basic config 
file','version':AION_VERSION}) + +def LoadDataForSingleInstance(request): + try: + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + problemtypes = configSettingsJson['basic']['analysisType'] + #print(problemtypes.keys()) + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type == 'timeSeriesForecasting': #task 11997 + inputFieldsDict = {'noofforecasts': 10} + elif problem_type == 'recommenderSystem': + inputFieldsDict = {""uid"": 1, ""iid"": 31, ""rating"": 0} + elif problem_type == 'videoForecasting': + inputFieldsDict = {'VideoPath': 'person01_boxing_d1_uncomp.avi'} + else: + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + df = pd.read_csv(dataFilePath, encoding='latin1') + singleInstanceData = df.loc[0, inputFeaturesList] + inputFieldsDict = singleInstanceData.to_dict() + inputFields = [] + inputFields.append(inputFieldsDict) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'tab': 'predict', 'inputFields': inputFields, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'} + return render(request, 'prediction.html', context=context) + except: + return render(request, 'prediction.html', {'tab': 'predict', 'error': 'Fail to load inputfields', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'}) +def uploadDatafromunsupervisedmodel(request): + computeinfrastructure = compute.readComputeConfig() + try: + modelid = request.POST.get('modelid') + p = Existusecases.objects.get(id=modelid) + dataFile = str(p.DataFilePath) + deploypath = str(p.DeployPath) + if(os.path.isfile(dataFile) == False): + context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure} + return render(request, 'prediction.html', context) + predictionScriptPath = os.path.join(deploypath,'aion_predict.py') + outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + if (predict_dict['status'] == 'SUCCESS'): + predictionResults = predict_dict['data'] + df2 = pd.json_normalize(predictionResults) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df2.to_csv(dataFile, index=False) + request.session['datalocation'] = str(dataFile) + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile) + featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures() + # ---------------------------- + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + df = pd.read_csv(dataFile,nrows=100) 
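+ # Descriptive note (added): aion_predict.py is expected to print a line starting with
+ # 'predictions:' followed by JSON, e.g. predictions:{'status': 'SUCCESS', 'data': [...]}
+ # (shape assumed from the parsing above). The labelled output of the unsupervised model
+ # is written to a new AION_<timestamp>.csv and registered as the session data file;
+ # only the first 100 rows are re-read here for the upload preview, and the first 10 of
+ # those are serialized to JSON for the upload.html grid.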
+ df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + No_of_Permissible_Features_EDA = get_edafeatures() + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + context = {'tab': 'tabconfigure','range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage,'computeinfrastructure':computeinfrastructure, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except Exception as e: + print(e) + return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','version':AION_VERSION}) + +def qlearning(request): + return render(request, 'qlearning.html', {}) + +def RLpath(request): + return render(request, 'rl_path.html', {}) + +def stateTransitionSettings(request): + selected_use_case = request." 
+"session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + import requests + setting_url = service_url.read_service_url_params(request) + usecasename = request.session['usecaseid'].replace("" "", ""_"") + setting_url = setting_url+'pattern_anomaly_settings?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion']) + #print(setting_url) + inputFieldsDict = {} + inputFieldsDict['groupswitching'] = request.POST.get('groupswitching') + inputFieldsDict['transitionprobability'] = request.POST.get('transitionprobability') + inputFieldsDict['transitionsequence'] = request.POST.get('transitionsequence') + inputFieldsDict['sequencethreshold'] = request.POST.get('sequencethreshold') +# print(inputFieldsDict) + inputFieldsJson = json.dumps(inputFieldsDict) + #print(inputFieldsJson) + try: + response = requests.post(setting_url,data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + if response.status_code != 200: + outputStr=response.content + context = {'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'} + return render(request, 'prediction.html', context) + except Exception as inst: + if 'Failed to establish a new connection' in str(inst): + context = {'tab': 'tabconfigure', 'error': 'AION Service needs to be started', 'selected': 'prediction'} + else: + context = {'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst), 'selected': 'prediction'} + return render(request, 'prediction.html', context) + try: + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + #print(outputStr) + predict_dict = json.loads(str(outputStr)) + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + inputFeaturesList = inputFeatures.split(',') + inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} + inputFields = [] + inputFields.append(inputFieldsDict) + iterName = request.session['UseCaseName'].replace("" "", ""_"") + settings_url = '' + problemtypes = configSettingsJson['basic']['analysisType'] + #print(problemtypes.keys()) + problem_type = """" + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type == 'StateTransition': + ser_url = service_url.read_pattern_anomaly_url_params(request) + settings_url = service_url.read_pattern_anomaly_setting_url_params(request) + else: + ser_url = service_url.read_service_url_params(request) + + ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion) + onnx_runtime = False + if str(configSettingsJson['advance']['deployer']['edge_deployment']) == 'True': + if str(configSettingsJson['advance']['deployer']['edge_format']['onnx']) == 'True': + onnx_runtime = True + analyticsTypes = problem_type + imagedf = '' + return render(request, 'prediction.html', + {'inputFields': inputFields,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction'}) + except Exception as 
e: + print(e) + return render(request, 'prediction.html', {'error': 'Fail to do state Transition Settings', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'}) + +def flcommand(request): + try: + from appbe.flconfig import fl_command + context = fl_command(request,Existusecases,usecasedetails) + return render(request, 'usecases.html', context) + except Exception as e: + print(e) + return render(request, 'models.html',{'error': 'Failed to generate federated learning client code'}) + +def maaccommand(request): + from appbe.models import maac_command + try: + context,page = maac_command(request,Existusecases,usecasedetails) + context['version'] = AION_VERSION + return render(request,page,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'errormlac': 'Failed to generate code: '+str(e),'version':AION_VERSION}) + + +def onnxruntime(request): + try: + onnx_scriptPath = os.path.join(request.session['deploypath'],'edge','onnxvalidation.py') + outputStr = subprocess.check_output([sys.executable, onnx_scriptPath]) + #print(outputStr) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'tab': 'predict', 'predictionResults': predict_dict, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction','onnx_edge':True,'version':AION_VERSION} + return render(request, 'prediction.html', context=context) + + except Exception as inst: + print('-------------------->'+str(inst)) + context = {'tab': 'tabconfigure', 'error': 'Failed To Perform Prediction', 'selected': 'prediction','version':AION_VERSION} + return render(request, 'prediction.html', context) +def instancepredict(request): + log = logging.getLogger('log_ux') + from appbe.train_output import get_train_model_details + modelType='' + trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request) + computeinfrastructure = compute.readComputeConfig() + selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request) + try: + t1 = time.time() + if request.FILES: + Datapath = request.FILES['DataFilePath'] + from io import StringIO + ext = str(Datapath).split('.')[-1] + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt']: + content = StringIO(Datapath.read().decode('utf-8')) + reader = csv.reader(content) + df = pd.DataFrame(reader) + df.columns = df.iloc[0] + df = df[1:] + + filetimestamp = str(int(time.time())) + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt','pdf']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + dataPath = dataFile + if(os.path.isfile(dataFile) == False): + context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist') + + return render(request, 
'prediction.html', context) + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + + predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py') + outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr = outputStr.strip() + predict_dict = json.loads(outputStr) + + problemtypes = configSettingsJson['basic']['analysisType'] + problem_type = '' + for k in problemtypes.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + PredictionResultsOfTextSum = [] + if (predict_dict['status'] == 'SUCCESS'): + predictionResults = predict_dict['data'] + predictionResultsTextSum= predict_dict['data'] + if problem_type in ['similarityIdentification','contextualSearch']: + for x in predictionResults: + msg='' + for y in x['prediction']: + msg += str(y) + msg += '\\n' + msg += '\\n' + msg += '\\n' + msg += '\\n' + msg += '\\n' + x['prediction'] = msg + + if problem_type == 'textSummarization': + Results = {} + Results['msg'] = predict_dict['msg'] + PredictionResultsOfTextSum.append(Results) + Results['prediction'] = predict_dict['data'] + PredictionResultsOfTextSum.append(Results) + + t2 = time.time() + log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str( + round(t2 - t1)) + ' sec' + ' : ' + 'Success') + + else: + context = {'tab': 'tabconfigure', 'error': 'Failed To perform prediction','version':AION_VERSION} + log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction') + return render(request, 'prediction.html', context) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + from appfe.modelTraining.train_views import getMLModels + problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson) + from appbe.prediction import createInstanceFeatures + ser_url = service_url.read_service_url_params(request) + inputFields,ser_url = createInstanceFeatures(configSettingsJson,problem_type,mlmodels,request.session['usecaseid'],request.session['ModelVersion'],ser_url) + from appfe.modelTraining.prediction_views import getTrainingStatus + result = getTrainingStatus(request) + context = {'tab': 'predict','ser_url':ser_url,'predictionResults': predictionResults, 'selected_use_case': selected_use_case,'problem_type':problem_type,'result':result, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'bestmodel':bestmodel,'usecasetab':usecasetab,'version':AION_VERSION,'modelType':modelType,'inputFields':inputFields,'configSettingsJson':configSettingsJson} + if problem_type == 'textSummarization': + context={'tab': 'predict','predictionResultsTextSum': predictionResultsTextSum, 'PredictionResultsOfTextSum': PredictionResultsOfTextSum,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction','problem_type':problem_type} + + + return render(request, 'prediction.html', context=context) + except Exception as inst: + print(inst) + context = {'tab': 'tabconfigure', 'error': 
'Failed To perform prediction', 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + log.info('Predict Batch :' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction, '+str(inst)) + + return render(request, 'prediction.html', context) + + +def LoadAdvanceConfiguration(request): + try: + if request.method == 'POST': + configFile = request.session['config_json'] + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + + configSettingsJson = json.loads(configSettings) + context = {'tab': 'advconfig', 'advconfig': configSettingsJson} + context['version'] = AION_VERSION + context['usecasetab'] = usecasetab + return render(request, 'modeltraning.html', context) + except: + return render(request, 'modeltraning.html', {'error':'Fail to load advance config file','version':AION_VERSION,'usecasetab':usecasetab}) + + +# advance +def Advance(request): + try: + from appbe import advance_Config as ac + request.session['defaultfilepath'] = DEFAULT_FILE_PATH + context = ac.save(request) + submittype = request.POST.get('AdvanceSubmit') + computeinfrastructure = compute.readComputeConfig() + if submittype != 'AdvanceDefault': + from appfe.modelTraining.train_views import trainmodel + return trainmodel(request) + else: + context['version'] = AION_VERSION + context['usecasetab'] = usecasetab + context['computeinfrastructure'] = computeinfrastructure + return render(request, 'advancedconfig.html', context) + except Exception as e: + print(e) + return render(request, 'advancedconfig.html', {'erroradvance':'Fail to save','version':AION_VERSION,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure}) + +def templatepage(request): + computeinfrastructure = compute.readComputeConfig() + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + ser_url = service_url.read_service_url_params(request) + packagetip=''' + Call From Command Line + 1. Click AION Shell + 2. python {packageAbsolutePath}/aion_prediction.py {json_data} + Call As a Package + 1. Go To package_path\\WHEELfile + 2. python -m pip install {packageName}-py3-none-any.whl + Call the predict function after wheel package installation + 1. from {packageName} import aion_prediction as p1 + 2. p1.predict({json_" +"data}) + ''' + + usecase = usecasedetails.objects.all() + models = Existusecases.objects.filter(Status='SUCCESS') + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + else: + nouc = 1 + context = {'usecasedetail': usecase, 'nouc': nouc,'models': models, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip,'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab} + return (context) + except: + + context = {'error':'Fail to load usecases details','usecasetab':usecasetab} + return (context) + +def modelkafka(request): + try: + addKafkaModel(request,request.session['datalocation']) + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + ser_url = service_url.read_service_url_params(request) + packagetip=''' + Call From Command Line + 1. Click AION Shell + 2. 
python {packageAbsolutePath}/aion_prediction.py {json_data} + Call As a Package + 1. Go To package_path\\WHEELfile + 2. python -m pip install {packageName}-py3-none-any.whl + Call the predict function after wheel package installation + 1. from {packageName} import aion_prediction as p1 + 2. p1.predict({json_data}) + ''' + models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id') + usecase = usecasedetails.objects.all().order_by('-id') + if len(usecase) > 0: + nouc = usecasedetails.objects.latest('id') + nouc = (nouc.id)+1 + else: + nouc = 1 + return render(request, 'usecases.html', + {'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting}) + except: + return render(request, 'usecases.html',{'selected': 'usecase', 'selected_use_case': selected_use_case,'error': 'Fail to load modelkafka'}) + +def startTracking(request): + from appbe.aion_config import aion_tracking + from appbe.aion_config import start_tracking + try: + status = aion_tracking() + if status.lower() == 'error': + start_tracking() + status = 'MLflowSuccess' + else: + status = 'MLflowSuccess' + context = {'selected':'DataOperations','usecasetab':usecasetab,'status':status} + context['version'] = AION_VERSION + return render(request, ""dataoperations.html"",context) + except: + context = {'selected':'DataOperations','usecasetab':usecasetab,'status':'Error'} + context['version'] = AION_VERSION + return render(request, ""dataoperations.html"",context) + +def startService(request): + try: + status = aion_service() + if status == 'Running': + status = 'AION service already running' + elif status == 'Started': + status = 'AION service started successfully' + else: + status = 'Error in starting' + context = settings(request) + context['status'] = status + return render(request, 'settings_page.html', context) + except: + return render(request, 'settings_page.html', {'error':'Fail to start service'}) + +def Dataupload(request): + from appbe.pages import usecases_page + checkModelUnderTraining(request,usecasedetails,Existusecases) + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + context['currentstate'] =0 + from appbe.aion_config import get_telemetryoptout + telemetryoptout = get_telemetryoptout() + if telemetryoptout == 'No': + from appbe.telemetry import checkTelemtry + checkTelemtry() + return render(request,action,context) + + +def show(request): + try: + models = Existusecases.objects.all() + # print(models) + return render(request, ""usecases.html"", {'models': models, 'selected': 'usecase'}) + except: + return render(request, ""usecases.html"", {'error': 'Error to show Usecases', 'selected': 'usecase'}) + + +def edit(request, id): + try: + usecasedetail = usecasedetails.objects.get(id=id) + return render(request, 'edit.html', {'usecasedetail': usecasedetail, 'selected': 'usecase'}) + except: + return render(request, ""usecases.html"", {'error': 'Error in editing usecase', 'selected': 'usecase'}) + +def opentraining(request, id,currentVersion): + from appbe.pages import usecases_page + try: + p = usecasedetails.objects.get(id=id) + model = Existusecases.objects.filter(ModelName=p,Version=currentVersion) + Version = 
model[0].Version + usecasename = p.UsecaseName + request.session['ModelName'] = p.id + request.session['UseCaseName'] = usecasename + request.session['usecaseid'] = p.usecaseid + request.session['ModelVersion'] = Version + request.session['ModelStatus'] = 'Not Trained' + request.session['finalstate'] = 0 + usecase = usecasedetails.objects.all().order_by('-id') + configfile = str(model[0].ConfigPath) + + dataFile = '' + if configfile != '': + request.session['finalstate'] = 2 + f = open(configfile, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + dataFile = configSettings['basic']['dataLocation'] + + if configSettings['basic']['folderSettings']['fileType'] == 'Object': + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + return objectlabeldone(request) + elif configSettings['basic']['folderSettings']['fileType'] in ['LLM_Document','LLM_Code']: + request.session['datatype'] = configSettings['basic']['folderSettings']['fileType'] + request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension'] + request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile'] + request.session['datalocation'] = configSettings['basic']['dataLocation'] + else: + request.session['datalocation'] = str(configSettings['basic']['dataLocation']) + request.session['datatype'] = 'Normal' + if 'fileSettings' in configSettings['basic'].keys(): + fileSettings = configSettings['basic']['fileSettings'] + if 'delimiters' in fileSettings.keys(): + delimiters = configSettings['basic']['fileSettings']['delimiters'] + textqualifier = configSettings['basic']['fileSettings']['textqualifier'] + request.session['delimiter'] = delimiters + request.session['textqualifier'] = textqualifier + else: + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + if dataFile == '': + dataFile = str(model[0].DataFilePath) + if dataFile != '': + request.session['finalstate'] = 2 + request.session['datalocation'] = dataFile + return uploaddata(request) + except Exception as e: + print(e) + checkModelUnderTraining(request,usecasedetails,Existusecases) + request.session['IsRetraining'] = 'No' + status,context,action = usecases_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + context['Status'] = 'Error' + context['Msg'] = 'Error in retraining usecase. 
Check log file for more details' + return render(request,action,context) + +def stopmodelservice(request): + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + id = request.POST.get('modelid') + pid = request.POST.get('pid') + installPackage.stopService(pid) + time.sleep(5) + usecasedetail = usecasedetails.objects.get(id=id) + usecasename = usecasedetail.UsecaseName + runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) + installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') + for model in models: + model.scoringCreteria = 'NA' + model.score = 'NA' + model.deploymodel = 'NA' + model.maacsupport = 'False' + model.flserversupport = 'False' + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath), 'output.json') + try: + with open(modelPath) as file: + outputconfig = json.load(file) + file.close() + if outputconfig['status'] == 'SUCCESS': + model.scoringCreteria = outputconfig['data']['ScoreType'] + model.score = outputconfig['data']['BestScore'] + model.deploymodel = outputconfig['data']['BestModel'] + supportedmodels = [""Logistic Regression"", + ""Naive Bayes"",""Decision Tree"",""Support Vector Machine"",""K Nearest Neighbors"",""Gradient Boosting"",""Random Forest"",""Linear Regression"",""Lasso"",""Ridge""] + if model.deploymodel in supportedmodels: + model.maacsupport = 'True' + else: + model.maacsupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + except Exception as e: + pass + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + nouc = 0 + usecase = usecasedetails.objects.all() + return render(request, 'models.html', + {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id}) + except: + return render(request, 'models.html',{'error': 'Fail to stop model service'}) + +def startmodelservice(request): + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + installPackage.startService(request.POST.get('modelName'),request.POST.get('ip'),request.POST.get('portNo')) + time.sleep(5) + id = request.POST.get('modelid') + usecasedetail = usecasedetails.objects.get(id=id) + usecasename = usecasedetail.UsecaseName + runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) + installationStatus,modelName,modelVersion=insallPackage.checkInstalledPackge(usecasename) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') + for model in models: + model.scoringCreteria = 'NA' + model.score = 'NA' + model.deploymodel = 'NA' + model.maacsupport = 'False' + 
model.flserversupport = 'False' + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json') + try: + with open(modelPath) as file: + outputconfig = json.load(file) + file.close() + if outputconfig['status'] == 'SUCCESS': + model.scoringCreteria = outputconfig['data']['ScoreType'] + model.score = outputconfig['data']['BestScore'] + model.deploymodel = outputconfig['data']['BestModel'] + supportedmodels = [""Logistic Regression"", + ""Naive Bayes"",""Decision Tree"",""Support Vector Machine"",""K Nearest Neighbors"",""Gradient Boosting"",""Random Forest"",""Linear Regression"",""Lasso"",""Ridge""] + if model.deploymodel in supportedmodels: + model.maacsupport = 'True' + else: + model.maacsupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + except Exception as e: + pass + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + nouc = 0 + usecase = usecasedetails.objects.all() + return render(request, 'models.html', + {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id}) + except: + return render(request, 'models.html',{'error': 'Fail to start model service'}) +def downloadpackage(request, id,version): + return(installPackage.downloadPackage(request,id,version,usecasedet" +"ails,Existusecases)) + +def createpackagedocker(request, id,version): + try: + context = installPackage.createPackagePackage(request,id,version,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request, 'usecases.html',context) + except Exception as e: + return render(request, 'usecases.html',{'error': str(e)}) + +def publish(request, id): + print(""Inside Publish Tab"") + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + usecasedetail = usecasedetails.objects.get(id=id) + usecasename = usecasedetail.UsecaseName + publish_version,publish_status,publish_drift_status =chech_publish_info(usecasename) + runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) + installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) + models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') + + for model in models: + model.scoringCreteria = 'NA' + model.score = 'NA' + model.deploymodel = 'NA' + model.maacsupport = 'False' + model.flserversupport = 'False' + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json') + try: + with open(modelPath) as file: + outputconfig = json.load(file) + file.close() + if outputconfig['status'] == 'SUCCESS': + model.scoringCreteria = outputconfig['data']['ScoreType'] + model.score = outputconfig['data']['BestScore'] + model.deploymodel = outputconfig['data']['BestModel'] + model.featuresused = eval(outputconfig['data']['featuresused']) + model.targetFeature = outputconfig['data']['targetFeature'] 
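+ # Descriptive note (added): each successfully trained version is described by its
+ # etc/output.json (ScoreType, BestScore, BestModel, featuresused, targetFeature and,
+ # when present, params and ModelType). The maacsupport, flserversupport and
+ # encryptionsupport flags shown on the publish page are derived by checking BestModel
+ # against the hard-coded lists of supported algorithms below.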
+ if 'params' in outputconfig['data']: + model.modelParams = outputconfig['data']['params'] + model.modelType = outputconfig['data']['ModelType'] + model.dataPath = os.path.join(str(model.DeployPath),'data', 'postprocesseddata.csv') + supportedmodels = [""Logistic Regression"", + ""Naive Bayes"",""Decision Tree"",""Support Vector Machine"",""K Nearest Neighbors"",""Gradient Boosting"",""Random Forest"",""Linear Regression"",""Lasso"",""Ridge"",""Extreme Gradient Boosting (XGBoost)"",""Light Gradient Boosting (LightGBM)"",""Categorical Boosting (CatBoost)"",""LSTM""] + print(model.deploymodel) + if model.deploymodel in supportedmodels: + model.maacsupport = 'True' + else: + model.maacsupport = 'False' + supportedmodels = [""Logistic Regression"",""Neural Network"",""Linear Regression""] + if model.deploymodel in supportedmodels: + model.flserversupport = 'True' + else: + model.flserversupport = 'False' + supportedmodels = [""Extreme Gradient Boosting (XGBoost)""] + if model.deploymodel in supportedmodels: + model.encryptionsupport = 'True' + else: + model.encryptionsupport = 'False' + except Exception as e: + print(e) + pass + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + nouc = 0 + usecase = usecasedetails.objects.all() + print(models) + return render(request, 'models.html', + {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,'usecasetab':usecasetab,'publish_version':publish_version,'publish_status':publish_status,'publish_drift_status':publish_drift_status, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id}) + except Exception as e: + print(e) + return render(request, 'models.html',{'error': 'Fail to publish model'}) + +def remove_version(request, id): + from appbe.pages import get_usecase_page + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + if request.method == 'GET': + try: + model = Existusecases.objects.get(id=id) + usecaseid = model.ModelName.id + if os.path.isdir(str(model.DeployPath)): + import shutil + if DEPLOY_LOCATION != str(model.DeployPath): + shutil.rmtree(str(model.DeployPath)) + else: + uname = model.ModelName.usecaseid.replace("" "", ""_"") + usecaseversion = model.Version + deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion)) + if os.path.isdir(str(deployLocation)): + shutil.rmtree(str(deployLocation)) + model.delete() + usecasedetail = usecasedetails.objects.get(id=model.ModelName.id) + models = Existusecases.objects.filter(ModelName=usecasedetail) + if len(models) == 0: + usecasedetail.delete() + Status = 'SUCCESS' + Msg = 'Version Deleted Successfully' + except Exception as e: + print(e) + Status = 'Error' + Msg = str(e) + status, context,page = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = Status + context['Msg'] = Msg + context['version'] = AION_VERSION + return render(request, 'usecases.html',context) + except Exception as e: + print(e) + status, context,page = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = 'Error' + context['Msg'] = 'Usecase Version Deletion Error' + context['version'] = AION_VERSION + return render(request, 
'usecases.html',context) + +def destroy(request, id): + from appbe.pages import get_usecase_page + try: + kafkaSetting = kafka_setting() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + if request.method == 'GET': + try: + usecasedetail = usecasedetails.objects.get(id=id) + usecasename = usecasedetail.usecaseid + models = Existusecases.objects.filter(ModelName=usecasedetail) + for model in models: + if os.path.isdir(str(model.DeployPath)): + import shutil + if DEPLOY_LOCATION != str(model.DeployPath): + shutil.rmtree(str(model.DeployPath)) + else: + uname = usecasename.replace("" "", ""_"") + usecaseversion = model.Version + deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion)) + if os.path.isdir(str(deployLocation)): + shutil.rmtree(str(deployLocation)) + usecasedetail.delete() + Status = 'SUCCESS' + Msg = 'Deleted Successfully' + except Exception as e: + print(e) + Status = 'Error' + Msg = str(e) + else: + usecasename = 'Not Defined' + if 'UseCaseName' in request.session: + if (usecasename == request.session['UseCaseName']): + selected_use_case = 'Not Defined' + request.session['UseCaseName'] = selected_use_case + request.session['ModelVersion'] = 0 + request.session['ModelStatus'] = 'Not Trained' + else: + selected_use_case = request.session['UseCaseName'] + else: + selected_use_case = 'Not Defined' + + status, context,page = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = Status + context['Msg'] = Msg + context['version'] = AION_VERSION + return render(request, 'usecases.html',context) + except: + status, context,page = get_usecase_page(request,usecasedetails,Existusecases) + context['Status'] = 'Error' + context['Msg'] = 'Usecase Deletion Error' + context['version'] = AION_VERSION + return render(request, 'usecases.html',context) + + +def update(request, id): + try: + lab = get_object_or_404(usecasedetails, id=id) + if request.method == 'POST': + form = usecasedetailsForm(request.POST, instance=lab) + request.session['usecaseid'] = form['id'] + # print(request.session['usecaseid']) + if form.is_valid(): + form.save() + return redirect('/show') + else: + form = usecasedetailsForm(instance=lab) + request.session['usecaseid'] = form['id'] + # print(request.session['usecaseid']) + return render(request, 'edit.html', {'form': form, 'selected': 'usecase'}) + except: + return render(request, 'edit.html', {'error': 'Error in updating usecase', 'selected': 'usecase'}) + + +def newfile(request): + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + try: + model = Existusecases.objects.get(ModelName=request.session['ModelName'], Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + training_output = json.loads(training_output) + dataFile = request.POST.get('localfilePath') + if(os.path.isfile(dataFile) == False): + context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + return render(request, 'outputdrif.html', context) + df = pd.read_csv(dataFile) + request.session['drift_datalocations'] = dataFile + request.session['Features_dr'] = df.columns.values.tolist() + Featrs = request.session['Features_dr'] + statusmsg = 'Data File Uploaded Successfully' + + context = {'tab': 
'tabconfigure', 'status_msg': statusmsg, + 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, + 'selected': 'monitoring', 'z': Featrs} + context['version'] = AION_VERSION + return render(request, 'outputdrif.html', context) + except Exception as Isnt: + context = {'error': 'Error during output drift.'+str(Isnt), 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + context['version'] = AION_VERSION + return render(request, 'outputdrif.html', context) + +def summarization(request): + context = {'selected':'DataOperations','usecasetab':usecasetab} + context['version'] = AION_VERSION + return render(request, ""summarization.html"",context) + + +# ------------------ Debiasing Changes ------------------ +def getdataclasses(request): + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r+"", encoding=""utf-8"") + configSettingsData = f.read() + configSettingsJson = json.loads(configSettingsData) + + df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8') + classeslist = [] + selectedFeature = request.GET.get('features') + classeslist = df[selectedFeature].unique().tolist() + _list = [] + for item in classeslist: + _list.append("""") + + return HttpResponse(_list) +# ------------------ ------------------ + +def ucdetails(request, id): + from appbe.pages import usecases_page + checkModelUnderTraining(request, usecasedetails, Existusecases) + request.session['IsRetraining'] = 'No' + + status, context, action = usecases_page(request, usecasedetails, Existusecases, id) + + context['version'] = AION_VERSION + + return render(request, 'usecasedetails.html', context) + +def dataoperations(request): + context = {'selected':'DataOperations','usecasetab':usecasetab} + context['version'] = AION_VERSION + return render(request, ""dataoperations.html"",context) +# @login_required(login_url=""/login/"") +def datalabel(request): + context = {'selected':'DataOperations','usecasetab':usecasetab} + context['version'] = AION_VERSION + return render(request, ""label_dataset_ver2.html"",context) + + +# @login_required(login_url=""/login/"") +def pages(request): + context = {} + # All resource paths end in .html. + # Pick out the html file name from the url. And load that template. 
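+ # Descriptive note (added): template resolution here is purely name based; the last
+ # segment of the request path is used as the template name, a missing template falls
+ # back to page-404.html and any other failure to page-500.html.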
+ try: + load_template = request.path.split('/')[-1] + html_template = loader.get_template(load_template) + return HttpResponse(html_template.render(context, request)) + + except template.TemplateDoesNotExist: + html_template = loader.get_template('page-404.html') + return HttpResponse(html_template.render(context, request)) + + except: + html_template = loader.get_template('page-500.html') + return HttpResponse(html_template.render(context, request)) + +def delimitedsetting(delimiter='',textqualifier='',other=''): + if delimiter != '': + if delimiter.lower() == 'tab' or delimiter.lower() == '\\t': + delimiter = '\\t' + elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';': + delimiter = ';' + elif delimiter.lower() == 'comma' or delimiter.lower() == ',': + delimiter = ',' + elif delimiter.lower() == 'space' or delimiter.lower() == ' ': + delimiter = ' ' + elif delimiter.lower() == 'other' or other.lower() != '': + if other != '': + delimiter = other + else: + delimiter = ',' + elif delimiter != '': + delimiter = delimiter + else: + delimiter = ',' + else: + delimiter = ',' + if textqualifier == '': + textqualifier = '""' + return delimiter,textqual" +"ifier + + +@csrf_exempt +def upload_and_read_file_data(request): + file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file']) + file_delim = request.POST.get(""file_delim"") + textqualifier = request.POST.get(""qualifier"") + delimiters = request.POST.get(""delimiters"") + delimiter,textqualifier = delimitedsetting(request.POST.get('file_delim'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value')) + size_take = 100 + + if file_ext in [""csv"", ""tsv""]: + num_records = sum(1 for line in open(file_path)) - 1 + num_rows = num_records + if num_records > size_take: + skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) + else: + skip = 0 + # with open(file_path, 'r') as file: + # data = file.readline(10) + # from detect_delimiter import detect + # row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t', ' ']) + # if file_delim == ""custom"" and request.POST[""custom_delim""] != """": + # row_delimiter = request.POST[""custom_delim""] + # print('row_delimiter',row_delimiter) + file_content = pd.read_csv(file_path, sep=delimiter,quotechar=textqualifier, engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True) + elif file_path.endswith("".json""): + file_content_df = pd.read_json(file_path) + file_content = pd.json_normalize(file_content_df.to_dict(""records"")) + num_rows = len(file_content) + elif file_path.endswith("".avro""): + import pandavro as pdx + from avro.datafile import DataFileReader + from avro.io import DatumReader + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + num_rows = len(file_content) + elif file_path.endswith("".parquet""): + from pyarrow.parquet import ParquetFile + import pyarrow as pa + import pyarrow.parquet as pq + pf = ParquetFile(file_path) + take_rows = next(pf.iter_batches(batch_size=size_take)) + file_content = pa.Table.from_batches([take_rows]).to_pandas() + table = pq.read_table(file_path, columns=[]) + num_rows = table.num_rows + # file_content = pd.read_parquet(file_path, engine=""pyarrow"") + else: + raise ValueError(""Invalid file format"") + response = {} + column_list = [] + for key, val in 
dict(file_content.dtypes).items(): + if str(val) == 'object': + try: + pd.to_datetime(file_content[str(key)]) + column_list.append({""column_name"": str(key), 'data_type': 'datetime64'}) + except ValueError: + column_list.append({""column_name"": str(key), 'data_type': 'string'}) + pass + else: + column_list.append({""column_name"": str(key), 'data_type': str(val)}) + response[""column_list""] = column_list + response[""data_html""] = file_content.to_html(classes='table table-striped table-bordered table-hover dataTable no-footer', justify='left', index=False) + response[""record_count""] = num_rows + response[""file_ext""] = file_ext + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +@csrf_exempt +def handle_uploaded_file(path, file, test_dataset=False): + print('path',path) + if test_dataset: + filename = os.path.join(path,""test_data_file."" + file.name.split('.')[1]) + with open(filename, 'wb+') as destination: + for chunk in file.chunks(): + destination.write(chunk) + return filename, file.name.split('.')[1] + else: + filename = os.path.join(path,""uploaded_file."" + file.name.split('.')[1]) + with open(filename, 'wb+') as destination: + for chunk in file.chunks(): + destination.write(chunk) + return filename, file.name.split('.')[1] + + +@csrf_exempt +def apply_rule(request): + from appbe import labelling_utils as utils + rule_list = json.loads(request.POST['rule_list']) + file_ext = request.POST.get(""file_ext"") + label_list = json.loads(request.POST['label_list']) + not_satisfy_label = request.POST.get(""non_satisfied_label"") + response = utils.label_dataset(rule_list, file_ext, label_list, not_satisfy_label) + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +@csrf_exempt +def get_sample_result_of_individual_rule(request): + from appbe import labelling_utils as utils + rule_json = json.loads(request.POST['rule_json']) + file_ext = request.POST.get(""file_ext"") + label_list = json.loads(request.POST['label_list']) + not_satisfy_label = request.POST.get(""non_satisfied_label"") + print(""rule_json>>>"", rule_json) + print(""file_ext>>>"", file_ext) + print(""label_list>>>>"", label_list) + print(""not_satisfied_label"", not_satisfy_label) + response = utils.get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label) + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +def download_result_dataset(request): + #file_name = request.GET.get(""filename"") + file_name = request.session['AION_labelled_Dataset'] + file_path = os.path.join(DATA_FILE_PATH, file_name) + is_exist = os.path.exists(file_path) + if is_exist: + with open(file_path, ""rb"") as file: + response = HttpResponse(file, content_type=""application/force-download"") + response[""Content-Disposition""] = ""attachment; filename=%s"" % file_name + return response + else: + return HttpResponse(json.dumps(""file not found""), content_type=""application/error"") + + +@csrf_exempt +def get_sample_result_of_individual_rule_ver2(request): + from appbe import labelling_utils as utils + rule_json = json.loads(request.POST['rule_json']) + file_ext = request.POST.get(""file_ext"") + label_list = json.loads(request.POST['label_list']) + not_satisfy_label = request.POST.get(""non_satisfied_label"") + response = utils.get_sample_result_of_individual_rule_ver2(rule_json, file_ext, label_list, not_satisfy_label) + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +def 
get_label_list(label_json): + label_list = [] + label_weightage = [] + for item in label_json: + label_list.append(item[""label_name""]) + if item[""label_weightage""] != """": + weightage_perc = float(item[""label_weightage""]) / 100 + label_weightage.append(np.around(weightage_perc, 2)) + else: + label_weightage.append(100 / len(label_json)) + return label_list, label_weightage + + +@csrf_exempt +def apply_rule_ver2(request): + from appbe import labelling_utils as utils + rule_list = json.loads(request.POST['rule_list']) + file_ext = request.POST.get(""file_ext"") + label_json = json.loads(request.POST['label_list']) + label_list, label_weightage = get_label_list(label_json) + not_satisfy_label = request.POST.get(""non_satisfied_label"") + include_proba = request.POST.get(""is_include_proba"") == 'true' + response = utils.label_dataset_ver2(request,rule_list, file_ext, label_list, not_satisfy_label, label_weightage, + include_proba) + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +@csrf_exempt +def upload_and_read_test_data(request): + file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'], test_dataset=True) + + # file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file']) + file_delim_test = request.POST.get(""file_delim_test"") + size_take = 100 + if file_ext in [""csv"", ""tsv""]: + num_records = sum(1 for line in open(file_path)) - 1 + num_rows = num_records + if num_records > size_take: + skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) + else: + skip = 0 + with open(file_path, 'r') as file: + data = file.readline(10) + from detect_delimiter import detect + row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t', ' ']) + if file_delim_test == ""custom"" and request.POST[""custom_test_delim""] != """": + row_delimiter = request.POST[""custom_test_delim""] + file_content = pd.read_csv(file_path, sep=row_delimiter, quotechar=""'"", escapechar=""/"", engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True) + elif file_path.endswith("".json""): + file_content_df = pd.read_json(file_path) + file_content = pd.json_normalize(file_content_df.to_dict(""records"")) + num_rows = len(file_content) + elif file_path.endswith("".avro""): + import pandavro as pdx + from avro.datafile import DataFileReader + from avro.io import DatumReader + reader = DataFileReader(open(file_path, ""rb""), DatumReader()) + schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) + file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True) + num_rows = len(file_content) + elif file_path.endswith("".parquet""): + from pyarrow.parquet import ParquetFile + import pyarrow as pa + import pyarrow.parquet as pq + pf = ParquetFile(file_path) + take_rows = next(pf.iter_batches(batch_size=size_take)) + file_content = pa.Table.from_batches([take_rows]).to_pandas() + table = pq.read_table(file_path, columns=[]) + num_rows = table.num_rows + # file_content = pd.read_parquet(file_path, engine=""pyarrow"") + else: + raise ValueError(""Invalid file format"") + response = {} + column_list = [] + for key, val in dict(file_content.dtypes).items(): + if str(val) == 'object': + try: + pd.to_datetime(file_content[str(key)]) + column_list.append({""column_name"": str(key), 'data_type': 'datetime64'}) + except ValueError: + column_list.append({""column_name"": str(key), 'data_type': 'string'}) + pass + else: + 
column_list.append({""column_name"": str(key), 'data_type': str(val)}) + response[""column_list""] = column_list + response[""data_html""] = file_content.to_html(classes='table table-striped text-left',table_id='testdata', justify='left', index=False) + response[""record_count""] = num_rows + response[""file_ext""] = file_ext + response[""file_delim_test""] = file_delim_test + response[""custom_test_delim""] = request.POST[""custom_test_delim""] + + return HttpResponse(json.dumps(response), content_type=""application/json"") + + +@csrf_exempt +def get_label_and_weightage(request): + from appbe import labelling_utils as utils + test_file_ext = request.POST.get(""test_file_ext"") + file_delim_test = request.POST.get(""file_delim_test"") + marked_label_column = request.POST.get(""marked_label_column"") + custom_test_delim = request.POST.get(""custom_test_delim"") + label_list_with_weightage = utils.get_label_and_weightage(test_file_ext, marked_label_column, file_delim_test, custom_test_delim) + return HttpResponse(json.dumps(label_list_with_weightage), content_type=""application/json"") + + + +def modelcompare(request): + + deploypath = request.GET.get('DeployLocation') + + filepath = os.path.join(deploypath,'etc','output.json') + with open(filepath) as file: + config = json.load(file) + file.close() + # training/testing data needs to be updated as below once it is available in deployment folder + #trainingDataPath = os.path.join(deploypath,'data','trainData.csv') + #testingDataPath = os.path.join(deploypath,'data','testData.csv') + + trainingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz') + testingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz') + + featureUsedInTraining=config['data']['featuresused'] + targetFeature= config['data']['targetFeature'] + scoringCriteria=config['data']['ScoreType'] + scoringCriteria=scoringCriteria.lower() + problemType=config['data']['ModelType'] + problemType=problemType.lower() + + tempFeatureUsedInTraining = featureUsedInTraining.split(',') + finalFeatures=[] + for i in range (len(tempFeatureUsedInTraining)) : + tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace('[', '') + tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace(']', '') + tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[" +"i].replace(""'"", '') + tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].lstrip() + tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].rstrip() + finalFeatures.append(tempFeatureUsedInTraining[i]) + featureUsedInTraining = finalFeatures + + #print(""trainingDataPath----"",trainingDataPath) + #print(""testingDataPath----"",testingDataPath) + #print(""problemType----"",problemType) + #print(""scoringCriteria----"",scoringCriteria) + #print(""featureUsedInTraining----"",featureUsedInTraining,type(featureUsedInTraining)) + #print(""targetFeature----"",targetFeature) + + + if problemType == 'classification': + + try: + df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip') + df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip') + trainX=df1[featureUsedInTraining] + trainY=df1[targetFeature] + testX=df2[featureUsedInTraining] + testY=df2[targetFeature].to_numpy() + from sklearn import linear_model + estimator = linear_model.LogisticRegression() + estimator.fit(trainX, trainY) + predictedData = estimator.predict(testX) + from learner.aion_matrix import aion_matrix + scoring = aion_matrix() + score = 
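+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # modelcompare above strips brackets and quotes from the featuresused string by
+ # hand. This hedged alternative tries ast.literal_eval first (assuming the config
+ # stores a list-like string) and only then falls back to manual cleaning; the
+ # helper name parse_features_used is hypothetical.
+ import ast
+ def parse_features_used(features_str):
+     try:
+         parsed = ast.literal_eval(features_str)
+         if isinstance(parsed, (list, tuple)):
+             return [str(f).strip() for f in parsed]
+     except (ValueError, SyntaxError):
+         pass
+     for ch in ('[', ']', chr(39)):    # chr(39) is a single quote
+         features_str = features_str.replace(ch, '')
+     return [f.strip() for f in features_str.split(',') if f.strip()]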
scoring.get_score(scoringCriteria, testY, predictedData) + context = {'Model': 'Logistic regression','Testing Score': score, 'Confidence Score': ""Not supported"", 'Feature Engineering Method': ""ModelBased""} + return HttpResponse(json.dumps(context), content_type=""application/json"") + except Exception as e: + print(""exception ""+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + context = {'Model': 'Logistic regression','Testing Score': ""Exception Occured"", 'Confidence Score': ""Not supported"", 'Feature Engineering Method': ""ModelBased""} + return HttpResponse(json.dumps(context), content_type=""application/json"") + + if problemType == 'regression': + + try: + df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip') + df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip') + trainX=df1[featureUsedInTraining] + trainY=df1[targetFeature] + testX=df2[featureUsedInTraining] + testY=df2[targetFeature].to_numpy() + from sklearn import linear_model + estimator = linear_model.LinearRegression() + estimator.fit(trainX, trainY) + predictedData = estimator.predict(testX) + from learner.aion_matrix import aion_matrix + scoring = aion_matrix() + score = scoring.get_score(scoringCriteria, testY, predictedData) + context = {'Model': 'Linear regression','Testing Score': score, 'Confidence Score': ""Not supported"", 'Feature Engineering Method': ""ModelBased""} + return HttpResponse(json.dumps(context), content_type=""application/json"") + + except Exception as e: + print(""exception"") + context = {'Model': 'Linear regression','Testing Score': ""Exception Occured"", 'Confidence Score': ""Not supported"", 'Feature Engineering Method': ""ModelBased""} + return HttpResponse(json.dumps(context), content_type=""application/json"") + + +def textsummarization(request): + return render(request, ""textsummarization.html"",context={'version':AION_VERSION,'selected': 'textsummarization'}) + + +# LLM Testing Task ID 14533 +def validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample): + default = {'temperature':{'default':0.9,'lower':0.0,'upper':1.0},'similarity_threshold':{'default':0.75,'lower':0.0,'upper':1.0},'perturbations_per_sample':5} + + if not isinstance( prompts, (list,str)): + raise ValueError(f""Prompt should be of type str, got '{prompt}' of type {type(prompt)}"") + elif prompts == '': + raise ValueError(""Prompt field can not be empty"") + if not isinstance( reference_generation, str): + raise ValueError(f""Reference Generated Answer should be of type str, got '{reference_generation}' of type {type(reference_generation)}"") + # elif reference_generation == '': + # raise ValueError(""Reference Generation field can not be empty"") + if not isinstance( temperature, float) or temperature < default['temperature']['lower'] or temperature > default['temperature']['upper']: + if isinstance( temperature, str) and temperature == '': + temperature = default['temperature']['default'] + else: + raise ValueError(f""Model Parameter Temperature should be of type float with range {default['temperature']['lower']} - {default['temperature']['upper']}, got {temperature} of type {type(temperature)}"") + if not isinstance( similarity_threshold, float) or similarity_threshold < default['similarity_threshold']['lower'] or similarity_threshold > 
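+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Condensed, hedged version of the classification branch of modelcompare above:
+ # fit a plain LogisticRegression on the training features and score it on the
+ # test split. sklearn.metrics.accuracy_score stands in for the original
+ # learner.aion_matrix scorer, and the function name is a placeholder.
+ import pandas as pd
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.metrics import accuracy_score
+ def baseline_classification_score(train_csv, test_csv, features, target):
+     # Both files are gzip-compressed CSVs, as in the deployment data folder above.
+     df_train = pd.read_csv(train_csv, compression='gzip', skipinitialspace=True)
+     df_test = pd.read_csv(test_csv, compression='gzip', skipinitialspace=True)
+     estimator = LogisticRegression()
+     estimator.fit(df_train[features], df_train[target])
+     predicted = estimator.predict(df_test[features])
+     return accuracy_score(df_test[target], predicted)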
default['similarity_threshold']['upper']: + if isinstance( similarity_threshold, str) and similarity_threshold == '': + similarity_threshold = default['similarity_threshold']['default'] + else: + raise ValueError(f""Similarity Threshold should be of type float with range {default['similarity_threshold']['lower']} - {default['similarity_threshold']['upper']}, got {similarity_threshold} of type {type(similarity_threshold)}"") + if not isinstance( perturbations_per_sample, int): + if isinstance( perturbations_per_sample, str) and perturbations_per_sample == '': + perturbations_per_sample = default['perturbations_per_sample'] + else: + raise ValueError(f""Perturbations Per Sample should be of type integer, got {perturbations_per_sample} of type {type(perturbations_per_sample)}"") + return prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample + +def llmtesting(request): + ftmodels = [] + usecase = usecasedetails.objects.all().order_by('-id') + for x in usecase: + #print(x.id) + models = Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id') + if len(models) > 0: + for model in models: + #print(str(model.ConfigPath)) + version = model.Version + if os.path.isdir(str(model.DeployPath)): + modelPath = os.path.join(str(model.DeployPath),'etc','output.json') + with open(modelPath) as file: + outputconfig = json.load(file) + problemType = outputconfig['data']['ModelType'] + if problemType.lower() == 'llm fine-tuning': + from appbe.models import get_instance + hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version)) + with open(str(model.ConfigPath)) as file: + configSettingsJson = json.load(file) + file.close() + from appbe.pages import getMLModels + problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson) + ft = mlmodels+'-'+smodelsize+'-'+x.usecaseid+'_'+str(version) + finetunedModel = {} + finetunedModel['ft']=ft + finetunedModel['basemodel'] = mlmodels+'-'+smodelsize + finetunedModel['usecaseid'] = x.usecaseid+'_'+str(version) + ftmodels.append(finetunedModel) + return render(request, ""llmtesting.html"",context={'version':AION_VERSION,'selected': 'llmtesting','ftmodels':ftmodels}) + +# LLM Testing Result Task ID 14533 +def llmtestingresult(request): + try: + + context = {'result':result,'provider':provider,'tabledata':tabledata,'summary':summary,'modelName':modelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompt,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'version':AION_VERSION,'selected': 'llmtestingresults','success':'success'} + return render(request, ""llmtestingresults.html"",context) + except Exception as e: + print(e) + context = {'error': 'Fail to Generate LLM Testing Report '+str(e),'version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'} + return render(request, ""llmtestingresults.html"",context) + +# LLM Testing Result Task ID 14533 +def llmtestingresult(request): + + + try: + + generate_test = request.POST['prompt_temp'] + if generate_test == ""generatetest"": + UseCaseName = request.POST['selectusecase'] + ModelName = request.POST['selectmodel'] + temperature = request.POST['modelparam'] + similarity_threshold = request.POST['similarity_threshold'] + perturbations_per_sample = request.POST['perturbations_per_sample'] + selecttype = request.POST['selectquestion'] + reference_generation = (request.POST['reference_generation']) + baseModel = 
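+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # validate_llm repeats the same pattern for temperature and similarity_threshold:
+ # an empty form field falls back to a default, anything else must be a float in
+ # the allowed range. A hedged generalisation; coerce_float_param is hypothetical.
+ def coerce_float_param(value, default, lower, upper, name):
+     if isinstance(value, str) and value.strip() == '':
+         return default                      # empty form fields mean 'use the default'
+     try:
+         value = float(value)
+     except (TypeError, ValueError):
+         raise ValueError(f'{name} should be a float in the range {lower} - {upper}, got {value!r}')
+     if not lower <= value <= upper:
+         raise ValueError(f'{name} should be a float in the range {lower} - {upper}, got {value!r}')
+     return value
+ # e.g. temperature = coerce_float_param(request.POST['modelparam'], 0.9, 0.0, 1.0, 'temperature')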
request.POST['basemodel'] + + from appbe.llmTesting import test_LLM + if selecttype == ""Single"": + prompts = request.POST['prompt'] + + + else: + data_file = request.POST['dataFilePath']#Task 16794 + file_name = os.path.splitext(data_file)[0] + file_extension = os.path.splitext(data_file)[-1].lower() + if file_extension != "".csv"": + questions = [] + answers = [] + if file_extension == "".pdf"": + with pdfplumber.open(data_file) as pdf: + for page in pdf.pages: + text = page.extract_text() + lines = text.split(""\\n"") + + current_question = """" + current_answer = """" + reading_question = False + + for line in lines: + line = line.strip() + + if line.endswith(""?""): + if reading_question: + questions.append(current_question) + answers.append(current_answer) + current_question = """" + current_answer = """" + + current_question = line + reading_question = True + + elif reading_question: + current_answer += "" "" + line + if reading_question: + questions.append(current_question) + answers.append(current_answer) + + elif file_extension == "".docx"": + doc = Document(data_file) + + current_question = """" + current_answer = """" + reading_question = False + + for paragraph in doc.paragraphs: + text = paragraph.text.strip() + + if text.endswith(""?""): + if reading_question: + questions.append(current_question) + answers.append(current_answer) + current_question = """" + current_answer = """" + + current_question = text + reading_question = True + elif reading_question: + current_answer += "" ""+ text + + if reading_question: + questions.append(current_question) + answers.append(current_answer) + + else: + print(""unsupported file format. please provide a pdf or docx file."") + + faq = pd.DataFrame({'Question':questions, 'Answers':answers}) + # print(faq) + data_file_csv = file_name+"".csv"" + faq.to_csv(data_file_csv, index=False, encoding='utf-8') + + + else: + faq = pd.read_csv(data_file,encoding='cp1252') + rows = faq.shape[0] + prompts = list(faq['Question']) + try: + temperature = float( temperature) + similarity_threshold = float(similarity_threshold) + perturbations_per_sample = int( perturbations_per_sample) + except: + pass + + + + prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample = validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample) + from appbe.aion_config import get_llm_data + llm_key,llm_url,api_type,api_version=get_llm_data() + urls = { + 'OPENAI_API_BASE' : llm_url, + 'OPENAI_API_KEY' : llm_key, + 'OPENAI_API_TYPE' :api_type, + 'OPENAI_API_VERSION':api_version + } + llm_obj = test_LLM() + llm_obj.set_params(urls) + + if selecttype == ""Single"": + print(UseCaseName,ModelName) + if ModelName.lower() == 'basemodel': + result = llm_obj.run_offline_model( UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,False ) + llmModelName = baseModel + else: + result = llm_obj.run_offline_model( UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,True ) + llmModelName = ModelName+'-'+UseCaseName + print(result) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.html') + result = result.split(""LLMTestingResultOutput:"")[-1] + + output = json.loads(result) + + with open(dataFile,'w') as htmlfile: + htmlfile.write(output['data']['html_file']) + request.session['llmtestreport'] = str(dataFile) + # provider = 
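+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Condensed, hedged restatement of the PDF question/answer extraction above:
+ # a line ending in '?' starts a new question and the following lines accumulate
+ # into its answer until the next question. Assumes pdfplumber is installed;
+ # extract_faq_from_pdf is an illustrative helper name.
+ import pdfplumber
+ import pandas as pd
+ def extract_faq_from_pdf(pdf_path):
+     questions, answers = [], []
+     current_question, current_answer = '', ''
+     with pdfplumber.open(pdf_path) as pdf:
+         for page in pdf.pages:
+             for line in (page.extract_text() or '').splitlines():
+                 line = line.strip()
+                 if line.endswith('?'):                 # a new question starts here
+                     if current_question:
+                         questions.append(current_question)
+                         answers.append(current_answer.strip())
+                     current_question, current_answer = line, ''
+                 elif current_question:
+                     current_answer += ' ' + line
+     if current_question:
+         questions.append(current_question)
+         answers.append(current_answer.strip())
+     return pd.DataFrame({'Question': questions, 'Answers': answers})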
result.generation_kwargs['Provider'] + provider = """" + # metric_name = list(result.metric[0].keys())[0] + metric_name = output['data']['metric_name'] + # metric_values = output['data']['metric_values'] + metric_values = eval(output['data']['metric_values']) + passed_tests = output['data']['passed_tests'] + total_tests = output['data']['total_tests'] + summary = f'{passed_tests}/{total_tests}' + tabledata = {} + prompts = output['data']['prompts'] + generations= output['data']['generations'] + Generations = [] + for sub in generations: + Generations.append(sub.replace(""\\" +"n"", """")) + metricvalues = metric_values + text = [eval(x) for x in generations] + gen = [x[0]['generated_text'].split('\\n')[1:] for x in text] + Generations = [' '.join(x) for x in gen] + resultoutput = eval(output['data']['resultoutput'])[0] + for index,val in enumerate(Generations): + Generations[index]= Generations[index].strip() + if len(Generations[index])<=2: + metricvalues[index] = 0 + resultoutput[index] = 0 + + tabledata = zip(prompts,Generations,metricvalues,resultoutput) + + context = {'result':result,'provider':provider,'tabledata':tabledata,'summary':summary,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'} + # context = {'result':result,'provider':""provider"",'tabledata':""tabledata"",'summary':""summary"",'modelName':modelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'} + else: + if ModelName.lower() == 'basemodel': + result_str =llm_obj.run_multiple_offline_model(UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample,faq,False) + llmModelName = baseModel + else: + result_str =llm_obj.run_multiple_offline_model(UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample,faq,True) + llmModelName = ModelName+'-'+UseCaseName + result_str = result_str.split(""LLMTestingResultOutput:"")[-1] + output = json.loads(result_str) + # result will be df converted from output['data'] + result = pd.DataFrame(json.loads(output['data'])) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.csv') + request.session['llmtestreport'] = str(dataFile) + result.rename(columns={'Perturbed Prompts':'PerturbedPrompts','Similarity [Generations]':'Similarity'},inplace=True) + result_df = result.head(5) + result.to_csv(dataFile, index=False) + + + context={'result':result_df,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'perturbations_per_sample':perturbations_per_sample,'selected': 'llmtestingresults','multiple':'multiple','success':'success'} + + return render(request, ""llmtestingresults.html"",context) + if generate_test == ""download_prompt"": + + csvdata= os.path.join(DEFAULT_FILE_PATH,""Prompt_template.csv"") + if os.path.isfile(csvdata) and os.path.exists(csvdata): + df = pd.read_csv(csvdata,encoding='utf8') + downloadFileName = 'llmreport.csv' + response = HttpResponse(content_type='text/csv') + response['Content-Disposition'] = 'attachment; filename='+downloadFileName + 
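+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # The single-prompt branch above parses the runner output with eval(). A hedged,
+ # safer sketch of the same parsing using ast.literal_eval, assuming that
+ # metric_values and resultoutput are plain list literals and that the payload
+ # keeps the field names used above; parse_llm_result is a hypothetical helper.
+ import ast
+ import json
+ def parse_llm_result(raw_result):
+     payload = json.loads(raw_result.split('LLMTestingResultOutput:')[-1])
+     data = payload['data']
+     metric_values = ast.literal_eval(data['metric_values'])     # no code execution
+     result_output = ast.literal_eval(data['resultoutput'])[0]
+     summary = str(data['passed_tests']) + '/' + str(data['total_tests'])
+     return data['prompts'], data['generations'], metric_values, result_output, summary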
df.to_csv(response, index=False) + return response + else: + context = {'error': 'Fail to Download File','version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'} + return render(request, ""llmtestingresults.html"",context) + except Exception as e: + print(e) + errormsg = str(e) + if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg: + errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.' + + if 'Max retries exceeded with url' in errormsg: + errormsg = 'Please make sure you have good internet connection and access to API endpoint for your resource.' + context = {'error':errormsg,'version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'} + return render(request, ""llmtestingresults.html"",context) + +def llmtestreport(request): + file_path = request.session['llmtestreport'] + # file_path = ""C:\\AION\\To_Kiran\\To_Kiran\\codeCloneReport\\code_clone_report.txt"" + report_path = os.path.join(file_path) + if os.path.exists(report_path): + with open(report_path, 'rb') as fh: + response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path) + return response + else: + return render(request, ""llmtestingresults.html"",context={""error"":""Fail To Download File"",'version':AION_VERSION,'result':'result','selected': 'llmtestingresults'}) + +### To display libraries in UI #### +def libraries(request): + current_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.normpath(os.path.join(current_dir,'..','..','lic',""requirement.csv"")) + library_data = [] + with open(file_path, 'r') as file: + csv_reader = csv.DictReader(file) + for row in csv_reader: + library_info = { + ""library"" :row[""Library""] if row.get(""Library"") else ""none"", + ""version"" :row[""Version""] if row.get(""Version"") else ""none"", + ""license"" :row[""License""] if row.get(""License"") else ""none"" + } + library_data.append(library_info) + # print(library_data) + return render(request, ""libraries.html"", context={""data"":library_data,'version':AION_VERSION,'selected': 'libraries'}) + +# For Code Clone Detection + + +def codeclonedetectionresult(request): + from appbe.codeclonedetection import CodeCloneDetectionFiles + + try: + codecloneconfig = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','code_clone_config.json') + f = open(codecloneconfig, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + rootdir = request.POST.get('rootdirectory') + ccdmode = request.POST.get('ccdmode') + + if(os.path.isdir(rootdir)): + llm_key,llm_url,api_type,api_version = get_llm_data() + openai_apiKey = llm_key + openai_baseUrl = llm_url + try: + openai_apiType = api_type + openai_apiVersion = api_version + except: + openai_apiType = configSettings['openaiApiType'] + openai_apiVersion = configSettings['openaiApiVersion'] + openai_embeddingEngine = configSettings['codeCloneDetection']['openaiEmbeddingEngine'] + openai_embeddingModel = configSettings['codeCloneDetection']['openaiEmbeddingModel'] + openai_chatModel = configSettings['codeCloneDetection']['openaiChatModel'] + openai_deploymentId = configSettings['codeCloneDetection']['openaiDeploymentId'] + 
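+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Minimal, hedged restatement of the CSV-download pattern used by the
+ # download_prompt branch above: pandas writes straight into the HttpResponse,
+ # which is file-like, and Content-Disposition marks it as an attachment.
+ # The view name and the template path are placeholders.
+ import pandas as pd
+ from django.http import HttpResponse
+ def download_prompt_template(request):
+     df = pd.read_csv('Prompt_template.csv', encoding='utf8')     # placeholder path
+     response = HttpResponse(content_type='text/csv')
+     response['Content-Disposition'] = 'attachment; filename=llmreport.csv'
+     df.to_csv(response, index=False)
+     return response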
rootDirFilesType = configSettings['codeCloneDetection']['rootDirFilesType'] + else: + return render(request, ""codeclone.html"",context={""codeclonedetectionerror"":""Please provide valid root directory file path."",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'}) + filetimestamp = str(int(time.time())) + + config_json_filename = os.path.join(CONFIG_FILE_PATH, 'code_clone_config_' + filetimestamp + '.json') + updatedConfigSettings = json.dumps(configSettings) + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(updatedConfigSettings) + fpWrite.close() + + from appbe.dataPath import DEPLOY_LOCATION + codeclonedir_path = os.path.join(DEPLOY_LOCATION,('codeCloneDetection_'+filetimestamp)) + os.makedirs(codeclonedir_path,exist_ok=True) + request.session['clonereport'] = str(codeclonedir_path) + + try: + + if (rootDirFilesType.lower() == ""python"" and ccdmode.lower() == ""openai""): + cdobj = CodeCloneDetectionFiles(rootdir,openai_baseUrl, openai_apiKey,openai_apiType,openai_apiVersion,codeclonedir_path,openai_embeddingEngine,openai_embeddingModel,openai_chatModel,openai_deploymentId) + + report_str,report_dict,report_df,report_json = cdobj.getCloneReport() + clonetype = report_dict['Code_clones_count_by_clone_type'].to_dict() + for i in clonetype: + clonevalues = clonetype[i].values() + clonekeys = clonetype[i].keys() + clonetype = zip(clonekeys,clonevalues) + return render(request, ""codeclonedetectionresult.html"",context={'report_json':json.loads(report_json),'report_dict':report_dict,'clonetype':clonetype,'clonefunctions':report_dict['clone_functions'],'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult','openai':'openai'}) + ## Pls uncomment below code if you need to use sklearn based code clone detection. 
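+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Hedged sketch of the per-run bookkeeping done above for code clone detection:
+ # snapshot the configuration under a timestamped name and create a dedicated
+ # output directory for the run. create_run_workspace is a hypothetical helper.
+ import json
+ import os
+ import time
+ def create_run_workspace(config_dir, deploy_dir, config_settings):
+     filetimestamp = str(int(time.time()))
+     config_path = os.path.join(config_dir, 'code_clone_config_' + filetimestamp + '.json')
+     with open(config_path, 'w') as fp:
+         json.dump(config_settings, fp)
+     run_dir = os.path.join(deploy_dir, 'codeCloneDetection_' + filetimestamp)
+     os.makedirs(run_dir, exist_ok=True)
+     return config_path, run_dir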
+ # elif (ccdmode.lower() ==""sklearn""): + # from appbe.codeclonedetection_sklearn import codeCloneDetectionSklearn + # chunk_size = 10000 + # cdobj = codeCloneDetectionSklearn(rootdir,codeclonedir_path,chunk_size) + # report_dict = cdobj.get_clone() + + + # return render(request, ""codeclonedetectionresult.html"",context={'report_dict':report_dict,'function_df':report_dict['result_df'],'function_dict':report_dict['result_df'].to_dict(),'sklearn':'sklearn'}) + else: + raise Exception (""Invalid clonedetection input."") + return render(request, ""codeclone.html"",context={""codeclonedetectionerror"":""Python Files Are Only Supported.""}) + except Exception as e: + return render(request, ""codeclone.html"",context={""codeclonedetectionerror"":""OpenAI Model Connection Error"",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'}) + + except Exception as e: + print('code clone detection interface issue.Error message: ',e) + return render(request, ""codeclone.html"",context={""codeclonedetectionerror"":""OpenAI Model Connection Error"",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'}) + +def codeclonereport(request): + file_path = request.session['clonereport'] + report_path = os.path.join(file_path, 'codeCloneReport','code_clone_report.txt') + if os.path.exists(report_path): + with open(report_path, 'rb') as fh: + response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path) + return response + else: + return render(request, ""codeclonedetectionresult.html"",context={""codeclonedetectionerror"":""Fail To Download File"",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'}) + +def evaluatepromptmetrics(request): + """""" Evaluate prompt only information for LLM Evaluation."""""" + import whylogs as why + from langkit import light_metrics + from whylogs.experimental.core.udf_schema import udf_schema + from whylogs.experimental.core.udf_schema import register_dataset_udf + from langkit import lang_config, response_column + import json + + prompt_msg = request.GET.get('instruction') + text_schema = udf_schema() + llm_schema = light_metrics.init() + df = pd.DataFrame({ + ""prompt"": [ + prompt_msg + ]}) + results = why.log(df, schema=udf_schema()) # .profile() + view = results.view() + + # import pdb + # pdb.set_trace() + + from appbe.evaluate_prompt import evaluate_prompt_metrics + final_output_json,prompt_results = evaluate_prompt_metrics(prompt_msg) + prompt_results_json = json.dumps(prompt_results, indent=4) + # return prompt_results_json,prompt_results + return HttpResponse(final_output_json) + from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from appbe.pages import getversion +AION_VERSION = getversion() + +def datagenrate(request): + from appbe.aion_config import settings + usecasetab = settings() + context = {'selected':'DataOperations','usecasetab':usecasetab} + context['version'] = AION_VERSION + return render(request, ""datagenrate.html"",context) + +def generateconfig(request): + from appbe import generate_json_config as gjc + try: + gjc.generate_json_config(request) + return render(request, ""datagenrate.html"",context={'success':'success','selected':'DataOperations'}) + except Exception as e: + print(e) + return render(request, 
""datagenrate.html"",context={'error':str(e),'selected':'DataOperations'}) from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +from appbe.pages import getusercasestatus +from appbe.pages import getversion +AION_VERSION = getversion() +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +from appbe.aion_config import getrunningstatus +import time +def computetoGCPLLaMA13B(request): + from appbe import compute + from appbe.pages import get_usecase_page + try: + compute.updateToComputeSettings('GCP') + time.sleep(2) + request.session['IsRetraining'] = 'No' + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fail to update ComputeSettings','version':AION_VERSION}) + +def computetoLLaMMA7b" +"(request): + from appbe import compute + from appbe.pages import get_usecase_page + try: + compute.updateToComputeSettings('AWS') + time.sleep(2) + #print(1) + request.session['IsRetraining'] = 'No' + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fail to update ComputeSettings','version':AION_VERSION}) + +def computetoAWS(request): + from appbe import compute + from appbe.pages import get_usecase_page + try: + compute.updateToComputeSettings('AWS') + time.sleep(2) + #print(1) + request.session['IsRetraining'] = 'No' + status,context,action = get_usecase_page(request,usecasedetails,Existusecases) + context['version'] = AION_VERSION + return render(request,action,context) + except Exception as e: + print(e) + return render(request, 'usecases.html',{'error': 'Fail to update ComputeSettings','version':AION_VERSION}) +def setting_context(request): + from appbe.aion_config import get_graviton_data + from appbe.aion_config import get_edafeatures + from appbe.aion_config import get_telemetryoptout + from appbe.aion_config import get_llm_data + from appbe.aion_config import running_setting + from appbe import compute + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + from appbe.aion_config import settings + usecasetab = settings() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + graviton_url, graviton_userid = get_graviton_data() + No_of_Permissible_Features_EDA = get_edafeatures() + telemetryoptout = get_telemetryoptout() + llm_key,llm_url,api_type,api_version =get_llm_data() + ruuningSetting = running_setting() + computeinfrastructure = compute.readComputeConfig() + try: + context = {'computeinfrastructure':computeinfrastructure,'graviton_url':graviton_url,'graviton_userid':graviton_userid,'FeaturesEDA':No_of_Permissible_Features_EDA,'llm_key':llm_key,'llm_url':llm_url,'ruuningSetting':ruuningSetting,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'api_type':api_type,'api_version':api_version,'telemetryoptout':telemetryoptout, + 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion':ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage()} + context['version'] = AION_VERSION + return context + except Exception 
as e: + print(e) + context = {'computeinfrastructure':computeinfrastructure,'error':'Error in Settings'} + context['version'] = AION_VERSION + return context + +def startKafka(request): + try: + nooftasks = getrunningstatus('AION_Consumer') + if len(nooftasks): + status = 'AION Kafka Consumer Already Running' + else: + import subprocess + kafkapath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','sbin','AION_Consumer.bat')) + #subprocess.Popen(kafkapath, shell=True) + os.system('start cmd /c ""'+kafkapath+'""') + #addKafkaModel(request,request.session['datalocation']) + status = 'Kafka Consumer Initiated Successfully' + context = settings(request) + context['status'] = status + return render(request, 'settings_page.html', context) + except: + return render(request, 'settings_page.html', {'error':'Fail to start Kafka'}) + +def startPublishServices(request): + from appbe.models import startServices + startServices(request,usecasedetails,Existusecases) + status = 'Publish services start successfully' + context = setting_context(request) + context['status'] = status + return render(request, 'settings_page.html', context) + +def saveopenaiconfig(request): + from appbe.aion_config import saveopenaisettings + try: + saveopenaisettings(request) + context = setting_context(request) + context['version'] = AION_VERSION + context['success'] = True + return render(request, 'settings_page.html', context) + except: + context = {'error': 'error', 'runtimeerror': 'runtimeerror'} + return render(request, 'settings_page.html', context) +def savegravitonconfig(request): + from appbe.aion_config import savegravitonconfig + try: + savegravitonconfig(request) + context = setting_context(request) + context['version'] = AION_VERSION + context['success'] = True + return render(request, 'settings_page.html', context) + except: + context={'error':'error','runtimeerror':'runtimeerror'} + return render(request, 'settings_page.html',context) + +def saveaionconfig(request): + from appbe.aion_config import saveconfigfile + try: + saveconfigfile(request) + context = setting_context(request) + context['version'] = AION_VERSION + context['success'] = True + return render(request, 'settings_page.html', context) + except: + context={'error':'error','runtimeerror':'runtimeerror'} + return render(request, 'settings_page.html',context) +def settings_page(request): + try: + context = setting_context(request) + context['version'] = AION_VERSION + context['selected'] = 'Settings' + return render(request, 'settings_page.html', context) + except: + return render(request, 'settings_page.html', {'error':'Please enter valid inputs','version':AION_VERSION}) + +def adds3bucket(request): + try: + if request.method == 'POST': + from appbe.s3bucketsDB import add_new_s3bucket + status = add_new_s3bucket(request) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Some values are missing','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + if status == 'error1': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Bucket with same name already 
exist','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + except: + return render(request, 'settings_page.html',{'error': 'Fail to Add S3bucket'}) + +def GCSbucketAdd(request): + try: + if request.method == 'POST': + from appbe.gcsbucketsDB import add_new_GCSBucket + status = add_new_GCSBucket(request) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Some values are missing','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + if status == 'error1': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Bucket with same name already exist','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + except Exception as e: + print(e) + return render(request, 'settings_page.html',{'error': 'Fail to Add GCSbucket','version':AION_VERSION}) + +def azurestorageAdd(request): + try: + if request.method == 'POST': + from appbe.azureStorageDB import add_new_azureStorage + status = add_new_azureStorage(request) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Some values are missing','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + if status == 'error1': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Bucket with same name already exist','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + except: + return render(request, 'settings_page.html',{'error': 'Fail to Add Azure Container'}) + +def removeazurebucket(request,name): + try: + if request.method == 'GET': + from appbe.azureStorageDB import remove_azure_bucket + status = remove_azure_bucket(name) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Failed to delete Azure Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + + except: + return render(request, 'settings_page.html',{'error': 'Failed to delete Azure Bucket'}) + +def removes3bucket(request,name): + try: + if request.method == 'GET': + from appbe.s3bucketsDB import remove_s3_bucket + status = remove_s3_bucket(name) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from 
appbe.azureStorageDB import get_azureStorage + context = {'error':'Failed to delete S3bucket','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + + except: + return render(request, 'settings_page.html',{'error': 'Failed to delete S3bucket'}) + +def removegcsbucket(request,name): + try: + if request.method == 'GET': + from appbe.gcsbucketsDB import remove_gcs_bucket + status = remove_gcs_bucket(name) + context = setting_context(request) + context['version'] = AION_VERSION + if status == 'error': + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = {'error':'Failed to delete GCS Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION} + + return render(request,'settings_page.html',context) + + except: + return render(request, 'settings_page.html',{'error': 'Failed to delete GCS Bucket'}) + +def gcpcomputesettings(request): + try: + from appbe import compute + status = compute.updateGCPConfig(request) + context = setting_context(request) + if status == 'error': + context['ErrorMsg'] = 'Some values are missing' + context['version'] = AION_VERSION + context['success'] = True + return render(request, 'settings_page.html',context) + except: + return render(request, 'settings_page.html',{'error': 'Fail to Save GCP Settings','version':AION_VERSION}) + +def amazonec2settings(request): + try: + from appbe import compute + status = compute.updateComputeConfig(request) + context = setting_context(request) + if status == 'error': + context['ErrorMsg'] = 'Some values are missing' + context['version'] = AION_VERSION + context['success'] = True + return render(request, 'settings_page.html',context) + except: + return render(request, 'settings_page.html',{'error': 'Fail to Save AWS Settings','version':AION_VERSION}) from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import json +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getusercasestatus +import os +import plotly.graph_objects as go +import time +import sys +from pathlib import Path +import csv +import pandas as pd +import numpy as np +from appbe.pages import getversion +AION_VERSION = getversion() +def uploadedData(request): + from appbe.dataIngestion import ingestDataFromFile + context = ingestDataFromFile(request,DATA_FILE_PATH) + context['version'] = AION_VERSION + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + context['FeturesEDA'] = No_of_Permissible_Features_EDA + return render(request, 'upload.html', context) + +def uploaddatafromscript(request): + from appbe.aion_config import" +"settings + usecasetab = settings() + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + from appfe.modelTraining.models import Existusecases + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + try: + scriptPath = 
request.POST.get('pythonscriptPath') + if(os.path.isfile(scriptPath) == False ): + context = {'tab': 'upload', 'error': 'File does not exist', 'selected': 'modeltraning','clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + if(scriptPath != ''): + try: + f = open(scriptPath, ""r"") + pythoncode = f.read() + f.close() + ldict = {} + exec(pythoncode, globals(), ldict) + except Exception as e: + context = {'tab': 'upload', 'error': 'Error in script execution i.e., '+str(e), 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + if 'dfpy' not in ldict: + context = {'tab': 'upload', 'error': 'dfpy dataset not found', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + raw_data = '' + if 'df_aion_raw' in ldict: + df_raw = ldict['df_aion_raw'] + raw_data = df_raw.to_json(orient=""records"") + raw_data = json.loads(raw_data) + df = ldict['dfpy'] + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df.to_csv(dataFile, index=False) + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'raw_data':raw_data,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'usecasetab':usecasetab} + return render(request, 'upload.html', context) + else: + from appfe.modelTraining.models import Existusecases + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + context = {'tab': 'upload','computeinfrastructure':computeinfrastructure, 'error': 'Please enter script path', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + return render(request, 'upload.html', context) + except: + from appfe.modelTraining.models import Existusecases + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + return render(request, 'upload.html', {'tab': 'upload','clusteringModels':clusteringModels,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'error':'Fail to upload 
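+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Hedged distillation of the script-upload contract above: the user script is
+ # executed in a scratch namespace and must define a DataFrame named dfpy (and
+ # may optionally define df_aion_raw). As in the view, exec() trusts the script
+ # completely; load_dataframe_from_script is a hypothetical helper name.
+ def load_dataframe_from_script(script_path):
+     with open(script_path, 'r') as f:
+         pythoncode = f.read()
+     ldict = {}
+     exec(pythoncode, globals(), ldict)
+     if 'dfpy' not in ldict:
+         raise ValueError('dfpy dataset not found in the supplied script')
+     return ldict['dfpy']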
data from script','selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}) + +def listfiles(request): + from appbe.labels import label_filename + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + path = request.POST.get('scriptPath') + print(path) + dirstatus = os.path.isdir(path) + import glob + try: + if(path != '' and dirstatus == True): + radiovalue = request.POST.get('filetype') + # create csv + filetimestamp = str(int(time.time())) + header = ['File', 'Label'] + filename = 'AION_List_' + selected_use_case + '.csv' + dataFile = os.path.join(DATA_FILE_PATH, filename) + csvfilename = 'AION_List_' + filetimestamp + request.session['csvfilename'] = dataFile + request.session['datalocation'] = path + type = 'NA' + request.session['fileExtension'] = radiovalue + if radiovalue in ['avi', 'wmv', 'mp4']: + if request.POST.get('computeInfrastructure') in ['AWS','GCP']: + request.session['datatype'] = 'LLM_Video' + type = 'LLM_Video' + else: + request.session['datatype'] = 'Video' + type = 'Video' + elif radiovalue in ['jpeg', 'png', 'bmp']: + if request.POST.get('computeInfrastructure') in ['AWS','GCP']: + request.session['datatype'] = 'LLM_Image' + type = 'LLM_Image' + else: + request.session['datatype'] = 'Image' + type = 'Image' + elif radiovalue in ['txt', 'log', 'pdf','docs','docx','doc']: + if request.POST.get('computeInfrastructure') in ['AWS','GCP']: + request.session['datatype'] = 'LLM_Document' + type = 'LLM_Document' + else: + request.session['datatype'] = 'Document' + type = 'Document' + elif radiovalue in ['java','py']: + if request.POST.get('computeInfrastructure') in ['AWS','GCP']: + request.session['datatype'] = 'LLM_Code' + type = 'LLM_Code' + else: + request.session['datatype'] = 'Code' + type = 'Document' + if type == 'NA': + context = {'tab': 'upload', 'error': 'Please select the type', 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + return render(request, 'upload.html', context) + request.session['folderPath'] = path + request.session['csvfullpath'] = dataFile + file = open(dataFile, 'w', newline='') + writer = csv.DictWriter(file, fieldnames=header) + # writing data row-wise into the csv file + writer.writeheader() + #os.chdir(path) + tifCounter = 0 + if radiovalue == 'doc': + tifCounter = len(glob.glob(os.path.join(path,""**/*.""+'doc'),recursive=True)) + tifCounter = tifCounter+len(glob.glob(os.path.join(path,""**/*.""+'docx'),recursive=True) ) + + else: + tifCounter = len(glob.glob(os.path.join(path, ""**/*."" + radiovalue), recursive=True)) + if radiovalue == 'jpeg': + tifCounter += len(glob.glob1(path,""*.jpg"")) + labelfileexists = False + dflabels = pd.DataFrame() + if type == 'Image': + + labelfilename = label_filename(request) + labelfileexists = os.path.isfile(labelfilename) + if labelfileexists == True: + dflabels = pd.read_csv(labelfilename) + if len(dflabels) == 0: + labelfileexists = False + else: + dflabels = dflabels.head(5) + if tifCounter == 0: + context = {'tab': 'upload', 'error': 'No files in the folder with selected file type', 'selected': 'modeltraning','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': 
ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + return render(request, 'upload.html', context) + filesCount = 0 + filesSize = 0 + files=[] + + + for filename in glob.iglob(os.path.join(path, ""**/*."" + radiovalue), recursive=True): + files.append(filename) + if radiovalue == 'doc': + for filename in glob.iglob(os.path.join(path, ""**/*."" + 'docx'), recursive=True): + files.append(filename) + for filename in files: + filesCount = filesCount+1 + writer.writerow({'File': filename, 'Label': ''}) + get_size = os.path.getsize(filename) + filesSize = round(filesSize + get_size, 1) + + if filesSize > 1048576: + size = round((filesSize / (1024 * 1024)), 1) + filesSize = str(size) + ' M' + elif filesSize > 1024: + size = round((filesSize /1024), 1) + filesSize = str(size) + ' K' + else: + filesSize = str(filesSize) + ' B' + files = pd.DataFrame(files,columns=['File']) + files.index = range(1, len(files) + 1) + files.reset_index(level=0, inplace=True) + files = files.to_json(orient=""records"") + files = json.loads(files) + if radiovalue == 'jpeg': + for filename in glob.iglob(os.path.join(path,""**/*.jpg""), recursive=True): + writer.writerow({'File': filename, 'Label': ''}) + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + #filesSize = str(filesSize)+' M' + print(filesSize) + print(filesCount) + context = {'tab': 'upload','files':files,'filesCount':filesCount,'filesSize':filesSize,'filelist':dataFile,'finalstate':0, 'file': dataFile,'FeturesEDA':No_of_Permissible_Features_EDA, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,""selectedfile"":radiovalue,""selectedPath"":path} + return render(request, 'upload.html', context) + else: + context = {'tab': 'upload', 'error': 'Error: Folder path either not entered or does not exists.', 'modeltraning': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,""selectedfile"":radiovalue,""selectedPath"":path} + return render(request, 'upload.html', context) + except Exception as e: + print(e) + return render(request, 'upload.html', {'tab': 'upload','error':'Folder path is mandatory','version':AION_VERSION,'computeinfrastructure':computeinfrastructure, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}) + +def validatecsv(request): + from appbe.aion_config import settings + usecasetab = settings() + from appbe import exploratory_Analysis as ea + from appbe.labels import label_filename + try: + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + #print(request.POST.get('validatesubmit')) + if request.POST.get('validatesubmit') == 'ObjectDetection': + df = pd.read_csv(request.session['csvfullpath']) + dataFile = label_filename(request) + request.session['LabelFileName'] = dataFile + request.session['currentIndex'] = 0 + request.session['endIndex'] = len(df)-1 + not_end = not(request.session['currentIndex'] == request.session['endIndex']) + filePath = 
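+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Hedged restatement of the listing logic above: collect files of one extension
+ # recursively and report the total size with the same B / K / M thresholds shown
+ # on the upload page. list_files_with_size is an illustrative helper name.
+ import glob
+ import os
+ def list_files_with_size(path, extension):
+     files = glob.glob(os.path.join(path, '**/*.' + extension), recursive=True)
+     total = sum(os.path.getsize(f) for f in files)
+     if total > 1048576:
+         size = str(round(total / (1024 * 1024), 1)) + ' M'
+     elif total > 1024:
+         size = str(round(total / 1024, 1)) + ' K'
+     else:
+         size = str(total) + ' B'
+     return files, size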
os.path.join(request.session['datalocation'],df[""File""].iloc[request.session['currentIndex']]) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + request.session['labels'] = [] + if os.path.isfile(dataFile): + image = df[""File""].iloc[request.session['currentIndex']] + with open(dataFile, 'r') as file: + reader = csv.reader(file) + for row in reader: + if row[0] == image: + labels = request.session['labels'] + labels.append({""id"":row[1], ""name"":row[9], ""xMin"": row[3], ""xMax"":row[4], ""yMin"":row[5], ""yMax"":row[6], ""height"":row[7],""width"":row[8], ""angle"":row[2]}) + request.session['labels'] = labels + labels = request.session['labels'] + else: + with open(dataFile,'w') as f: + f.write(""File,id,angle,xmin,xmax,ymin,ymax,height,width,Label\\n"") + f.close() + bounds = [] + context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'computeinfrastructure':computeinfrastructure} + context['version'] = AION_VERSION + return render(request, 'objectlabelling.html', context) + elif request.POST.get('validatesubmit') == 'bulkLabeling': + type =" +"'BulkImage' + dataFile = request.session['csvfullpath'] + csvfilename = request.session['csvfullpath'] + labelfileexists = False + dflabels = pd.DataFrame() + context = {'tab': 'upload', 'file': dataFile, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + elif request.POST.get('validatesubmit') == 'ImageClassification': + df = pd.read_csv(request.session['csvfullpath']) + dataFile = label_filename(request) + request.session['LabelFileName'] = dataFile + with open(dataFile,'w') as f: + f.write(""File,Label\\n"") + f.close() + request.session['currentIndex'] = 0 + request.session['endIndex'] = len(df)-1 + not_end = not(request.session['currentIndex'] == request.session['endIndex']) + filePath = os.path.join(request.session['datalocation'],df[""File""].iloc[request.session['currentIndex']]) + string = base64.b64encode(open(filePath, ""rb"").read()) + request.session['labels'] = '' + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'computeinfrastructure':computeinfrastructure} + context['version'] = AION_VERSION + return render(request, 'imagelabelling.html', context) + elif request.POST.get('validatesubmit') == 'submitpreviouslabel': + dataFile = label_filename(request) + request.session['LabelFileName'] = dataFile + df = pd.read_csv(dataFile) + if len(df.columns) == 2: + context = imageeda(request) + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + else: + context = objecteda(request) + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + else: + df = pd.read_csv(request.session['csvfullpath']) + + if request.session['datatype'] in ['LLM_Document','LLM_Code']: + from appfe.modelTraining.bc_views import basicconfig + return basicconfig(request) + else: + if 
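+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # The labelling views above embed images inline by base64-encoding them into a
+ # data URI. A hedged one-function version of that trick; the content type is
+ # assumed to be PNG, as in the original, and the helper name is hypothetical.
+ import base64
+ import urllib.parse
+ def image_to_data_uri(file_path):
+     with open(file_path, 'rb') as fh:
+         encoded = base64.b64encode(fh.read())
+     return 'data:image/png;base64,' + urllib.parse.quote(encoded)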
df['Label'].isnull().sum() > 0: + # show error message + if request.session['datatype'] == 'Document': + + dataDf = pd.DataFrame() + + dataDict = {} + keys = [""text""] + for key in keys: + dataDict[key] = [] + for i in range(len(df)): + filename = os.path.join(request.session['datalocation'],df.loc[i,""File""]) + + if Path(filename).suffix == '.pdf': + from appbe.dataIngestion import pdf2text + text = pdf2text(filename) + dataDict[""text""].append(text) + else: + with open(filename, ""r"",encoding=""utf-8"") as f: + dataDict[""text""].append(f.read()) + f.close() + dataDf = pd.DataFrame.from_dict(dataDict) + tcolumns=['text'] + wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns) + status_msg = 'Successfully Done' + firstFile = pd.DataFrame() + context = {'tab': 'upload','firstFile':firstFile,'singletextdetails':wordcloudpic,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + else: + errormessage = str(df['Label'].isnull().sum()) + "" rows do not contain label values"" + context = {'error': errormessage} + + else: + eda_result = '' + duplicate_img = '' + color_plt = '' + df2 = df.groupby('Label', as_index=False)['File'].count().reset_index().rename(columns ={'File':'Number of Files'}) + df_json = df2.to_json(orient=""records"") + df_json = json.loads(df_json) + cfig = go.Figure() + xaxis_data = df2['Label'].tolist() + yaxis_data = df2['Number of Files'].tolist() + cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data)) + cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File') + bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520) + firstFile = df.groupby('Label').first().reset_index() + #firstFile['FilePath'] = firstFile['File'].apply(lambda x: os.path.join(request.session['datalocation'], x)) + images = [] + if request.session['datatype'] == 'Image': + qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation']) + #print(qualityscore) + for i in range(len(firstFile)): + filename = firstFile.loc[i, ""File""] + filePath = os.path.join(request.session['datalocation'], filename) + string = base64.b64encode(open(filePath, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + firstFile.loc[i, ""Image""] = image_64 + firstFile.loc[i, ""Quality""] = qualityscore[filename] + + elif request.session['datatype'] == 'Document': + dataDrift = '' + dataDf = pd.DataFrame() + dataDict = {} + keys = [""text"",""Label""] + for key in keys: + dataDict[key] = [] + for i in range(len(df)): + filename = os.path.join(request.session['datalocation'],df.loc[i,""File""]) + if Path(filename).suffix == '.pdf': + from appbe.dataIngestion import pdf2text + text = pdf2text(filename) + dataDict[""text""].append(text) + dataDict[""Label""].append(df.loc[i,""Label""]) + else: + with open(filename, ""r"",encoding=""utf-8"") as f: + dataDict[""text""].append(f.read()) + f.close() + dataDict[""Label""].append(df.loc[i,""Label""]) + dataDf = pd.DataFrame.from_dict(dataDict) + wordcloudpic = ea.getCategoryWordCloud(dataDf) + status_msg = 'Successfully Done' + firstFile = pd.DataFrame() + context = {'tab': 'upload','firstFile':firstFile,'dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': 
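+ # --- Editor's illustrative sketch (not part of the original module) ---
+ # Hedged sketch of the label-distribution chart built above: group the listing
+ # CSV by Label, count files per label, and render a Plotly bar chart as an HTML
+ # fragment for the upload page. label_distribution_bargraph is a placeholder name.
+ import plotly.graph_objects as go
+ def label_distribution_bargraph(df):
+     counts = df.groupby('Label', as_index=False)['File'].count()
+     fig = go.Figure()
+     fig.add_trace(go.Bar(x=counts['Label'].tolist(), y=counts['File'].tolist()))
+     fig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
+     return fig.to_html(full_html=False, default_height=450, default_width=520)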
selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + return render(request, 'upload.html', context) + status_msg = 'Successfully Done' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile, + 'status_msg': status_msg,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab} + context['version'] = AION_VERSION + + return render(request, 'upload.html', context) + except UnicodeDecodeError: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Only utf8 file encoding supported','computeinfrastructure':computeinfrastructure}) + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Validation Failed','computeinfrastructure':computeinfrastructure}) + +def file_successfully_created(request,dataFile): + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + try: + request.session['datalocation'] = str(dataFile) + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile,optimize=1) + featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures() + # ---------------------------- + + numberoffeatures = len(featuresList) + from appfe.modelTraining.views import getimpfeatures + imp_features = getimpfeatures(dataFile,numberoffeatures) + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + from utils.file_ops import read_df + status,df_top = read_df(dataFile,nrows=10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'imp_features':imp_features, 'numberoffeatures':numberoffeatures, + 'currentstate': request.session['currentstate'], 'finalstate': 
request.session['finalstate'],'exploratory':False} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except Exception as e: + print(e) + return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'}) + +def uploadDatafromSatandardDataset(request): + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + try: + dataobject = request.POST.get('dataset') + if dataobject == 'Iris': + from sklearn.datasets import load_iris + data = load_iris() + df = pd.DataFrame(data.data, columns=data.feature_names) + df['Species']=data['target'] + df['Species']=df['Species'].apply(lambda x: data['target_names'][x]) + elif dataobject == 'Boston': + from sklearn.datasets import load_boston + df1 = load_boston() + df = pd.DataFrame(data=df1.data, columns=df1.feature_names) + df[""target""] = df1.target + elif dataobject == 'BreastCancer': + from sklearn.datasets import load_breast_cancer + cancer = load_breast_cancer() + df = pd.DataFrame(np.c_[cancer['data'], cancer['target']],columns= np.append(cancer['feature_names'], ['target'])) + elif dataobject == 'Diabetes': + from sklearn.datasets import load_diabetes + data = load_diabetes() + df = pd.DataFrame(data.data, columns=data.feature_names) + df['y']=data['target'] + elif dataobject == 'Wine': + from sklearn.datasets import load_wine + data = load_wine() + df = pd.DataFrame(data.data, columns=data.feature_names) + df['class']=data['target'] + df['class']=df['class'].apply(lambda x: data['target_names'][x]) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df.to_csv(dataFile, index=False) + request.session['delimiter'] = ',' + request.session['textqualifier'] = '""' + # EDA Subsampling changes + # ---------------------------- + from appbe.eda import ux_eda + eda_obj = ux_eda(dataFile) + featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures() + # ---------------------------- + + numberoffeatures = len(featuresList) + from appfe.modelTraining.views import getimpfeatures + imp_features = getimpfeatures(dataFile,numberoffeatures) + + + samplePercentage = 100 + samplePercentval = 0 + showRecommended = False + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + selected_use_case = request.session['" +"UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + from appfe.modelTraining.models import Existusecases + clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') + context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': 
statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'imp_features':imp_features, 'numberoffeatures':numberoffeatures, + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except Exception as e: + print(e) + return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'}) + +def sqlAlchemy(request): + from appbe import alchemy + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + dbType = request.POST.get('dbType') + request.session['dbType'] = dbType + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + from appbe.aion_config import get_edafeatures + No_of_Permissible_Features_EDA = get_edafeatures() + if dbType.lower() == ""sqlite"": + request.session['filepath'] = request.POST.get('filepath') + request.session['tablenamesql'] = request.POST.get('tablenamesql') + table_details = {""Database Type"": dbType, ""File Path"": request.session['filepath']} + if dbType.lower() in [""postgresql"", ""mysql"", ""mssql""]: + if dbType.lower()=='mssql': + db = ""mssql"" + else: + db = ""postgresql"" + request.session['tablename'] = request.POST.get('tablename'+'_'+db) + request.session['dbname'] = request.POST.get('dbname'+'_'+db) + request.session['password'] = request.POST.get('password'+'_'+db) + request.session['username'] = request.POST.get('username'+'_'+db) + request.session['port'] = request.POST.get('port'+'_'+db) + request.session['host'] = request.POST.get('host'+'_'+db) + table_details = {""Database Type"": dbType, ""Database Name"": request.session['dbname'], + ""Host"": request.session['host'], ""Port"": request.session['port']} + + if dbType.lower() == ""mssql"": + request.session['driver'] = request.POST.get('driver'+'_'+db) + table_details.update({""driver"": request.session['driver']}) + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + #print(dbType) + submit_button = request.POST.get('sql_submit') + if submit_button == 'multitable': + try: + connection_string = alchemy.get_connection(request) + import sqlalchemy as db + engine = db.create_engine(connection_string) + engine.connect() + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + print(request.POST.get('dbType')) + context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'version':AION_VERSION} + context.update({'db_details':table_details}) + return render(request, 'querybuildersql.html', context) + except Exception as e: + print(str(e)) + if ""No module named 'psycopg2'"" in str(e): + error = 'Not found module: psycopg2. 
Please install and try again' + else: + error = 'Error in connecting to the database' + return render(request, 'upload.html', {'tab': 'tabconfigure', 'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, + 'selected': 'modeltraning', 'version': AION_VERSION, + 'error': error}) + + else: + try: + + df = alchemy.getDataFromSingleTable(request) + + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df.to_csv(dataFile, index=False) + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'FeturesEDA':No_of_Permissible_Features_EDA, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + except Exception as e: + print(e) + if ""No module named 'psycopg2'"" in str(e): + context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,""error"":""Not found module: psycopg2. Please install and try again""} + else: + context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,""error"":""Error in fetching the data from database.""} + context['version'] = AION_VERSION + return render(request, 'upload.html', context) + +def get_table_list(request): + from appbe import alchemy + dbType = request.session['dbType'] + table_list = alchemy.list_tables(request) + #print(json.dumps(table_list)) + return HttpResponse(json.dumps(table_list), content_type=""application/json"") + +def get_tables_fields_list(request): + from appbe import alchemy + table_list = request.GET.get(""sel_tables"") + + table_field_list = alchemy.list_tables_fields(request,table_list) + + return HttpResponse(table_field_list, content_type=""application/json"") + +def validate_query(request): + from appbe import alchemy + query = request.GET.get(""query"") + table_details = request.GET.get(""table_details"") + join_details = request.GET.get(""join_details"") + where_details = request.GET.get(""where_details"") + request.session[""table_details""]=table_details + request.session[""join_details""]=join_details + request.session[""where_details""]=where_details + df,msg = alchemy.validatequery(request,table_details,join_details,where_details) + return HttpResponse(json.dumps(msg), content_type=""application/json"") + + +def submitquery(request): + from appbe import alchemy + from appbe import compute + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + computeinfrastructure = compute.readComputeConfig() + try: + query = request.POST.get(""txtfinalquery"") + table_details = request.session[""table_details""] + join_details = 
request.session[""join_details""] + where_details = request.session[""where_details""] + df,msg = alchemy.validatequery(request,table_details,join_details,where_details) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') + request.session['datalocation'] = str(dataFile) + df.to_csv(dataFile, index=False) + df_top = df.head(10) + df_json = df_top.to_json(orient=""records"") + df_json = json.loads(df_json) + statusmsg = 'Data File Uploaded Successfully ' + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + request.session['currentstate'] = 0 + request.session['finalstate'] = 0 + request.session['datatype'] = 'Normal' + context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', + 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False} + return render(request, 'upload.html', context) + except: + return render(request, 'upload.html', {'tab': 'tabconfigure','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','error':'Failed to upload datafile'}) + +def EDAReport(request): + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'EDA','Yes') + from appbe import exploratory_Analysis as ea + request.session['defaultfilepath'] = DEFAULT_FILE_PATH + request.session['configfilepath'] = CONFIG_FILE_PATH + request.session['deploylocation'] = DEPLOY_LOCATION + from appbe import compute + computeinfrastructure = compute.readComputeConfig() + submit_button = request.POST.get('upload_submit') + ModelVersion = request.session['ModelVersion'] + #print(submit_button) + if submit_button == 'data_eda': + try: + from appbe.aion_config import settings + usecasetab = settings() + from appbe.s3bucketsDB import get_s3_bucket + from appbe.gcsbucketsDB import get_gcs_bucket + from appbe.azureStorageDB import get_azureStorage + context = ea.get_eda(request) + context['computeinfrastructure'] = computeinfrastructure + context['s3buckets'] = get_s3_bucket() + context['gcsbuckets'] = get_gcs_bucket() + context['azurestorage'] = get_azureStorage() + context['version'] = AION_VERSION + context['usecasetab'] = usecasetab + except Exception as e: + print(e) + context = {'error':'Error in doing the EDA','ModelVersion': ModelVersion,'version':AION_VERSION} + return render(request, 'upload.html', context) + +def get_features_datatype(v,num_list,cat_list,text_list): + """""" To get exact datatype of the feature in Data Overview."""""" + if v in cat_list: + return 'Categorical' + elif v in num_list: + return 'Numerical' + elif v in text_list: + return 'Text' + +def downloadedareport(request): + des1 = json.loads(request.POST.get('des1')) + des1 = pd.DataFrame(des1) + cluster_df = json.loads(request.POST.get('cluster_df')) + cluster_df = pd.DataFrame(cluster_df) + + pca_df = [] + if request.POST.get('pca_df') != 'Empty DataFrame\\r\\nColumns: []\\r\\nIndex: []': + pca_df = json.loads(request.POST.get('pca_df')) + pca_df = pd.DataFrame(pca_df) + + cor_mat = json.loads(request.POST.get('cor_mat')) + cor_mat = pd.DataFrame(cor_mat) + 
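# --- Illustrative sketch (not part of the original view) -----------------------------------
# The remainder of downloadedareport() below assembles several DataFrames into a single
# multi-sheet Excel workbook held in memory and streams it back as a file download. A minimal,
# hedged sketch of that pattern, assuming two hypothetical DataFrames `overview_df` and
# `corr_df` and a hypothetical helper name `export_eda_sketch`:
from io import BytesIO

import pandas as pd
from django.http import HttpResponse

def export_eda_sketch(overview_df, corr_df, filename='EDA.xlsx'):
    buffer = BytesIO()
    # xlsxwriter is the same engine used by the view below; each DataFrame gets its own sheet.
    with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
        overview_df.to_excel(writer, sheet_name='Data Overview', index=False)
        corr_df.to_excel(writer, sheet_name='Correlation Analysis', index=False)
    buffer.seek(0)
    response = HttpResponse(
        buffer.read(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=' + filename
    return response
# --------------------------------------------------------------------------------------------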
cor_mat.replace(np.nan, 0, inplace=True) + cor_mat.fillna('None',inplace=True) + usename = request.session['UseCaseName'].replace("" "", ""_"") + '_' + str(request.session['ModelVersion']) + edaFileName = usename + '_EDA.xlsx' + + from io import BytesIO as IO + excel_file = IO() + excel_writer = pd.ExcelWriter(excel_file, engine=""xlsxwriter"") + ##For Task 17622 + actual_df = json.loads(request.POST.get('data_deep_json')) + actual_df = pd.DataFrame(actual_df) + actual_df.replace(np.nan, 0,inplace=True) + actual_df.fillna('None',inplace=True) + top_10_rows = actual_df.head(10) + top_10_rows.to_excel(excel_writer, sheet_name='Top 10 Rows',index=True) + des1 = des1.fillna(0) + #Write everything in one single column + actual_df_numerical_features = actual_df.select_dtypes(exclude='object') + actual_df_categorical_features = actual_df.select_dtypes(include='object') + #For text features + textFeature = json.loads(request.POST.get('textFeature')) + textFeature_df = actual_df.filter(textFeature) + actual_df_categorical_features = actual_df_categorical_features.drop(textFeature, axis=1) + for i in des1['Features']: + num_cols = actual_df_numerical_features.columns.to_list() + cat_cols = actual_df_categorical_features.columns.to_list() + text_cols = textFeature + des1['Features Type'] = des1['Features'].apply(lambda x: get_features_datatype(x, num_cols,cat_cols,text_cols)) + + curr_" +"columns = des1.columns.to_list() + curr_columns.remove('Features Type') + insert_i = curr_columns.index('Features')+1 + curr_columns.insert(insert_i,'Features Type') + des1 = des1[curr_columns] + des1.to_excel(excel_writer, sheet_name='Data Overview',startrow=0, startcol=0,index=False) + ## Hopkins value addition + hopkins_value = str(request.POST.get('hopkins_val')) + hopkins_tip = request.POST.get('hopkins_tip') + hopkins_dict = {'Hopkins_value':[hopkins_value],""hopkins_information"":[hopkins_tip]} + hopkins_df = pd.DataFrame.from_dict(hopkins_dict) + ##Data Distribution + from appbe.eda import ux_eda + eda_obj = ux_eda(actual_df) + datadist_dict={} + for k,v in enumerate(actual_df.columns.to_list()): + distname, sse = eda_obj.DistributionFinder(actual_df[v]) + datadist_dict[v]=[distname,sse] + data_dist_df = pd.DataFrame(datadist_dict) + data_dist_df = data_dist_df.T + data_dist_df.reset_index(inplace=True) + data_dist_df.columns = ['Features','Distribution','SSE'] + data_dist_df.drop(['SSE'],axis=1,inplace=True) + data_dist_df.fillna(""NA"",inplace = True) + data_dist_df = data_dist_df.replace(['',None,pd.NaT],""NA"") + data_dist_df = data_dist_df.replace([""geom""],""geometric"") + data_dist_df.to_excel(excel_writer, sheet_name='Data Distribution',index=False) + + if len(pca_df) > 0: + pca_df.to_excel(excel_writer, sheet_name='Feature Importance',index=False) + + cor_mat.to_excel(excel_writer, sheet_name='Correlation Analysis',index=False) + #Unsupervised clustering + cdf_start_row = 1+len(hopkins_df)+6 + if not textFeature: + import io + hs_info = ""Hopkins Statistics"" + hs_info_df = pd.read_csv(io.StringIO(hs_info), sep="","") + hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=2,index=False) + hopkins_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=2, startcol=0,index=False) + else: + # If text features available in data. + import io + hs_info = ""Hopkins Statistics is not availble for data with text features. 
Unselect text features and retry EDA."" + hs_info_df = pd.read_csv(io.StringIO(hs_info), sep="","") + hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=3,index=False) + #cluster_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row, startcol=1,index=True) + cdf_start_row = 1+len(hopkins_df)+4 + + cluster_info = "" Unsupervised clustering results (Excluding text features) "" + cluster_info_df = pd.read_csv(io.StringIO(cluster_info), sep="","") + cluster_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row-2, startcol=1,index=False) + + cluster_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row, startcol=0,index=False) + + workbook = excel_writer.book + + #excel_writer.save() #Save() is deprecated,instead we need to use close(). + excel_writer.close() + excel_file.seek(0) + response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + edaFileName + return response from django.db import models + + +class usecasedetails(models.Model): + id = models.AutoField(primary_key=True) + UsecaseName = models.CharField(max_length=50) + usecaseid = models.CharField(max_length=10, default=UsecaseName) + Description = models.CharField(max_length=200) + + class Meta: + db_table = ""usecasedetails"" + + +class Existusecases(models.Model): + id = models.AutoField(primary_key=True) + ModelName = models.ForeignKey(usecasedetails, on_delete=models.CASCADE) + Version = models.IntegerField(default=0) + DataFilePath = models.FileField(upload_to=None) + ConfigPath = models.FileField(upload_to=None) + DeployPath = models.FileField(upload_to=None) + Status = models.CharField(max_length=200) + publishStatus = models.CharField(max_length=20, default='') + publishPID = models.IntegerField(default=0) + trainingPID = models.IntegerField(default=0) + driftStatus = models.CharField(max_length=20, default='') + ProblemType = models.CharField(max_length=20, default='') + modelType = models.CharField(max_length=40, default='') + portNo = models.IntegerField(default=0) + TrainOuputLocation = models.CharField(max_length=200, default='') + class Meta: + + db_table = ""Existusecases"" # +# AirflowLib.py +# +# It contains methods to consume rest API of Apache airflow instance +# Apache Airflow exposed experimental API +# One can achieve the API output just by using the methods implemented within this python file by importing the same +# + +import requests +import pandas as pd + + + + +# base_url = 'http://localhost:8080/api/experimental' + + +# It defines the API error which actually raised when error occured during API consumption +from modelTraining.airflow_config import base_url + + +class ApiError(Exception): + """"""An API Error Exception"""""" + + def __init__(self, status): + self.status = status + + def __str__(self): + return ""APIError: status={}"".format(self.status) + + +# This method takes dagId as parameter and return the list of Dag Run from apache airflow instance +def GetDagRunList(dagId): + resp = requests.get(base_url + '/dags/' + dagId + '/dag_runs') + if resp.status_code != 200: + raise ApiError('GetDagRunList {}'.format(resp)) + dfData = ConvertJSONtoDF(resp.json()) + return dfData + + +# It is responsible to create/trigger dag of the Airflow instance +# It takes 2 parameter dagId and paramJson +def TriggerDag(dagId, paramJson): + paramJson = 
{""conf"": ""{\\""key\\"":\\""value\\""}""} + resp = requests.post(base_url + '/dags/' + dagId + '/dag_runs', json=paramJson) + print(resp) + if resp.status_code != 200: + raise ApiError('TriggerDag {}'.format(resp)) + return resp.json() + + +# This method toggle the Dag as off in the airflow instance +def PauseDagRun(dagId): + resp = requests.get(base_url + '/dags/' + dagId + '/paused/true') + if resp.status_code != 200: + raise ApiError('PauseDagRun {}'.format(resp)) + return resp.json() + + +# This method toggle the Dag as on in the airflow instance +def UnPauseDagRun(dagId): + resp = requests.get(base_url + '/dags/' + dagId + '/paused/false') + if resp.status_code != 200: + raise ApiError('UnPauseDagRun {}'.format(resp)) + return resp.json() + + +# It checks if Apache Airflow instance is up and running +def TestAPI(): + resp = requests.get(base_url + '/test') + if resp.status_code != 200: + raise ApiError('TestAPI {}'.format(resp)) + return resp.json() + + +# It return the latest dag run info for each available dag +def GetLatestDagRun(): + resp = requests.get(base_url + '/latest_runs') + if resp.status_code != 200: + raise ApiError('GetLatestDagRun {}'.format(resp)) + dfData = ConvertJSONtoDF(resp.json()['items']) + return dfData + + +# It will return the list of available pools +def GetPoolsList(): + resp = requests.get(base_url + '/pools') + if resp.status_code != 200: + raise ApiError('GetPoolsList {}'.format(resp)) + return resp.json() + + +# It return the specific pool info by pool Name +def GetPoolInfo(poolName): + resp = requests.get(base_url + '/pools/' + poolName) + if resp.status_code != 200: + raise ApiError('GetPoolInfo {}'.format(resp)) + return resp.json() + + +# Return the task info created within the DAG +def GetDagTaskInfo(dagId, taskId): + resp = requests.get(base_url + '/dags/' + dagId + '/tasks/' + taskId) + if resp.status_code != 200: + raise ApiError('GetDagTaskInfo {}'.format(resp)) + return resp.json() + + +# Returns the Paused state of a DAG +def GetDagPausedState(dagId): + resp = requests.get(base_url + '/dags/' + dagId + '/paused') + if resp.status_code != 200: + raise ApiError('GetDagPausedState {}'.format(resp)) + return resp.json() + + +# It will create a pool into the Airflow instance +def CreatePool(name, description, slots): + paramJson = {""description"": description, ""name"": name, ""slots"": slots} + resp = requests.post(base_url + '/pools', json=paramJson) + if resp.status_code != 200: + raise ApiError('CreatePool {}'.format(resp)) + return resp.json() + + +# It is responsible to delete the specific pool by pool Name +def DeletePool(name): + resp = requests.delete(base_url + '/pools/' + name) + if resp.status_code != 200: + raise ApiError('DeletePool {}'.format(resp)) + return resp.json() + + +def ConvertJSONtoDF(jsonData): + df = pd.json_normalize(jsonData) + return df from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import time +from django.template import loader +from django import template +from appbe.aion_config import get_llm_data +from django.views.decorators.csrf import csrf_exempt +import os +import json +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from utils.file_ops import read_df_compressed +from appbe.dataPath import LOG_LOCATION +from appbe.pages import getversion +AION_VERSION = getversion() + +def QueryToOpenAI(text,tempPrompt): + 
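# QueryToOpenAI splits `text` into sentence-based chunks of roughly 2800 tokens (counted with
# tiktoken), sends each chunk to the configured Azure OpenAI completion deployment with
# `tempPrompt` prefixed, and returns the per-chunk summaries joined together. If the API key
# and URL are not configured, or the call times out, an explanatory string is returned instead
# of raising.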
FragmentationAllowed=""yes"" #yes or no + try: + import openai + key,url,api_type,api_version=get_llm_data() + if (key == """") and (url == """") : + print(""No API Key"") + return(""API Key and URL not provided"") + openai.api_key = key + openai.api_base = url + openai.api_type = 'azure' + openai.api_version = '2023-05-15' + deployment_name=""Text-Datvinci-03"" + import tiktoken + encoding = tiktoken.encoding_for_model(""text-davinci-003"") + maxTokens=1024 #4096-1024 == 3072 + lgt=0 + + if FragmentationAllowed==""yes"" : + words = text.split(""."") + chunk="""" + chunks=[] + multipleChunk=""no"" + partialData=""no"" + for i in range(len(words)): + chunk=chunk+words[i]+""."" + chunk_token_count = encoding.encode(chunk) + length=len(chunk_token_count) + partialData=""yes"" + if length > 2800 : + chunks.append(chunk) + chunk="""" + #print(""\\n\\n\\n"") + partialData=""no"" + multipleChunk=""yes"" + if (multipleChunk ==""no"" ): + chunks.append(chunk) + chunk="""" + if ((partialData ==""yes"") and (multipleChunk ==""yes"")): + chunks.append(chunk) + chunk="""" + summaries = [] + for chunk in chunks: + response = openai.Completion.create(engine=deployment_name, prompt=f""{tempPrompt}: {chunk}"",temperature=0.2, max_tokens=maxTokens,frequency_penalty=0,presence_penalty=0) + summary = response['choices'][0]['text'].replace('\\n', '').replace(' .', '.').strip() + summaries.append(summary) + wordsInSum = summary.split() + summaries=' '.join(summaries) + wordsInSum = summaries.split() + return summaries + else : + return ""ok"" + except openai.error.Timeout as e: + return ""exception : Timeout Error due to Network Connection"" + except Exception as e: + return ""exception : ""+str(e) + +def azureOpenAiDavinciSumarization(request): + inputDataType = str(request.GET.get('FileType')) + import time + t1=time.time() + documentType="""" + if inputDataType == 'file': + dataPath = str(request.GET.get('dataPath')) + #print(""Datapath--"",dataPath) + if dataPath.endswith("".pdf""): + from appbe.dataIngestion import pdf2text + originalText=pdf2text(dataPath) + if dataPath.endswith("".txt""): + data=[] + with open(dataPath, ""r"",encoding=""utf-8"") as f: + data.append(f.read()) + str1 = """" + for ele in data: + str1 += ele + originalText=str1 + if dataPath.endswith("".docx""): + import docx + doc = docx.Document(dataPath) + fullText = [] + for para in doc.paragraphs: + fullText.append(para.text) + fullText= '\\n'.join(fullText) + originalText=fullText + if inputDataType == 'rawText': + originalText = str(request.GET.get('textDataProcessing')) + dataPath="""" + if originalText== ""None"" or original" +"Text== """": + context = {'originalText': originalText,'returnedText': ""No Input given""} + print(""returned due to None"") + return render(request, ""textsummarization.html"",context) + KeyWords=str(request.GET.get('userUpdatedKeyword')) + contextOfText=str(request.GET.get('userUpdatedContext')) + doctype = str(request.GET.get('doctypeUserProvided')) + docDomainType = [""medical"",""other""] + Prompts = [ + ""Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"", + ""Summarize the following article with 
minimum 500 words so that summarization include all main points from topics like: "" + ] + for i in range (len(docDomainType)) : + if docDomainType[i] in doctype.lower() : + docDomainPrompts=Prompts[i] + if docDomainType[i]==""medical"" : + print(""medical doc"") + documentType=""medical"" + docDomainFinalPrompts=docDomainPrompts + tempPrompt1=""Summarize the following article so that summarization must include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"" + + tempPrompt2=""Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"" + else : + print(""other doc-a-"") + docDomainFinalPrompts=docDomainPrompts+"" ""+contextOfText + tempPrompt1=""Summarize the following article with minimum 500 words so that summarization include all main points from topics like: ""+contextOfText + tempPrompt2=tempPrompt1 + break + if (i== len(docDomainType)-1) : + print(""other doc-b-"") + docDomainPrompts=Prompts[i] + docDomainFinalPrompts=docDomainPrompts+"" ""+contextOfText + tempPrompt1=""Summarize the following article so that summarization include all main points from topics like: ""+contextOfText + tempPrompt2=tempPrompt1 + + + try: + pattern =['Summary','Study Objective','Study Design', 'Demographics of Patients', 'Devices Used in Study','Duration of Exposure to Device','Study Outcomes','Complications','Adverse Events','Confounding Factors','Study Limitations and Weakness','Usability of the Device','Misuse and Off-Label Use of the Device','Conflict of Interest','Statistical Analysis','Conclusions'] + + + import tiktoken + encoding = tiktoken.encoding_for_model(""text-davinci-003"") + encodedData = encoding.encode(originalText) + totalToken=len(encodedData) + while totalToken > 2800: + originalText=QueryToOpenAI(originalText,tempPrompt1) + encodedData = encoding.encode(originalText) + totalToken=len(encodedData) + retText=QueryToOpenAI(originalText,tempPrompt2) + + import re + summary1=retText + summary2=retText + if documentType==""medical"" : + for i in range(len(pattern)): + summary1=summary1.replace(pattern[i]+':','
\\n'+pattern[i]+'\\n') + for i in range(len(pattern)): + summary1=summary1.replace(pattern[i],'\\n'+pattern[i]+'\\n
') + + for i in range(len(pattern)): + summary2=summary2.replace(pattern[i]+':','') + for i in range(len(pattern)): + summary2=summary2.replace(pattern[i],'') + #retText2="""" + #tempPrompt=""Find some most highlighting points in the following article"" + #retText2=QueryToOpenAI(originalText,tempPrompt) + + #retText3="""" + #tempPrompt=""Find only one or two risk factors that are mentioned in the following article"" + #retText3=QueryToOpenAI(originalText,tempPrompt) + + #retText4="""" + #tempPrompt=""Find statistical informtation that are mentioned in the following article"" + #retText4=QueryToOpenAI(originalText,tempPrompt) + + #retText5="""" + #tempPrompt=""Find name of the author only one time that are mentioned in the following article"" + #retText5=QueryToOpenAI(originalText,tempPrompt) + + #retText6="""" + #tempPrompt=""Suggest the name of the title for the following article"" + #retText6=QueryToOpenAI(originalText,tempPrompt) + + t2=time.time() + #print(""\\n time taken-->"", t2-t1 ,""length of sum"",str(length)) + print(""\\n time taken-->"", t2-t1 ) + #print(""\\n summary from LLM-->\\n"",returnedText) + #context = {'title': retText6, 'summary': summary1, 'summary2': summary2, 'AuthorName': ""Author names :""+retText5,'BulletPoints': retText2,'Riskfactor': retText3,'StatInfo': retText4} + + context = {'title': """", 'summary': summary1, 'summary2': summary2, 'AuthorName': """",'BulletPoints': """",'Riskfactor': """",'StatInfo': """"} + return HttpResponse(json.dumps(context), content_type=""application/json"") + except: + context = {'returnedText': ""exception""} + return HttpResponse(json.dumps(context), content_type=""application/json"") + +def azureOpenAiDavinci(request): + key,url,api_type,api_version=get_llm_data() + inputDataType = str(request.POST.get('FileType')) + if inputDataType == 'file': + Datapath = request.FILES['file'] + #dataPath = str(request.GET.get('dataPath')) + ext = str(Datapath).split('.')[-1] + temp1=str(Datapath).split('.') + filetimestamp = str(int(time.time())) + if ext.lower() in ['pdf','txt','docx']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' +temp1[0]+'_'+filetimestamp+'.'+ext) + #dataFile = os.path.join(DATA_FILE_PATH,'AION_' +filetimestamp+'.'+ext) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + dataPath = dataFile + if dataPath.endswith("".pdf""): + from appbe.dataIngestion import pdf2text + originalText=pdf2text(dataPath) + if dataPath.endswith("".txt""): + data=[] + with open(dataPath, ""r"",encoding=""utf-8"") as f: + data.append(f.read()) + str1 = """" + for ele in data: + str1 += ele + originalText=str1 + if dataPath.endswith("".docx""): + import docx + doc = docx.Document(dataPath) + fullText = [] + for para in doc.paragraphs: + fullText.append(para.text) + fullText= '\\n'.join(fullText) + originalText=fullText + if inputDataType == 'rawText': + originalText = str(request.POST.get('textDataProcessing')) + dataPath="""" + doctype = str(request.POST.get('doctypeUserProvided')) + if originalText== ""None"" or originalText== """": + context = {'originalText': originalText,'returnedText': ""No Input given""} + print(""returned due to None"") + return render(request, ""textsummarization.html"",context) + + length=len(originalText.split()) + inputTextPromptForKeyWords=""Create a list of keywords to summrizing the following document."" + inputTextPromptForKeyWords=""Suggest only ten most important keywords from the following document."" + 
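# Note that the second inputTextPromptForKeyWords assignment above supersedes the first;
# only the ten-keyword prompt is actually sent to the model.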
inputTextPromptForContext=""Suggest ten most important context in the following article. "" + #inputTextPromptForDocType=""Suggest on which domain or field or area the following article is or the article is on sports or politics or medical or music or technology or legal field. "" + try: + tempPrompt=inputTextPromptForKeyWords + retText=QueryToOpenAI(originalText,tempPrompt) + KeyWords=retText + tempPrompt=inputTextPromptForContext + retText=QueryToOpenAI(originalText,tempPrompt) + contextOfText=retText + #tempPrompt=inputTextPromptForDocType + #retText=QueryToOpenAI(originalText,tempPrompt) + #doctype=retText + context = {'originalText': originalText,'KeyWords': KeyWords,'contextOfText': contextOfText,'doctype': doctype,'dataPath' :dataPath} + return HttpResponse(json.dumps(context), content_type=""application/json"") + except Exception as e: + print(e) + context = {'originalText': originalText,'KeyWords': KeyWords,'contextOfText': contextOfText,'doctype': doctype,'dataPath' :dataPath} + return HttpResponse(json.dumps(context), content_type=""application/json"") + +# Text Data Labelling using LLM related changes +# -------------------------------------------------------- +def uploadedTextData(request): + from appbe.dataIngestion import ingestTextData + context = ingestTextData(request,DATA_FILE_PATH) + context['version'] = AION_VERSION + return render(request, 'textdatalabelling.html', context) + +def getTextLabel(request): + from appbe.llm_textdatalabelling import generateTextLabel + context = generateTextLabel(request,DATA_FILE_PATH) + context['version'] = AION_VERSION + return render(request, 'textdatalabelling.html', context) + +def downloadTextLabelReport(request): + file_path = request.session['texttopicdatapath'] + if os.path.exists(file_path): + with open(file_path, 'rb') as fh: + response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path) + return response + raise Http404 + + + +# QnA Generator using LLM related changes +# -------------------------------------------------------- +def genearateQA(request): + from appbe.llm_generateQnA import ingestDataForQA + context = ingestDataForQA(request,DATA_FILE_PATH) + context['version'] = AION_VERSION + context['selected'] = ""llm_features"" + return render(request, 'QnA.html', context) + + +def downloadQnAReport(request): + file_path = request.session['QnAfilepath'] + if os.path.exists(file_path): + with open(file_path, 'rb') as fh: + response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') + response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path) + return response + raise Http404 +# -------------------------------------------------------- from django.apps import AppConfig + + +class ModelTrainingConfig(AppConfig): + name = 'appfe.modelTraining' + from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import json +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getusercasestatus +import pandas as pd +import numpy as np +from appbe.pages import getversion +import logging +import json +import time +import os +from appbe import compute +AION_VERSION = getversion() 
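# Most of the trusted-AI views defined below (e.g. sensitivityAnalysis, fairnesmetrics,
# performance_metrics) share one shape: record telemetry, call a helper from the trusted_ai
# package, time the call, and return its result as an HTTP/JSON response, falling back to an
# error payload when the helper raises. A minimal sketch of that shared pattern, reusing the
# json/time/logging/HttpResponse imports above; `backend_call` is a hypothetical stand-in for
# helpers such as trusted_ai.fairness_metrics.get_metrics:
def trusted_ai_view_sketch(request, backend_call, label='TrustedAI view'):
    log = logging.getLogger('log_ux')
    start = time.time()
    try:
        payload = backend_call(request)
        log.info('%s : %s sec : Success' % (label, round(time.time() - start)))
        return HttpResponse(json.dumps(payload), content_type='application/json')
    except Exception as exc:
        log.info('%s : Error : %s' % (label, str(exc)))
        return HttpResponse(json.dumps({'Status': '', 'msg': str(exc)}))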
+def sensitivityAnalysis(request): #usnish + from appbe.pages import usecases_page + t1 = time.time() + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + log = logging.getLogger('log_ux') + computeinfrastructure = compute.readComputeConfig() + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + + try: + from trusted_ai.sensitivity_analysis import startSA + + + # request.session['deploypath'] = str(p.DeployPath) + sensitivitystr= startSA(request) + sensitivitystr = json.loads(sensitivitystr) + + ModelStatus = request.session['ModelStatus'] + if sensitivitystr['Status']=='Success': + sensitivityGraph = sensitivitystr['graph'] + + + t2 = time.time() + log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + return HttpResponse(json.dumps(sensitivitystr)) + else: + error = sensitivitystr['reason'] + raise Exception(error) + except Exception as e: + print(e) + log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to Perform Sensitivity Analysis, ' + str(e)) + outputstr = json.dumps({'Status':'','msg':'Failed to Perform Sensitivity Analysis. '+str(e)}) + return HttpResponse(outputstr) + +def handlefairness(request): + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + updatedConfigFile = request.session['config_json'] + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + problemType = 'classification' + for key in configSettings['basic']['analysisType']: + if configSettings['basic']['analysisType'][key] == 'True': + problemType = key + break + trainingfeature = configSettings['basic']['trainingFeatures'] + targetfeature = configSettings['basic']['targetFeature'] + featuretype = configSettings['advance']['profiler']['featureDict'] + catfeature = [] + for feat_conf in featuretype: + colm = feat_conf.get('feature', '') + if feat_conf['type'] == ""c" +"ategorical"": + catfeature.append(feat_conf['feature']) + output={'targetfeature':targetfeature,'trainingfeature':trainingfeature,'catfeature':catfeature,'problemType':problemType} + return HttpResponse(json.dumps(output)) + +def fairnesmetrics(request): #Richard--Task-13581 + from appbe.pages import usecases_page + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + t1 = time.time() + log = logging.getLogger('log_ux') + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + try: + from trusted_ai.fairness_metrics import get_metrics + output = get_metrics(request) + t2 = time.time() + log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + return HttpResponse(output) + except Exception as e: + print(e) + log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to diaplay Fairness Metrics, ' + str(e)) + return HttpResponse('') + +def performance_metrics(request): + from appbe.pages import usecases_page + from appbe.telemetry import UpdateTelemetry + 
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + t1 = time.time() + log = logging.getLogger('log_ux') + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + try: + from trusted_ai.performance import get_metrics + output = get_metrics(request) + t2 = time.time() + log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + print( output) + return HttpResponse(json.dumps(output)) + except Exception as e: + print(e) + log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to diaplay Performance Metrics, ' + str(e)) + return HttpResponse('') + +def uquncertainty(request): + from trusted_ai.trustedai_uq import trustedai_uq + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + output = trustedai_uq(request) + return HttpResponse(output) + +def uqtransparency(request): + t1 = time.time() + log = logging.getLogger('log_ux') + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes') + selected_use_case = request.session['UseCaseName'] + model_version = request.session['ModelVersion'] + try: + deploypath = request.session['deploypath'] + configpath = os.path.join(deploypath,'etc','display.json') + f = open(configpath, ""r"") + configSettings = f.read() + f.close() + configSettings = json.loads(configSettings) + problemType = configSettings['problemType'] + model_Features = configSettings['modelFeatures'] + if problemType.lower() == 'classification': + from trusted_ai.brier_score import get_brier_score + problem_type, brier_score = get_brier_score(request) + display_dict = {""ProblemType"":problem_type.title(),""BrierScore"":round(brier_score, 2),'model_Features':model_Features,'problemTypeuq':problemType} + else: + display_dict = {""ProblemType"":problemType,""BrierScore"":'','model_Features':model_Features,'problemTypeuq':problemType} + display_json = json.dumps(display_dict) + t2 = time.time() + log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str( + model_version) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + return HttpResponse(display_json, content_type=""application/json"") + except Exception as e: + print(e) + log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str( + model_version) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to diaplay Brier Score, ' + str(e)) + return HttpResponse('') from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +from appbe.pages import getusercasestatus +from appbe.pages import getversion +AION_VERSION = getversion() +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +import os +from django.db.models import Max, F +import pandas as pd +from appbe.publish import check_input_data +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +import json +from appbe import compute + +import logging +def get_instance_id(modelID): + from appbe.sqliteUtility import sqlite_db + from appbe.dataPath import DATA_DIR + file_path = 
os.path.join(DATA_DIR,'sqlite') + sqlite_obj = sqlite_db(file_path,'config.db') + if sqlite_obj.table_exists(""LLMTuning""): + data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) + print(data) + if len(data) > 0: + return (data[3]+' instance '+data[2]) + else: + return 'Instance ID not available' + else: + return 'Instance ID not available' + +def PredictForSingleInstance(request): + from appbe.trainresult import ParseResults + submittype = request.POST.get('predictsubmit') + from appbe.prediction import singleInstancePredict + context = singleInstancePredict(request,Existusecases,usecasedetails) + if submittype.lower() == 'predict': + from appbe.train_output import get_train_model_details + trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request) + imagedf = '' + + model_count = Existusecases.objects.filter(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'],Status='SUCCESS').count() + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"", encoding=""utf-8"") + training_output = f.read() + f.close() + + result,survical_images = ParseResults(training_output) + context.update({'result':result}) + context['version'] = AION_VERSION + context['modelType'] = modelType + context['bestmodel'] = bestmodel + return render(request, 'prediction.html', context) + else: + context['version'] = AION_VERSION + return context +def getTrainingStatus(request): + model = Existusecases.objects.get(ModelName=request.session['ModelName'],Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"", encoding=""utf-8"") + training_output = f.read() + f.close() + from appbe.trainresult import FeaturesUsedForTraining + return FeaturesUsedForTraining(training_output) +def Prediction(request): + log = logging.getLogger('log_ux') + from appbe.trainresult import ParseResults + from appbe.dataIngestion import delimitedsetting + from appbe import service_url + from appbe.aion_config import settings + usecasetab = settings() + try: + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + #print(computeinfrastructure) + if ModelStatus != 'SUCCESS': + log.info('Prediction:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first or launch an existing trained model') + return render(request, 'prediction.html', { + 'error': 'Please train the model first or launch an existing trained model', + 'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION}) + else: + if 'ModelVersion' not in request.session: + log.info('Prediction:' + str(selected_use_case) + ':' + str( + ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first') + return render(request, 'prediction.html', + {'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION}) + elif request.session['ModelVersion'] == 0: + log.info('Prediction:' + str(selected_use_case) + ':' + str( + ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first') + return 
render(request,'prediction.html',{'usecasetab':usecasetab,'error':'Please train the model first','selected':'prediction','version':AION_VERSION}) + else: + from appbe.train_output import get_train_model_details + trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request) + imagedf = '' + + model_count = Existusecases.objects.filter(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'],Status='SUCCESS').count() + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + + + result,survical_images = ParseResults(training_output) + if model_count >= 1: + updatedConfigFile = request.session['config_json'] + #print(updatedConfigFile) + f = open(updatedConfigFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + analysisType = configSettingsJson['basic']['analysisType'] + problem_type = """" + for k in analysisType.keys(): + if configSettingsJson['basic']['analysisType'][k] == 'True': + problem_type = k + break + if problem_type.lower() == 'recommendersystem': + modelName = """" + recommender_models = configSettingsJson['basic']['algorithms']['recommenderSystem'] + for k in recommender_models.keys(): + if configSettingsJson['basic']['algorithms']['recommenderSystem'][k] == 'True': + modelName = k + break + if modelName.lower() == 'associationrules-apriori': + return render(request, 'prediction.html', { + 'error': 'Prediction not supported for Association Rules (Apriori)', + 'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION}) + delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier']) + #problemtypes = configSettingsJson['basic']['analysisType'] + #print(problemtypes.keys()) + from appfe.modelTraining.train_views import getMLModels + problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson) + iterName = request.session['usecaseid'].replace("" "", ""_"") + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + if problem_type == 'timeSeriesForecasting': #task 11997 + inputFieldsDict = {'noofforecasts': 10} + elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating': + inputFieldsDict = {""uid"": 1, ""numberOfRecommendation"":10} #Task 11190 + elif problem_type == 'stateTransition': + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + if inputFeatures != '': + inputFeaturesList = inputFeatures.split(',') + else: + inputFeaturesList = [] + inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} + else: + inputFeatures = configSettingsJson['basic']['trainingFeatures'] + targetFeature = configSettingsJson['basic']['targetFeature'] + if inputFeatures != '': + inputFeaturesList = inputFeatures.split(',') + else: + inputFeaturesList = [] + if targetFeature in inputFeaturesList: + inputFeaturesList.remove(targetFeature) + + if configSettingsJson['basic']['contextFeature'] != '': + 
inputFeaturesList.append(configSettingsJson['basic']['contextFeature']) + if problem_type == 'llmFineTuning': + inputFeaturesList.append('Temperature') + inputFeaturesList.append('Max Tokens') + if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997 + if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na': + inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) + + dataFilePath = str(configSettingsJson['basic']['dataLocation']) + if problem_type != 'llmFineTuning': + if os.path.isfile(dataFilePath): + df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') + " +" try: + inputFieldsDict = df.to_dict(orient='index')[0] + except: + inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict() + else: + inputFieldsDict = {""File"":""EnterFileContent""} + else: + inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict() + inputFieldsDict['Temperature'] = '0.1' + from appbe.prediction import get_instance + hypervisor,instanceid,region,image = get_instance(iterName+'_'+str(ModelVersion)) + if hypervisor.lower() == 'aws': + inputFieldsDict['Max Tokens'] = '1024' + else: + inputFieldsDict['Max Tokens'] = '4096' + inputFields = [] + inputFields.append(inputFieldsDict) + settings_url = '' + if problem_type == 'llmFineTuning': + ser_url = get_instance_id(iterName+'_'+str(ModelVersion)) + settings_url = '' + modelSize = '' + if 'modelSize' in configSettingsJson['basic']: + selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels] + for k in selectedModelSize.keys(): + if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True': + modelSize = k + break + mlmodels = mlmodels+'-'+modelSize + elif problem_type == 'stateTransition': + ser_url = service_url.read_service_url_params(request) + settings_url = service_url.read_service_url_params(request) + ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+iterName+'&version='+str(ModelVersion) + settings_url = settings_url+'pattern_anomaly_settings?usecaseid='+iterName+'&version='+str(ModelVersion) + else: + ser_url = service_url.read_service_url_params(request) + ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion) + onnx_runtime = False + analyticsTypes = problem_type + usecasename = request.session['usecaseid'].replace("" "", ""_"") + + return render(request, 'prediction.html', + {'inputFields': inputFields,'usecasename':usecasename,'mlmodels':mlmodels,'configSettingsJson':configSettingsJson,'result':result,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url, + 'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'modelType':modelType,'bestmodel':bestmodel,'usecasetab':usecasetab}) + else: + log.info('Prediction; Error: Please train the model first') + return render(request, 'prediction.html', + {'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION}) + except Exception as e: + print(e) + log.info('Prediction:' + str(selected_use_case) + ':' + str( + ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error:'+str(e)) + return render(request, 'prediction.html',{'usecasetab':usecasetab,'error': 
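# The first sampled row of the training data becomes a {feature: value} dict that prefills the
# single-instance prediction form; if that conversion fails, a zero-filled dict over
# inputFeaturesList is used instead.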
'Failed to perform prediction', 'selected': 'prediction','version':AION_VERSION}) from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +from appbe.pages import getusercasestatus +from appbe.pages import getversion +AION_VERSION = getversion() +from appfe.modelTraining.models import usecasedetails +from appfe.modelTraining.models import Existusecases +import os +from django.db.models import Max, F +import pandas as pd +from appbe.publish import check_input_data +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe import installPackage +import json +from appbe import service_url +from appbe import compute +import sys +import csv +import time +from appbe.training import checkModelUnderTraining +import logging +def Distribution(request): + from appbe import exploratory_Analysis as ea + log = logging.getLogger('log_ux') + from appbe.aion_config import settings + usecasetab = settings() + computeinfrastructure = compute.readComputeConfig() + try: + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes') + t1 = time.time() + model = Existusecases.objects.get(ModelName=request.session['ModelName'], + Version=request.session['ModelVersion']) + + output_train_json_filename = str(model.TrainOuputLocation) + f = open(output_train_json_filename, ""r+"") + training_output = f.read() + f.close() + training_output = json.loads(training_output) + featuresused = training_output['data']['featuresused'] + feature = eval(featuresused) + dataFilePath = request.session['datalocation'] + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + ser_url = service_url.read_monitoring_service_url_params(request) + iterName = request.session['usecaseid'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion) + pser_url = service_url.read_performance_service_url_params(request) + pser_url = pser_url+'performanceusecaseid='+iterName+'&version='+str(ModelVersion) + if request.POST.get('inputdriftsubmit') == 'trainingdatadrift': + historicadata = request.session['datalocation'] + trainingdf = pd.read_csv(historicadata) + trainingDrift = ea.getDriftDistribution(feature, trainingdf) + newDataDrift = '' + concatDataDrift = '' + drift_msg = '' + driftdata = 'NA' + else: + historicadata = request.session['datalocation'] + trainingdf = pd.read_csv(historicadata) + trainingDrift = '' + type = request.POST.get(""optradio"") + if type == ""url"": + try: + url = request.POST.get('urlpathinput') + newdatadf = pd.read_csv(url) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv') + newdatadf.to_csv(dataFile, index=False) + + request.session['drift_datalocations']= dataFile + driftdata = request.session['drift_datalocations'] + except Exception as e: + request.session['currentstate'] = 0 + e = str(e) + if e.find(""tokenizing"")!=-1: + error = ""This is not an open source URL to access data"" + elif e.find(""connection"")!=-1: + error = ""Can not access the URL through HCL network, please try with other network"" + else: + error = 'Please provide a correct URL' + context = 
{'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','s3buckets': get_s3_bucket(),'gcsbuckets':get_gcs_bucket(), + 'kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab} + log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', ' + e) + return render(request, 'upload.html', context) + else: + + if request.FILES: + Datapath = request.FILES['DataFilePath'] + from io import StringIO + content = StringIO(Datapath.read().decode('utf-8')) + reader = csv.reader(content) + df = pd.DataFrame(reader) + df.columns = df.iloc[0] + df = df[1:] + ext = str(Datapath).split('.')[-1] + filetimestamp = str(int(time.time())) + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + if(os.path.isfile(dataFile) == False): + context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case, + ' ModelStatus': ModelStatus, 'ModelVersion': ModelVersion} + log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist') + return render(request, 'inputdrif.html', context) + request.session['drift_datalocations'] = dataFile + driftdata = request.session['drift_datalocations'] + newdatadf = pd.read_csv(driftdata) + + newDataDrift = ea.getDriftDistribution(feature, trainingdf, newdatadf) + condf = pd.concat([trainingdf, newdatadf], ignore_index=True, sort=True) + concatDataDrift = ea.getDriftDistribution(feature,trainingdf,condf) + drift_msg,htmlPath = Drift(request,historicadata, dataFile, feature) + + if htmlPath != 'NA': + file = open(htmlPath, ""r"",errors='ignore') + driftdata = file.read() + file.close() + else: + driftdata = 'NA' + t2 = time.time() + log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success') + + return render(request, 'inputdrif.html', + {'trainingDrift': trainingDrift, 'newDataDrift': newDataDrift, 'concatDataDrift': concatDataDrift,'usecasetab':usecasetab, + 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version' :AION_VERSION, + 'selected': 'monitoring', 'drift_msg': drift_msg,'htmlPath':driftdata,'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure}) + + except Exception as inst: + print(inst) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + selected_use_case = request.session['UseCaseName'] + ModelVersion = request.session['ModelVersion'] + ModelStatus = request.session['ModelStatus'] + ser_url = service_url.read_monitoring_service_url_params(request) + iterName = request.session['usecaseid'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion) + pser_url = service_url.read_performance_service_url_params(request) + pser_url = pser_url+'performanceusecaseid='+iterName+'&version='+str(ModelVersion) + context 
= {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Failed to perform drift analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version' : AION_VERSION} + log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to do drift analysis'+', '+str(inst)) + log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + return render(request, 'inputdrif.html', context) + +def Drift(request,trainingdatalocation, newdatalocation, features): + log = logging.getLogger('log_ux') + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + try: + inputFieldsJson = {""trainingDataLocation"":trainingdatalocation,""currentDataLocation"":newdatalocation} + inputFieldsJson = json.dumps(inputFieldsJson) + iterName = request.session['usecaseid'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = service_url.read_monitoring_service_url_params(request) + ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion) + import requests + try: + #print(inputFieldsJson) + #print(ser_url) + response = requests.post(ser_url,data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + if response.status_code != 200: + outputStr=response.content + return outputStr + except Exception as inst: + print(inst) + if 'Failed to establish a new connection' in str(inst): + Msg = 'AION Service needs to be started' + else: + Msg = 'Error during Drift Analysis' + log.info('Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + Msg+', '+str(inst)) + return Msg + outputStr=response.content + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + decoded_data = json.loads(outputStr) + #print(decoded_data) + htmlPath = 'NA' + if decoded_data['status'] == 'SUCCESS': + data = decoded_data['data'] +" +" htmlPath = decoded_data['htmlPath'] + if 'Message' in data: + Msg = [] + Msg.append(data['Message']) + else: + Msg = data['Affected Columns'] + log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Success') + else: + Msg = 'Error during Drift Analysis' + htmlPath = 'NA' + log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(Msg)) + return Msg,htmlPath + except Exception as e: + print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + log.info('Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + str(e)) + log.info('Details : ' +str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + +def Evaluate(request): + from appbe.aion_config import settings + usecasetab = settings() + log = logging.getLogger('log_ux') + try: + from appbe.telemetry import UpdateTelemetry + UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes') + t1 = time.time() + selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) + computeinfrastructure = compute.readComputeConfig() + type = request.POST.get(""optradio"") + 
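# --- Editor's sketch (assumption: not part of the original source; the endpoint and file paths are placeholders) ---
# Standalone version of the request/response handling in Drift() above. The payload keys
# and the 'status'/'data'/'htmlPath' fields mirror what the view sends and reads; they are
# not taken from separate service documentation.
import json
import requests

def call_drift_service(monitoring_url, training_csv, current_csv):
    '''POST the drift payload and return (message, html_report_path).'''
    payload = json.dumps({'trainingDataLocation': training_csv,
                          'currentDataLocation': current_csv})
    try:
        response = requests.post(monitoring_url, data=payload,
                                 headers={'Content-Type': 'application/json'})
    except requests.exceptions.ConnectionError:
        return 'AION Service needs to be started', 'NA'
    if response.status_code != 200:
        return response.content, 'NA'
    decoded = json.loads(response.content.decode('utf-8').strip())
    if decoded.get('status') == 'SUCCESS':
        data = decoded['data']
        message = [data['Message']] if 'Message' in data else data['Affected Columns']
        return message, decoded.get('htmlPath', 'NA')
    return 'Error during Drift Analysis', 'NA'

# Example with a hypothetical endpoint built the same way ser_url is built above:
# call_drift_service('http://127.0.0.1:8094/AION/monitoring?usecaseid=demo&version=1',
#                    'training.csv', 'newdata.csv')
# --- end of editor's sketch ---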
ser_url = service_url.read_monitoring_service_url_params(request) + iterName = request.session['usecaseid'].replace("" "", ""_"") + ModelVersion = request.session['ModelVersion'] + ser_url = ser_url+'monitoring?usecaseid='+iterName+'_'+str(ModelVersion) + pser_url = service_url.read_performance_service_url_params(request) + pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion) + if type == ""url"": + try: + url = request.POST.get('urlpathinput') + newdatadf = pd.read_csv(url) + filetimestamp = str(int(time.time())) + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv') + newdatadf.to_csv(dataFile, index=False) + except Exception as e: + request.session['currentstate'] = 0 + e = str(e) + if e.find(""tokenizing"")!=-1: + error = ""This is not an open source URL to access data"" + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', '+str(e)) + elif e.find(""connection"")!=-1: + error = ""Can not access the URL through HCL network, please try with other network"" + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + error +', '+e) + else: + error = 'Please provide a correct URL' + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:' + error+', '+e) + context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab,'version':AION_VERSION} + return render(request, 'upload.html', context) + else: + if request.FILES: + Datapath = request.FILES['DataFilePath'] + from io import StringIO + content = StringIO(Datapath.read().decode('utf-8')) + reader = csv.reader(content) + df = pd.DataFrame(reader) + df.columns = df.iloc[0] + df = df[1:] + + ext = str(Datapath).split('.')[-1] + filetimestamp = str(int(time.time())) + if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) + else: + dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) + with open(dataFile, 'wb+') as destination: + for chunk in Datapath.chunks(): + destination.write(chunk) + destination.close() + if(os.path.isfile(dataFile) == False): + context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version':AION_VERSION} + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + ' Error : Data file does not exist') + return render(request, 'inputdrif.html', context) + trainingdatalocation = request.session['datalocation'] + inputFieldsJson = {""trainingDataLocation"":trainingdatalocation,""currentDataLocation"":dataFile} + inputFieldsJson = json.dumps(inputFieldsJson) + import requests + try: + #response = requests.post(pser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + response = requests.post(pser_url,data=inputFieldsJson,headers={""Content-Type"":""application/json"",}) + if response.status_code != 200: + outputStr=response.content + log.info('Performance Drift:' + str(selected_use_case) + ' : ' 
+ str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error: Status code != 200') + return outputStr + except Exception as inst: + if 'Failed to establish a new connection' in str(inst): + Msg = 'AION Service needs to be started' + else: + Msg = 'Error during Drift Analysis' + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0 ' + 'sec' + ' : ' + 'Error : '+Msg+', ' + str(inst)) + return Msg + outputStr=response.content + + outputStr = outputStr.decode('utf-8') + outputStr = outputStr.strip() + + decoded_data = json.loads(outputStr) + + + #print(decoded_data) + if decoded_data['status'] == 'SUCCESS': + htmlPath = decoded_data['htmlPath'] + #print(htmlPath) + if htmlPath != 'NA': + file = open(htmlPath, ""r"",errors='ignore') + driftdata = file.read() + file.close() + else: + driftdata = 'NA' + print(htmlPath) + context = {'status':'SUCCESS','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + t2 = time.time() + log.info('Performance Drift:' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + str(round(t2-t1)) + 'sec' + ' : ' + 'Success') + return render(request, 'inputdrif.html', context=context) + + else: + driftdata = 'Error' + context = {'status':'ERROR','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : driftdata = Error') + return render(request, 'inputdrif.html', context=context) + except Exception as e: + print(e) + context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Fail to perform Drift Analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab, + 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION} + log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str( + ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Fail to perform Drift Analysis' + ', ' + str(e)) + return render(request, 'inputdrif.html', context=context) from django.shortcuts import render +from django.urls import reverse +from django.http import HttpResponse +from django.shortcuts import redirect +import json +from appbe.dataPath import DEFAULT_FILE_PATH +from appbe.dataPath import DATA_FILE_PATH +from appbe.dataPath import CONFIG_FILE_PATH +from appbe.dataPath import DEPLOY_LOCATION +from appbe.pages import getusercasestatus +import pandas as pd +import numpy as np +from appbe.pages import getversion +import logging +import json +import time +import os +import subprocess +import sys +import base64 +from appbe import compute +import urllib + +AION_VERSION = getversion() + +def Sagemaker(request): + + if request.method == ""POST"": + try: + datafile = request.POST['datap'] + endpoint = request.POST['endpoint'] + awsaccountid = request.POST['awsaccountid'] + accesskeyid = 
request.POST['accesskeyid'] + secretaccesskey = request.POST['secretaccesskey'] + sessionToken = request.POST['sessionToken'] + region = request.POST['region'] + if (awsaccountid != """" and accesskeyid != """" and secretaccesskey != """" and sessionToken != """" and endpoint != """") : + awsSagemaker = {} + awsSagemaker['awsID'] = awsaccountid + awsSagemaker['accesskeyID'] = request.POST['accesskeyid'] + awsSagemaker['secretAccesskey'] = request.POST['secretaccesskey'] + awsSagemaker['sessionToken'] = request.POST['sessionToken'] + awsSagemaker['region'] = request.POST['region'] + configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + configSettingsJson['awsSagemaker'] = awsSagemaker + if(os.path.exists(datafile)): + inputDataType = datafile.rsplit('.', 1)[1] + if inputDataType.lower() == 'csv': + df = pd.read_csv(datafile) + # df1 = df.iloc[0, :] + df2 = df.head(1) + df3 =df2.to_dict(orient='records')[0] + inputFields = [] + inputFields.append(df3) + # models = models.rsplit('.', 1)[1] + context = {'sagepredict':'sagepredict','endpoint':endpoint,'datafile':datafile,'inputFields':inputFields,'sagemaker':configSettingsJson,'version':AION_VERSION} + else: + context = {'exception':'exception','error':'Data File Error','version':AION_VERSION} + else: + context = {'error': 'Error: Please enter valid input','runtimeerror':'runtimeerror','version':AION_VERSION} + except Exception as e: + context = {'exception':'exception','error':'Exception :'+str(e),'sagepredict':'sagepredict','version':AION_VERSION} + return render(request, 'ConvertOnnx.html',context) + +def Tfliteneural(request): + try: + if request.method == ""POST"": + try: + models = request.POST['model1'] + datafile = request.POST['datafile1'] + if(os.path.isfile(models)): + modelformat = models.rsplit('.', 1)[1] + if(os.path.isfile(models) and os.path.exists(datafile) and modelformat.lower()=='tflite'): + inputDataType = datafile.rsplit('.', 1)[1] + if inputDataType.lower() == 'csv': + df = pd.read_csv(datafile) + df2 = df.head(1) + df3 =df2.to_dict(orient='records')[0] + inputFields = [] + inputFields.append(df3) + context = {'mlalgotf':'mlalgotf','models':models,'datafile':datafile,'inputFields':inputFields,'selected':'mllite','version':AION_VERSION} + elif inputDataType.lower() == 'jpg': + from PIL import Image + img = Image.open(datafile) + string = base64.b64encode(open(datafile, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + context = {'dlalgotf':'dlalgotf','models':models,'datafile':datafile,'im':image_64,'selected':'mllite','version':AION_VERSION} + else: + context={'error':'Either model path or data path does not exists','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION} + except Exception as e: + context={'error':'Exception i.e., '+str(e),'runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION} + return render(request, 'ConvertOnnx.html',context) + except: + context={'error':'Failed to perform TFlite Runtime Prediction','runtimeerror':'runtimeerror','selected':'mllite'} + return render(request, 'ConvertOnnx.html',context) + +def openneural(request): + try: + + if request.method == ""POST"": + models = request.POST['model'] + datafile = request." 
+"POST['datafile'] + if(os.path.isfile(models)): + modelformat = models.rsplit('.', 1)[1] + if(os.path.isfile(models) and os.path.exists(datafile)) and modelformat.lower()=='onnx': + inputDataType = datafile.rsplit('.', 1)[1] + + if inputDataType.lower() == 'csv': + df = pd.read_csv(datafile) + df2 = df.head(1) + df3 =df2.to_dict(orient='records')[0] + inputFields = [] + inputFields.append(df3) + # models = models.rsplit('.', 1)[1] + context = {'mlalgo':'mlalgo','models':models,'datafile':datafile,'selected':'mllite','inputFields':inputFields,'version':AION_VERSION} + elif inputDataType.lower() == 'jpg': + + from PIL import Image + img = Image.open(datafile) + string = base64.b64encode(open(datafile, ""rb"").read()) + image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) + + context = {'dlalgo':'dlalgo','models':models,'datafile':datafile,'im':image_64,'selected':'mllite','version':AION_VERSION} + else: + context={'error':'Either model path or data path does not exists','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION} + return render(request, 'ConvertOnnx.html',context) + except: + context={'error':'Failed to perform ONNX Runtime Prediction','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION} + return render(request, 'ConvertOnnx.html',context) + +def ConvertOnnx(request): + try: + if request.method == ""POST"": + + modelpath = request.POST['models'] + deploypath = request.POST['deploy'] + outputonnx = request.POST['outputonnx'] + inputtonnx = request.POST['inputtonnx'] + outputonnx = request.POST['outputonnx'] + Features = request.POST['Features'] + modelinput = inputtonnx + modeloutput = outputonnx + + if (os.path.exists(modelpath) == False) and (outputonnx !=""sagemaker"") and (os.path.exists(deploypath) == False): + context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'error2':'error2','convert':'convert','logfile':'','selected':'mllite','version':AION_VERSION} + elif outputonnx !=""sagemaker"": + + filetimestamp = str(int(time.time())) + + convjson = os.path.join(DEFAULT_FILE_PATH, 'conversion.json') + with open(convjson, 'r+') as f: + conv = json.load(f) + f.close() + conv['basic']['modelName'] = 'conversion_'+ str(filetimestamp) + conv['basic']['modelVersion'] = ""1"" + conv['advance']['aionConversionUtility']['modelpath'] = modelpath + conv['advance']['aionConversionUtility']['deployedlocation'] = deploypath + conv['advance']['aionConversionUtility']['numberoffeatures'] = Features + temp = {} + temp['inputModelType'] = inputtonnx + temp['outputModelType'] = outputonnx + inputtype = conv['advance']['aionConversionUtility']['inputModelType'] + outputtype = conv['advance']['aionConversionUtility']['outputModelType'] + for i in list(inputtype.keys()): + conv['advance']['aionConversionUtility']['inputModelType'][i] = 'False' + for i in list(outputtype.keys()): + conv['advance']['aionConversionUtility']['outputModelType'][i] = 'False' + + conv['advance']['aionConversionUtility']['inputModelType'][temp['inputModelType'][0].lower() + temp['inputModelType'][1:]] = 'True' + conv['advance']['aionConversionUtility']['outputModelType'][temp['outputModelType'][0].lower() + temp['outputModelType'][1:]] = 'True' + + + conv = json.dumps(conv) + config_json_filename = os.path.join(CONFIG_FILE_PATH, 'conv' + filetimestamp + '.json') + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(conv) + fpWrite.close() + scriptPath = 
os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py')) + try: + outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','convertmodel','-c',config_json_filename]) + outputStr = outputStr.decode('utf-8') + outputStr= outputStr.replace('\\'','\\""') + #print('ou',outputStr) + outputStr = outputStr.strip() + MLlite = json.loads(outputStr) + logsfile = MLlite['logfiles'] + if MLlite['Convert'] == 'Success': + context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'convert1':'convert1','convert':'convert','logfile':MLlite['logfiles'],'selected':'mllite','version':AION_VERSION} + else: + logfile = logsfile.replace('\\\\','@') + context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'error1':'error1','convert':'convert','logfile':logfile,'selected':'mllite','version':AION_VERSION} + except Exception as e: + print(e) + context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'Notconvert':'Notconvert','convert':'convert','version':AION_VERSION} + elif ( outputonnx ==""sagemaker"") : + configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json') + #print(configFile) + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + configSettingsJson['modelInput'] = request.POST.get('ModelInput') + #print('pushonly:',request.POST.get('sagemaker')) + if request.POST.get('sagemaker') == 'CreateDeploy': + configSettingsJson['sagemakerDeploy'] = 'True' + configSettingsJson['deployExistingModel']['status'] = 'False' + else: + configSettingsJson['sagemakerDeploy'] = 'False' + if request.POST.get('sagemaker') == 'DeployOnly': + configSettingsJson['deployExistingModel']['status'] = 'True' + else: + configSettingsJson['deployExistingModel']['status'] = 'False' + + #configSettingsJson['deployExistingModel']['status'] = request.POST.get('Status') + configSettingsJson['deployExistingModel']['dockerImageName'] = request.POST.get('imagename') + configSettingsJson['deployExistingModel']['deployModeluri'] = request.POST.get('deploymodel') + configSettingsJson['modelOutput']['cloudInfrastructure'] = request.POST.get('problemname') + configSettingsJson['endpointName'] = request.POST.get('endpointname') + + configSettingsJson['awsSagemaker']['awsID'] = request.POST.get('awskeyid1') + configSettingsJson['awsSagemaker']['accesskeyID'] = request.POST.get('accesskey1') + configSettingsJson['awsSagemaker']['secretAccesskey'] = request.POST.get('secretaccess1') + configSettingsJson['awsSagemaker']['sessionToken'] = request.POST.get('token1') + configSettingsJson['awsSagemaker']['region'] = request.POST.get('region1') + configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn'] = request.POST.get('fullaccess') + + conv = json.dumps(configSettingsJson) + ''' + filetimestamp = str(int(time.time())) + config_json_filename = os.path.join(CONFIG_FILE_PATH, 'Sagemaker' + filetimestamp + '.json') + + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(conv) + fpWrite.close() + ''' + from bin.aion_sagemaker import aion_sagemaker + aion_sagemaker(configSettingsJson) + #print(conv) + #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_sagemaker.py')) + + #outputStr = subprocess.check_output([sys.executable, scriptPath, conv]) + #outputStr = 
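# --- Editor's sketch (assumption: not part of the original source; paths and config values are placeholders) ---
# Pattern used by ConvertOnnx() above and by sageprediction() below: run an AION entry
# point in a child process and parse what it prints on stdout.
import json
import os
import re
import subprocess
import sys
import tempfile

def run_aion_module(script_path, module_name, config_dict):
    '''Write config_dict to a temp JSON file, run `python <script> -m <module> -c <file>`
    and return the parsed JSON printed on stdout.'''
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fp:
        json.dump(config_dict, fp)
        config_path = fp.name
    try:
        out = subprocess.check_output([sys.executable, script_path,
                                       '-m', module_name, '-c', config_path])
        return json.loads(out.decode('utf-8').strip())
    finally:
        os.unlink(config_path)

def extract_predictions_json(stdout_text):
    '''Pull the JSON that follows 'predictions:' on stdout, as sageprediction() does
    (that function relies on re.search, so its module also needs `import re`).'''
    match = re.search(r'predictions:(.*)', stdout_text, re.IGNORECASE)
    return json.loads(match.group(1).strip()) if match else None

# Example (hypothetical paths/config):
# result = run_aion_module('aion.py', 'convertmodel', {'basic': {'modelName': 'conversion_demo'}})
# if result.get('Convert') == 'Success': print(result['logfiles'])
# --- end of editor's sketch ---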
outputStr.decode('utf-8') + #outputStr=outputStr.strip() + #print('kir',outputStr) + + context = {'convert':'convert','sagemaker1':'sagemaker1','mlflow':'mlflow','inputtype':modelinput,'outputtype':modeloutput,'deploy':outputStr,'selected':'mllite','version':AION_VERSION} + + else: + context={'exception':'exception','error':'Please Enter Valid Inputs','selected':'mllite','version':AION_VERSION} + except Exception as e: + print(e) + context={'exception':'exception','error':'Error during Conversion','selected':'mllite','version':AION_VERSION} + return render(request, 'ConvertOnnx.html',context) + +def sageprediction(request): + #print(""=========asdecdefefefefefefefef======="") + values = request.POST['value'] + keys = request.POST['keys'] + endpoint = request.POST['endpointname'] + x = keys.split("","") + y = values.split("","") + dictionary = {key:value for key, value in zip(x,y)} + awsSagemaker={} + awsSagemaker['awsID'] = request.POST['awsaccountid'] + awsSagemaker['accesskeyID'] = request.POST['accesskeyid'] + awsSagemaker['secretAccesskey'] = request.POST['secretaccesskey'] + awsSagemaker['sessionToken'] = request.POST['sessionToken'] + awsSagemaker['region'] = request.POST['region'] + configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json') + f = open(configFile, ""r"") + configSettings = f.read() + f.close() + configSettingsJson = json.loads(configSettings) + awsSagemaker['IAMSagemakerRoleArn'] = configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn'] + configSettingsJson['awsSagemaker'] = awsSagemaker + configSettingsJson['data'] = dictionary + configSettingsJson['endpointName'] = endpoint + configSettingsJson['prediction']['status'] = 'True' + conv = json.dumps(configSettingsJson) + scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_sagemaker.py')) + outputStr = subprocess.check_output([sys.executable, scriptPath, conv]) + outputStr = outputStr.decode('utf-8') + outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) + outputStr=outputStr.strip() + output = json.loads(outputStr) + if output['status'] == 'SUCCESS': + outputStr = output['data'] + outputStr = pd.json_normalize(outputStr) + outputStr = outputStr.to_html() + else: + outputStr = output['msg'] + return HttpResponse(outputStr) + +def runtimeutility(request): + if request.method == ""POST"": + models = request.POST['model'] + datafile = request.POST['datafile'] + inputDataType = datafile.rsplit('.', 1)[1] + + + if inputDataType.lower() == 'csv': + values = request.POST['value'] + keys = request.POST['keys'] + x = keys.split("","") + y = values.split("","") + + dictionary = {key:value for key, value in zip(x,y)} + jsondata = json.dumps(dictionary, indent = 4) + #print(jsondata) + + config_json_filename = os.path.join(DEFAULT_FILE_PATH, 'runtime.json') + #print(config_json_filename) + with open(config_json_filename, ""w"") as fpWrite: + fpWrite.write(jsondata) + fpWrite.close() + + from conversions.runtime_utility import runTimeTesting + #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','conversions', 'runtime_utility.py')) + config_json_file = os.path.join(DEFAULT_FILE_PATH, 'runtime.json') + #outputStr = subprocess.check_output([sys.executable, scriptPath, models, config_json_file]) + #outputStr = outputStr.decode('utf-8') + outputStr=runTimeTesting(models,config_json_file) + # context = {'outputStr':outputStr,'modeltype':modeltype} + else: + from conversions.runtime_utility 
import runTimeTesting + outputStr=runTimeTesting(models,datafile) + + return HttpResponse(outputStr) # Generated by Django 3.2.8 on 2023-03-29 05:41 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0008_existusecases_publishtask'), + ] + + operations = [ + migrations.RemoveField( + model_name='existusecases', + name='publishtask', + ), + migrations.AddField( + model_name='existusecases', + name='publishPID', + field=models.IntegerField(default=0), + ), + migrations.AlterField( + model_name='existusecases', + name='Version', + field=models.IntegerField(default=0), + ), + migrations.AlterField( + model_name='existusecases', + name='portNo', + field=models.IntegerField(default=0), + ), + ] + # Generated by Django 3.0.8 on 2020-08-03 12:50 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='existusecases', + name='ModelName', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modelTraining.usecasedetails'), + ), + migrations.AlterField( + model_name='existusecases', + name='id', + field=models.AutoField(primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='" +"usecasedetails', + name='Description', + field=models.CharField(max_length=200), + ), + migrations.AlterField( + model_name='usecasedetails', + name='UsecaseName', + field=models.CharField(max_length=50), + ), + ] + # Generated by Django 3.0.8 on 2020-08-01 17:33 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Existusecases', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('ModelName', models.CharField(max_length=200)), + ('Version', models.IntegerField()), + ('DataFilePath', models.FileField(upload_to=None)), + ('ConfigPath', models.FileField(upload_to=None)), + ('DeployPath', models.FileField(upload_to=None)), + ('Status', models.CharField(max_length=200)), + ], + options={ + 'db_table': 'Existusecases', + }, + ), + migrations.CreateModel( + name='usecasedetails', + fields=[ + ('id', models.AutoField(primary_key=True, serialize=False)), + ('UsecaseName', models.CharField(max_length=20)), + ('Description', models.CharField(max_length=100)), + ], + options={ + 'db_table': 'usecasedetails', + }, + ), + ] + # Generated by Django 3.2.8 on 2023-03-28 18:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0006_auto_20230206_1759'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='driftStatus', + field=models.CharField(default='', max_length=20), + ), + migrations.AddField( + model_name='existusecases', + name='portNo', + field=models.CharField(default='', max_length=5), + ), + migrations.AddField( + model_name='existusecases', + name='publishStatus', + field=models.CharField(default='', max_length=20), + ), + migrations.AlterField( + model_name='existusecases', + name='ProblemType', + field=models.CharField(default='', max_length=20), + ), + ] + # Generated by Django 3.2.8 on 2023-02-06 17:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + 
('modelTraining', '0004_existusecases_problemtype'), + ] + + operations = [ + migrations.AddField( + model_name='usecasedetails', + name='UserDefinedName', + field=models.CharField(default=models.CharField(max_length=50), max_length=50), + ), + ] + # Generated by Django 3.2.8 on 2023-02-06 17:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0005_usecasedetails_userdefinedname'), + ] + + operations = [ + migrations.RemoveField( + model_name='usecasedetails', + name='UserDefinedName', + ), + migrations.AddField( + model_name='usecasedetails', + name='usecaseid', + field=models.CharField(default=models.CharField(max_length=50), max_length=10), + ), + ] + # Generated by Django 3.2.8 on 2023-03-29 18:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0009_auto_20230329_0541'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='modelType', + field=models.CharField(default='', max_length=40), + ), + ] + # Generated by Django 3.2.8 on 2022-10-28 09:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0003_existusecases_trainouputlocation'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='ProblemType', + field=models.CharField(default='', max_length=100), + ), + ] + # Generated by Django 3.0.8 on 2020-09-18 12:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0002_auto_20200803_1820'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='TrainOuputLocation', + field=models.CharField(default='', max_length=200), + ), + ] + # Generated by Django 3.2.8 on 2023-03-28 18:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0007_auto_20230328_1823'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='publishtask', + field=models.CharField(default='', max_length=500), + ), + ] + # Generated by Django 4.1.7 on 2023-05-17 10:46 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('modelTraining', '0010_existusecases_modeltype'), + ] + + operations = [ + migrations.AddField( + model_name='existusecases', + name='trainingPID', + field=models.IntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='ProblemType', + field=models.CharField(blank=True, max_length=20, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='TrainOuputLocation', + field=models.CharField(blank=True, max_length=200, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='driftStatus', + field=models.CharField(blank=True, max_length=20, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='modelType', + field=models.CharField(blank=True, max_length=40, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='portNo', + field=models.IntegerField(blank=True, null=True), + ), + migrations.AlterField( + model_name='existusecases', + name='publishPID', + field=models.IntegerField(blank=True, null=True), + ), + ] + from django.contrib.staticfiles.management.commands.runserver import Command as RunServer + 
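# --- Editor's sketch (assumption: not part of the original source; requires a configured Django environment) ---
# How the views above query the Existusecases model whose schema the migrations here build
# up (Version, Status, TrainOuputLocation, ...). The use-case name and version passed in
# are placeholders.
import json

def load_training_output(model_name, version):
    '''Return the parsed training-output JSON for a successfully trained use case, or None.'''
    from appfe.modelTraining.models import Existusecases
    trained = Existusecases.objects.filter(ModelName=model_name, Version=version,
                                           Status='SUCCESS').count()
    if trained < 1:
        return None
    record = Existusecases.objects.get(ModelName=model_name, Version=version)
    with open(str(record.TrainOuputLocation), 'r') as f:
        return json.load(f)

# Example: load_training_output(request.session['ModelName'], request.session['ModelVersion'])
# --- end of editor's sketch ---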
+class Command(RunServer): + + def check(self, *args, **kwargs): + self.stdout.write(self.style.WARNING(""SKIPPING SYSTEM CHECKS!\\n"")) + + def check_migrations(self, *args, **kwargs): + self.stdout.write(self.style.WARNING(""SKIPPING MIGRATION CHECKS!\\n"")) from django import forms +from modelTraining.models import usecasedetails +import os +class usecasedetailsForm(forms.ModelForm): + class Meta: + model = usecasedetails + fields = ""__all__"" +from modelTraining.models import Existusecases +class ExistusecasesForm(forms.ModelForm): + class Meta: + model = Existusecases + fields = ""__all__"" """""" +Django settings for mpgWebApp project. + +Generated by 'django-admin startproject' using Django 3.0.3. + +For more information on this file, see +https://docs.djangoproject.com/en/3.0/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.0/ref/settings/ +"""""" + +import os +from os.path import expanduser +import platform +from appbe.dataPath import DATA_DIR +#from cloghandler import ConcurrentRotatingFileHandler +sql_database_path = os.path.join(DATA_DIR,'sqlite') +if os.path.isdir(sql_database_path) == False: + os.makedirs(sql_database_path) + +DATA_UPLOAD_MAX_NUMBER_FIELDS = None +DATA_UPLOAD_MAX_MEMORY_SIZE = None +# Build paths inside the project like this: os.path.join(BASE_DIR, ...) +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath())) + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = 'y8d*&k0jv4c*zu^ykqz$=yyv@(lcmz495uj^()hthjs=x&&g0y' + +# SECURITY WARNING: don't run with debug turned on in production! 
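# --- Editor's sketch (assumption: not part of the original source) ---
# Equivalent of the sqlite bootstrap at the top of this settings module: ensure the
# DATA_DIR/sqlite folder exists before Django opens db.sqlite3 inside it. exist_ok=True
# folds the isdir() check and the makedirs() call into one step; DATA_DIR comes from
# appbe.dataPath as in the original.
import os

def sqlite_db_path(data_dir):
    '''Return the path DATABASES['default']['NAME'] points at, creating its folder if needed.'''
    sql_database_path = os.path.join(data_dir, 'sqlite')
    os.makedirs(sql_database_path, exist_ok=True)
    return os.path.join(sql_database_path, 'db.sqlite3')
# --- end of editor's sketch ---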
+DEBUG = True +ALLOWED_HOSTS = ['*'] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', + 'appfe.modelTraining', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'appfe.ux.error_handler.ErrorHandlerMiddleware' +] + +ROOT_URLCONF = 'appfe.ux.urls' + + + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [os.path.join(BASE_DIR,'templates')], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'appfe.ux.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.0/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': os.path.join(sql_database_path, 'db.sqlite3'), + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.0/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.0/howto/static-files/ + +STATIC_URL = '/static/' +STATIC_ROOT=os.path.join(BASE_DIR,'static') + from django.http import HttpResponse +from django.conf import settings +import traceback + + +class ErrorHandlerMiddleware: + + def __init__(self, get_response): + self.get_response = get_response + + def __call__(self, request): + response = self.get_response(request) + return response + + def process_exception(self, request, exception): + if not settings.DEBUG: + if exception: + # Format your message here + message = ""**{url}**\\n\\n{error}\\n\\n````{tb}````"".format( + url=request.build_absolute_uri(), + error=repr(exception), + tb=traceback.format_exc() + ) + # Do now whatever with this message + # e.g. requests.post(, data=message) + + return HttpResponse(""Error processing the request."", status=500) """"""mpgWebApp URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.0/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other" +"_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. 
Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +"""""" +from django.contrib import admin +from django.urls import path +from django.urls import include, re_path +from appfe.modelTraining import views +from appfe.modelTraining import upload_views +from appfe.modelTraining import bc_views +from appfe.modelTraining import mltest_views +from appfe.modelTraining import train_views +from appfe.modelTraining import dg_views +from appfe.modelTraining import settings_views +from appfe.modelTraining import drift_views +from appfe.modelTraining import landing_views +from appfe.modelTraining import mllite_views +from appfe.modelTraining import trustedAI_views +from appfe.modelTraining import llm_views +from appfe.modelTraining import visualizer_views as v +from appfe.modelTraining import prediction_views +from django.urls import path, re_path +urlpatterns = [ + path('admin/', admin.site.urls), + path('api/', include('appfe.api.urls')), + path('', views.index, name=""index""), + re_path('^$',views.index,name='Homepage'), + re_path('prediction', prediction_views.Prediction, name=""Prediction""), + path('edit/', views.edit), + path('update/', views.update), + path('opentraining//',views.opentraining), + path('opentraininglogs//',landing_views.opentraininglogs), + path('show',views.show,name=""show""), + path('ucdetails/',views.ucdetails,name='ucdetails'), + path('delete/', views.destroy,name='DeleteUseCase'), + path('deleteversion/',views.remove_version,name='RemoveVersion'), + path('deletes3Bucket/', settings_views.removes3bucket,name='removes3bucket'), + path('deleteGcsBucket/', settings_views.removegcsbucket,name='removegcsbucket'), + path('deleteAzureBucket/', settings_views.removeazurebucket,name='removeazurebucket'), + path('publish/',views.publish), + path('createpackagedocker//',views.createpackagedocker), + path('stoptraining',train_views.stoptraining), + path('downloadPackage//',views.downloadpackage), + re_path('startmodelservice',views.startmodelservice,name=""startmodelservice""), + re_path('stopmodelservice',views.stopmodelservice,name=""stopmodelservice""), + path('retrain//', landing_views.retrain), + re_path('computetoAWS',settings_views.computetoAWS,name='computeInfrastructure'), + re_path('computetoLLaMMA7b',settings_views.computetoLLaMMA7b,name='computeInfrastructure'), + re_path('computetoGCPLLaMA13B',settings_views.computetoGCPLLaMA13B,name='computeInfrastructure'), + re_path('help',views.help,name = ""help""), + re_path('mlac_userguide',views.mlac_userguide,name = ""mlac_userguide""), + path('launchmodel//', landing_views.launchmodel), + path('modxplain//', landing_views.modxplain), + path('moddrift//',landing_views.moddrift), + re_path('ConvertOnnx', mllite_views.ConvertOnnx, name=""ConvertOnnx""), + re_path('runtimeutility', mllite_views.runtimeutility, name=""runtimeutility""), + re_path('sagepredict', mllite_views.sageprediction, name=""sageprediction""), + re_path('mlstyles', views.mlstyles, name=""mlstyles""), + re_path('mltrain', views.mltrain, name=""mltrain""), + re_path('usecasefilter', views.usecasefilter, name=""usecasefilter""), + re_path('mlpredict', views.mlpredict, name=""mlpredict""), + re_path('getdataclasses',views.getdataclasses,name=""getdataclasses""), + re_path('usecases', views.AIusecases, name=""AIusecases""), + re_path('modelkafka',views.modelkafka,name=""ModelKafka""), + re_path('AionProblem', views.AionProblem, name=""AionProblem""), + re_path('UQTesting', 
mltest_views.UQTesting, name=""UQTesting""), + re_path('maaccommand',views.maaccommand,name='MAAC'), + re_path('GCSbucketAdd',settings_views.GCSbucketAdd,name=""gcsbucket""), + re_path('adds3bucket',settings_views.adds3bucket,name=""adds3bucket""), + re_path('azurestorageAdd',settings_views.azurestorageAdd,name=""azurestorageAdd""), + re_path('features', views.features, name=""features""), + re_path('downloadedareport',upload_views.downloadedareport,name=""downloadedareport""), + re_path('downloadxplainreport',views.downloadxplainreport,name=""downloadxplainreport""), + re_path('downlpredictreport',views.downlpredictreport,name=""DownloadPrediction""), + re_path('LoadBasicConfiguration',views.LoadBasicConfiguration,name='LoadBasicConfiguration'), + re_path('LoadAdvanceConfiguration',views.LoadAdvanceConfiguration,name='LoadAdvanceConfiguration'), + re_path('uploaddatafromscript',upload_views.uploaddatafromscript,name='uploaddatafromscript'), + re_path('features', views.features, name=""features""), + re_path('uploadDatafromSatandardDataset',upload_views.uploadDatafromSatandardDataset,name=""uploadDatafromSatandardDataset""), + re_path('uploadDatafromunsupervisedmodel',views.uploadDatafromunsupervisedmodel,name=""uploadDatafromunsupervisedmodel""), + re_path('mltesting',mltest_views.mltesting,name='mltesting'), + re_path('mllite',views.mllite,name=""MLLite""), + re_path('settings',settings_views.settings_page,name=""settings""), + re_path('openneural',mllite_views.openneural,name=""openneural""), + re_path('Tfliteneural',mllite_views.Tfliteneural,name=""Tfliteneural""), + re_path('encryptedpackage',views.encryptedpackage,name='encryptedpackage'), + re_path('ABtesting', mltest_views.ABtest, name=""ABtesting""), + re_path('uploadedData', upload_views.uploadedData, name='uploadedData'), + + # Text Data Labelling using LLM related changes + # -------------------------------------------------------- + re_path('uploadedTextData', llm_views.uploadedTextData, name='uploadedTextData'), + re_path('getTextLabel', llm_views.getTextLabel, name='getTextLabel'), + re_path('downloadTextLabelReport',llm_views.downloadTextLabelReport,name=""downloadTopicReport""), + # -------------------------------------------------------- + + + # QnA Generator using LLM related changes + # -------------------------------------------------------- + re_path('genearateQA', llm_views.genearateQA, name='genearateQA'), + re_path('downloadQnAReport',llm_views.downloadQnAReport,name=""downloadQnAReport""), + # -------------------------------------------------------- + + re_path('advanceconfig', bc_views.savebasicconfig, name='Advance'), + re_path('edaReport',upload_views.EDAReport,name='edareport'), + re_path('readlogfile',views.readlogfile,name=""readlogfile""), + re_path('flcommand',views.flcommand,name=""flcommand""), + re_path('openmlflow',views.mlflowtracking,name=""MLflow""), + re_path('basicconfig',bc_views.basicconfig,name='basicConfig'), + re_path('Advance',views.Advance,name='Advance'), + re_path('uploaddata', views.uploaddata, name='uploaddata'), + re_path('dataupload', views.Dataupload, name='dataupload'), + re_path('trainmodel', train_views.trainmodel, name='next'), + #Sagemaker + re_path('Sagemaker',mllite_views.Sagemaker,name=""Sagemaker""), + re_path('batchlearning',views.batchlearning,name=""batchlearning""), + # EDA Reports changes + re_path('gotoreport', views.gotoreport, name='report'), + re_path('llmmodelevaluate',train_views.llmmodelevaluate, name='llmmodelevaluate'), + # EDA Visualization changes + 
re_path('getgraph',views.getgraph,name=""getgraph""), + + # Fairness Metrics changes + re_path('getmetrics',views.getmetrics,name=""getmetrics""), + + re_path('getDeepDiveData',views.getDeepDiveData,name=""getDeepDiveData""), + + # 12686:Data Distribution related Changes + re_path('getDataDistribution',views.getDataDistribution,name=""getDataDistribution""), + re_path('licensekey',views.licensekey,name=""licensekey""), + # -------------------------------- Graviton-Integration Changes S T A R T -------------------------------- + re_path('getuserdata',views.getuserdata,name=""getuserdata""), + re_path('getdataservice',views.getdataservice,name=""getdataservice""), + # ------------------------------------------------ E N D ------------------------------------------------- + + re_path('getdataimbalance',views.getdataimbalance,name=""getdataimbalance""), + + re_path('trainresult',train_views.trainresult,name='trainresult'), + re_path('LoadDataForSingleInstance',views.LoadDataForSingleInstance,name='LoadDataForSingleInstance'), + re_path('PredictForSingleInstance',prediction_views.PredictForSingleInstance,name='PredictForSingleInstance'), + re_path('stateTransitionSettings',views.stateTransitionSettings,name='stateTransitionSettings'), + re_path('instancepredict',views.instancepredict,name='predict'), + re_path('onnxruntime',views.onnxruntime,name='onnxruntime'), + re_path('home',views.Dataupload,name='manage'), + re_path('show',views.show,name=""show""), + re_path('delete',views.show,name=""delete""), + re_path('inputdrift', landing_views.inputdrift, name='inputdrift'), + re_path('dotextSummarization',views.dotextSummarization,name='textSummarization'), + re_path('outputdrift', views.outputdrift, name='outputdrift'), + re_path('xplain', v.xplain, name='xplain'), + re_path('sensitivity', trustedAI_views.sensitivityAnalysis, name='sensitivity'), + re_path('fairnesmetrics', trustedAI_views.fairnesmetrics, name='fairnesmetrics'), + re_path('handlefairness', trustedAI_views.handlefairness, name='handlefairness'), + re_path('performance', trustedAI_views.performance_metrics, name='performance'), + re_path('uquncertainty', trustedAI_views.uquncertainty, name='uquncertainty'), + re_path('uqtransparency', trustedAI_views.uqtransparency, name='uqtransparency'), + re_path('RLpath',views.RLpath,name='RLpath'), + path('opendetailedlogs//', views.opendetailedlogs, name='logfile'), + path('downloadlogfile//',views.downloadlogfile), + path('openmodelevaluation/',views.openmodelevaluation,name='openmodelevaluation'), + re_path('startPublishServices',settings_views.startPublishServices,name=""PublishService""), + re_path('startKafka',settings_views.startKafka,name='startKafka'), + re_path('startService',views.startService,name='startService'), + re_path('startTracking',views.startTracking,name=""startTracking""), + re_path('Drift', drift_views.Drift, name='Drift'), + re_path('Distribution', drift_views.Distribution, name='Distribution'), + re_path('newfile', views.newfile, name='newfile'), + re_path('Evaluate', drift_views.Evaluate, name='Evaluate'), + re_path('qlearning',views.qlearning,name='qlearning'), + re_path('listfiles',upload_views.listfiles,name='listfiles'), + #url('actionavalanche',views.actionavalanche,name='actionavalanche'), + re_path('sqlAlchemy',upload_views.sqlAlchemy,name='sqlAlchemy'), + re_path('submitquery',upload_views.submitquery,name='submitquery'), + re_path('validatecsv',upload_views.validatecsv,name='validatecsv'), + path('ObjLabelAdd/',views.ObjLabelAdd), + 
path('objectlabel/',views.objectlabel), + path('imagelabel/',views.imagelabel), + path('ObjLabelRemove/',views.ObjLabelRemove), + re_path('objectlabelling',views.objectlabelling,name='objectlabelling'), + re_path('imagelabelling',views.imagelabelling,name='imagelabelling'), + re_path('ObjLabelDiscard',views.ObjLabelDiscard,name='ObjLabelDiscard'), + re_path('ObjLabelNext',views.ObjLabelNext,name='ObjLabelNext'), + re_path('ObjLabelPrev',views.ObjLabelPrev,name=""ObjLabelPrev""), + re_path('saveaionconfig',settings_views.saveaionconfig,name='saveaionconfig'), + re_path('savegravitonconfig',settings_views.savegravitonconfig,name='savegravitonconfig'), + re_path('saveopenaiconfig',settings_views.saveopenaiconfig,name=""saveopenaiconfig""), + re_path('getvalidateddata',views.getvalidateddata,name=""getvalidateddata""), + re_path('updateawsconfig',settings_views.amazonec2settings,name=""amazonec2settings""), + re_path('updategcpconfig',settings_views.gcpcomputesettings,name=""gcpcomputesettings""), + re_path('localsetings',views.localsetings,name=""localsetings""), + re_path('ImgLabelNext',views.ImgLabelNext,name='ImgLabelNext'), + re_path('objectlabeldone',views.objectlabeldone,name='ObjectLabeling'), + re_path(r'^get_table_list', upload_views.get_table_list, name='get_table_list'), + re_path(r'^getdatasetname', views.getdatasetname, name='getdatasetname'), + re_path(r'" +"^get_tables_fields_list', upload_views.get_tables_fields_list, name='get_tables_fields_list'), + re_path(r'^validate_query', upload_views.validate_query, name='validate_query'), + re_path(r'^trigger_DAG', views.trigger_DAG, name = 'trigger_DAG'), + # The home page + path('dataoperations', views.dataoperations, name='dataoperations'), + path('summarization',views.summarization,name='summarization'), + path('datalabel', views.datalabel, name='datalabel'), + path('upload_and_read_file_data', views.upload_and_read_file_data, name='upload_and_read_file_data'), + path('apply_rule', views.apply_rule, name='apply_rule'), + path('apply_rule_ver2', views.apply_rule_ver2, name='apply_rule_ver2'), + path('download_result_dataset', views.download_result_dataset, name='download_result_dataset'), + path('get_sample_result_of_individual_rule', views.get_sample_result_of_individual_rule, + name='get_sample_result_of_individual_rule'), + path('get_sample_result_of_individual_rule_ver2', views.get_sample_result_of_individual_rule_ver2, + name='get_sample_result_of_individual_rule_ver2'), + path('upload_and_read_test_data', views.upload_and_read_test_data, name='upload_and_read_test_data'), + path('get_label_and_weightage', views.get_label_and_weightage, name='get_label_and_weightage'), + path('datagenrate', dg_views.datagenrate, name='datagenrate'), + path('generateconfig', dg_views.generateconfig, name='generateconfig'), + path('StationarySeasonalityTest', views.StationarySeasonalityTest, name='StationarySeasonalityTest'), + + path('modelcompare', views.modelcompare, name='modelcompare'), + path('textsummarization', views.textsummarization, name='textsummarization'), + path('azureOpenAiDavinci', llm_views.azureOpenAiDavinci, name='azureOpenAiDavinci'), + path('azureOpenAiDavinciSumarization', llm_views.azureOpenAiDavinciSumarization, name='azureOpenAiDavinciSumarization'), + # LLM Testing + path('llmtesting', views.llmtesting, name='llmtesting'), + path('llmtestingresult', views.llmtestingresult, name='llmtestingresult'), + re_path('llmtestreport',views.llmtestreport,name=""llmtestreport""), + # Code Clone Detection + + 
path('codeclonedetectionresult', views.codeclonedetectionresult, name='codeclonedetectionresult'), + re_path('codeclonereport',views.codeclonereport,name=""codeclonereport""), + re_path('evaluateprompt',views.evaluatepromptmetrics,name=""evaluatepromptmetrics""), + path('libraries', views.libraries, name='libraries'), #To display libraries +] + + +#df=pd.read_csv(""C:\\Project\\Analytics\\Deployment\\germancredit_9\\germancreditdata.csv"") +# +#bool_cols = [col for col in df if np.isin(df[col].dropna().unique(), [0, 1]).all()] +# +#bool_cols + """""" +WSGI config for ux project. + +It exposes the WSGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ +"""""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings') + +application = get_wsgi_application() + """""" +ASGI config for ux project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ +"""""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings') + +application = get_asgi_application() + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
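# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original sources): a minimal,
# self-contained sketch of the two routing styles used in the urls.py above.
# path() uses typed converters, re_path() takes a raw regular expression.
# The view functions, route strings and names below are hypothetical
# placeholders, not routes from the AION application.
from django.http import HttpResponse
from django.urls import path, re_path


def demo_list(request):
    # Trivial views so the sketch is importable on its own inside a project.
    return HttpResponse("list")


def demo_detail(request, item_id):
    return HttpResponse(f"item {item_id}")


urlpatterns = [
    # path(): literal prefix plus a converter that passes item_id as an int.
    path("demo/<int:item_id>/", demo_detail, name="demo_detail"),
    # re_path(): anchoring with ^ (as done for '^get_table_list' above) avoids
    # accidental substring matches that unanchored patterns can produce.
    re_path(r"^demo$", demo_list, name="demo_list"),
]
# ---------------------------------------------------------------------------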
+* +''' +#from sklearn.externals import joblib +import joblib +# import pyreadstat +# import sys +# import math +import time +import pandas as pd +import numpy as np +from sklearn.metrics import confusion_matrix +from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score +from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import classification_report, confusion_matrix +from sklearn.svm import SVC +from sklearn.linear_model import LinearRegression +import argparse +import json +import os +import pathlib +from tensorflow.keras.models import load_model +# from tensorflow.keras import backend as K +import tensorflow as tf +# from sklearn.decomposition import LatentDirichletAllocation +from pathlib import Path +#from aionUQ import aionUQ +from uq_main import aionUQ +import os +from datetime import datetime +from sklearn.model_selection import train_test_split +parser = argparse.ArgumentParser() +parser.add_argument('savFile') +parser.add_argument('csvFile') +parser.add_argument('features') +parser.add_argument('target') +args = parser.parse_args() +from appbe.dataPath import DEPLOY_LOCATION + +if ',' in args.features: + args.features = [x.strip() for x in args.features.split(',')] +else: + args.features = args.features.split("","") +models = args.savFile +if Path(models).is_file(): +# if Path(args.savFile.is_file()): + model = joblib.load(args.savFile) + # print(model.__class__.__name__) + # print('class:',model.__class__) + # print(type(model).__name__) + + # try: + # print('Classess=',model.classes_) + + # except: + # print(""Classess=N/A"") + + # print('params:',model.get_params()) + + # try: + # print('fea_imp =',model.feature_importances_) + + # except: + # print(""fea_imp =N/A"") + + ProblemName = model.__class__.__name__ + Params = model.get_params() + # print(""ProblemName: \\n"",ProblemName) + # print(""Params: \\n"",Params) + + # print('ProblemName:',model.__doc__) + # print(type(ProblemName)) + if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighboursClassifier','DecisionTreeClassifier','GradientBoostingClassifier']: + Problemtype = 'Classification' + else : + Problemtype = 'Regression' + + if Problemtype == 'Classification': + + df = pd.read_csv(args.csvFile) + object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object'] + df = df.drop(object_cols, axis=1) + df = df.dropna(axis=1) + df = df.reset_index(drop=True) + modelfeatures = args.features + # dfp = df[modelfeatures] + tar = args.target + # target = df[tar] + y=df[tar] + X = df.drop(tar, axis=1) + #for dummy test,train values pass + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar) + #accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,""uqtest"") + accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification() + # print(""UQ Classification: \\n"",output_jsonobject) + print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per) + print(""End of UQ Classification.\\n"") + + else: + + df = pd.read_csv(args.csvFile) + modelfeatures = args.features + # print(""modelfeatures: \\n"",modelfeatures) 
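# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original script): the driver above
# decides Classification vs Regression by matching model.__class__.__name__
# against a hard-coded list of class names. A sketch of an alternative that
# asks scikit-learn directly; the two estimators below are examples only,
# not the models this script actually loads from --savFile.
from sklearn.base import is_classifier, is_regressor
from sklearn.linear_model import LinearRegression, LogisticRegression


def infer_problem_type(estimator) -> str:
    # is_classifier()/is_regressor() inspect the estimator itself, so renamed
    # or newly added estimator classes need no changes to a lookup list.
    if is_classifier(estimator):
        return "Classification"
    if is_regressor(estimator):
        return "Regression"
    return "Unknown"


print(infer_problem_type(LogisticRegression()))  # -> Classification
print(infer_problem_type(LinearRegression()))    # -> Regression
# ---------------------------------------------------------------------------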
+ # print(""type modelfeatures: \\n"",type(modelfeatures)) + dfp = df[modelfeatures] + tar = args.target + target = df[tar] + #Not used, just dummy X,y split + y=df[tar] + X = df.drop(tar, axis=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar) + total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression() + print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject) + print(""End of UQ reg\\n"") + +elif Path(models).is_dir(): + + + os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' + os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + model = load_model(models) + ProblemName = model.__class__.__name__ + Problemtype = 'Classification' + # print('class:',model.__class__) + # print('class1',model.__class__.__name__) + # print(model.summary()) + # print('ProblemName1:',model.get_config()) + + + + + def Params(model: tf.keras.Model): + Params = [] + model.Params(print_fn=lambda x: Params.append(x)) + return '\\n'.join(Params) + + + + df = pd.read_csv(args.csvFile) + modelfeatures = args.features + dfp = df[modelfeatures] + tar = args.target + target = df[tar] + df3 = dfp.astype(np.float32) + predic = model.predict(df3) + if predic.shape[-1] > 1: + predic = np.argmax(predic, axis=-1) + else: + predic = (predic > 0.5).astype(""int32"") + + matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) + matrixconfusion = matrixconfusion.to_json(orient='index') + + classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() + classificationreport = round(classificationreport,2) + classificationreport = classificationreport.to_json(orient='index') + output = {} + output[""Precision""] = ""%.3f"" % precision_score(target, predic,average='weighted') + output[""Recall""] = ""%.3f"" % recall_score(target, predic,average='weighted') + output[""Accuracy""] = ""%.3f"" % accuracy_score(target, predic) + output[""ProblemName""] = ProblemName + output[""Params""] = Params + output[""Problemtype""] = Problemtype + output[""Confusionmatrix""] = matrixconfusion + output[""classificationreport""] = classificationreport + print(json.dumps(output)) + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
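# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original script): the Keras branch above
# converts predicted probabilities to labels (argmax for multi-class, a 0.5
# threshold for a single sigmoid output) and serialises the metrics as JSON.
# A NumPy/scikit-learn-only sketch of that post-processing step on synthetic
# scores; the arrays below are made up for illustration.
import json

import numpy as np
from sklearn.metrics import accuracy_score, classification_report

y_true = np.array([0, 1, 2, 1, 0, 2])
proba = np.array([[0.7, 0.2, 0.1],   # pretend softmax outputs, one row per sample
                  [0.1, 0.8, 0.1],
                  [0.2, 0.2, 0.6],
                  [0.3, 0.4, 0.3],
                  [0.6, 0.3, 0.1],
                  [0.1, 0.3, 0.6]])

# Multi-class: take the most probable class; a single-column sigmoid output
# would instead use (proba > 0.5).astype("int32"), as the script above does.
if proba.shape[-1] > 1:
    y_pred = np.argmax(proba, axis=-1)
else:
    y_pred = (proba > 0.5).astype("int32").ravel()

report = {
    "Accuracy": "%.3f" % accuracy_score(y_true, y_pred),
    "classificationreport": classification_report(y_true, y_pred, output_dict=True),
}
print(json.dumps(report, indent=2))
# ---------------------------------------------------------------------------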
+* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from matplotlib import pyplot +import sys +import os +import json +import matplotlib.pyplot as plt +from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression +from uq360.algorithms.ucc_recalibration import UCCRecalibration +from sklearn import datasets +from sklearn.model_selection import train_test_split +import pandas as pd +from uq360.metrics.regression_metrics import compute_regression_metrics +import numpy as np +from sklearn.metrics import accuracy_score +from sklearn.metrics import precision_score +from sklearn.metrics import recall_score +from sklearn.metrics import f1_score +from sklearn.metrics import roc_curve +# from math import sqrt +from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error +# from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature +from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by" +"_feature + +#Added libs from MLTest +import sys +import time +from sklearn.metrics import confusion_matrix +from pathlib import Path +import logging +# import json + +class aionUQ: + # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): + def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation): + # #printprint(""Inside aionUQ \\n"") + try: + #print(""Inside aionUQ init\\n "") + self.data=df + self.dfFeatures=dfp + self.uqconfig_base=Params + self.uqconfig_meta=Params + self.targetFeature=targetfeature + self.target=target + self.selectedfeature=modelfeatures + self.y=self.target + self.X=self.dfFeatures + self.log = logging.getLogger('eion') + self.basemodel=model + self.model_name=ProblemName + self.Deployment = os.path.join(deployLocation,'log','UQ') + os.makedirs(self.Deployment,exist_ok=True) + self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph') + os.makedirs(self.uqgraphlocation,exist_ok=True) + except Exception as e: + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus): + from sklearn.model_selection import train_test_split + # To get each class values and uncertainty + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest + # y_val = y_train.append(y_test) + else: + # y_val = self.y + df=self.data + y=df[self.targetFeature] + X = df.drop(self.targetFeature, axis=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + + # from sklearn.tree import DecisionTreeRegressor + # from sklearn.linear_model import LinearRegression,Lasso,Ridge + # from sklearn import linear_model + # from sklearn.ensemble import 
RandomForestRegressor + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + y_hat_total_mean=np.mean(y_hat) + y_hat_lb_total_mean=np.mean(y_hat_lb) + y_hat_ub_total_mean=np.mean(y_hat_ub) + mpiw_20_per=(y_hat_total_mean*20/100) + mpiw_lower_range = y_hat_total_mean - mpiw_20_per + mpiw_upper_range = y_hat_total_mean + mpiw_20_per + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) + self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) + self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) + self.log.info('Model total picp_percentage : '+str(picp_percentage)) + return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range + + def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): + try: + global x_feature,y_feature + if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): + x_feature=''.join(map(str, self.selectedfeature)) + else: + x_feature= str(self.selectedfeature) + # self.selectedfeature=str(self.selectedfeature) + X_test=np.squeeze(X_test) + y_feature=str(self.targetFeature) + pred_dict = {x_feature: X_test, + 'y': y_test, + 'y_mean': y_mean, + 'y_upper': y_upper, + 'y_lower': y_lower + } + pred_df = pd.DataFrame(data=pred_dict) + pred_df_sorted = pred_df.sort_values(by=x_feature) + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') + plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') + plt.legend() + plt.xlabel(x_feature) + plt.ylabel(y_feature) + plt.title('UQ Confidence Interval Plot.') + # plt.savefig('uq_test_plt.png') + if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'): + os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png') + plt.savefig(str(self.Deployment)+'/uq_test_plt.png') + plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png') + plt.clf() + plt.cla() + plt.close() + pltreg=plot_picp_by_feature(X_test, y_test, + y_lower, y_upper, + xlabel=x_feature) + #pltreg.savefig('x.png') + pltr=pltreg.figure + if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'): + os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png') + pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') + pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + except Exception as e: + # #print(""display exception: \\n"",e) + self.log.info(' '+str(e)) + + def classUncertainty(self,pred,score): + try: + outuq = {} + classes = np.unique(pred) + for c in classes: + ids = pred == c + 
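# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original class): totalUncertainty()
# above reports PICP (prediction interval coverage probability) and MPIW
# (mean prediction interval width) via uq360.metrics. A NumPy-only sketch of
# the same two quantities on synthetic intervals, for readers without uq360.
import numpy as np


def picp_score(y_true, y_lower, y_upper):
    # Fraction of observations falling inside their prediction interval.
    inside = (y_true >= y_lower) & (y_true <= y_upper)
    return float(np.mean(inside))


def mpiw_score(y_lower, y_upper):
    # Average width of the prediction intervals.
    return float(np.mean(y_upper - y_lower))


rng = np.random.default_rng(0)
y = rng.normal(size=200)
y_hat = y + rng.normal(scale=0.7, size=200)   # noisy point predictions
lb, ub = y_hat - 1.0, y_hat + 1.0             # fixed-width intervals around them
print(f"PICP={picp_score(y, lb, ub):.2f}  MPIW={mpiw_score(lb, ub):.2f}")
# ---------------------------------------------------------------------------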
class_score = score[ids] + predc = 'Class_'+str(c) + outuq[predc]=np.mean(class_score) + x = np.mean(class_score) + #Uncertaininty in percentage + x=x*100 + self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x))) + return outuq + except Exception as e: + # #print(""display exception: \\n"",e) + self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus): + try: + # print(""Inside uqMain_BBMClassification\\n"") + # print(""lenth of x_train {}, x_test {}, y_train {}, y_test {}"".format(x_train, x_test, y_train, y_test)) + aionstatus = str(aionstatus) + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + else: + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification + from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + from sklearn.ensemble import GradientBoostingClassifier + from sklearn.linear_model import LogisticRegression + from sklearn.linear_model import SGDClassifier + from sklearn.naive_bayes import GaussianNB + from sklearn.tree import DecisionTreeClassifier + from sklearn.ensemble import RandomForestClassifier + from sklearn.svm import SVC + from xgboost import XGBClassifier + from lightgbm import LGBMClassifier + from sklearn.neighbors import KNeighborsClassifier + + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + #print(model_name) + try: + #geting used features + model_used_features=self.basemodel.feature_names_in_ + self.log.info(""Base model used training features are (UQ Testing): \\n""+str(model_used_features)) + except: + pass + model_params=self.basemodel.get_params() + uq_scoring_param='accuracy' + basemodel=None + if (model_name == ""GradientBoostingClassifier""): + basemodel=GradientBoostingClassifier + elif (model_name == ""SGDClassifier""): + basemodel=SGDClassifier + elif (model_name == ""GaussianNB""): + basemodel=GaussianNB + elif (model_name == ""DecisionTreeClassifier""): + basemodel=DecisionTreeClassifier + elif(model_name == ""RandomForestClassifier""): + basemodel=RandomForestClassifier + elif (model_name == ""SVC""): + basemodel=SVC + elif(model_name == ""KNeighborsClassifier""): + basemodel=KNeighborsClassifier + elif(model_name.lower() == ""logisticregression""): + basemodel=LogisticRegression + elif(model_name == ""XGBClassifier""): + basemodel=XGBClassifier + elif(model_name == ""LGBMClassifier""): + basemodel=LGBMClassifier + else: + basemodel=LogisticRegression + + calibrated_md" +"l=None + if (model_name == ""SVC""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SVC(**model_params) + calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] + elif (model_name == ""SGDClassifier""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SGDClassifier(**model_params) + calibrated_mdl 
= CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] + else: + from sklearn.calibration import CalibratedClassifierCV + base_mdl = basemodel(**model_params) + calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3) + basemodelfit = calibrated_mdl.fit(X_train, y_train) + basepredict = calibrated_mdl.predict(X_test) + predprob_base=calibrated_mdl.predict_proba(X_test)[:, :] + cal_model_params=calibrated_mdl.get_params() + + acc_score_base=accuracy_score(y_test, basepredict) + base_estimator_calibrate = cal_model_params['base_estimator'] + + + uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel, + base_config=model_params, meta_config=model_params) + + try: + X_train=X_train[model_used_features] + X_test=X_test[model_used_features] + except: + pass + uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) + # uqmodel_fit = uq_model.fit(X_train, y_train) + y_t_pred, y_t_score = uq_model.predict(X_test) + acc_score=accuracy_score(y_test, y_t_pred) + test_accuracy_perc=round(100*acc_score) + if(aionstatus == ""aionuq""): + test_accuracy_perc=round(test_accuracy_perc,2) + #uq_aurrrc not used for any aion gui configuration, so it initialized as 0. if we use area_under_risk_rejection_rate_curve(), it shows plot in cmd prompt,so code execution interuupted.so we make it 0. + uq_aurrrc=0 + pass + else: + bbm_c_plot = plot_risk_vs_rejection_rate( + y_true=y_test, + y_prob=predprob_base, + selection_scores=y_t_score, + y_pred=y_t_pred, + plot_label=['UQ_risk_vs_rejection'], + risk_func=accuracy_score, + num_bins = 10 ) + # This done by kiran, need to uncomment for GUI integration. + # bbm_c_plot_sub = bbm_c_plot[4] + bbm_c_plot_sub = bbm_c_plot + if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'): + os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') + # bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') + re_plot=plot_reliability_diagram(y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + plot_label=['UQModel reliability_diagram'], + num_bins=10 ) + # This done by kiran, need to uncomment for GUI integration. 
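# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original method): uqMain_BBMClassification()
# above wraps SVC/SGDClassifier in CalibratedClassifierCV so that
# predict_proba() is available for the ECE and risk/rejection metrics.
# A minimal standalone sketch of that wrapping step on a synthetic dataset;
# the dataset and hyperparameters are illustrative only.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = make_classification(n_samples=500, n_features=8, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)

# SVC has no predict_proba unless probability=True; sigmoid (Platt) calibration
# over 3 cross-validation folds provides calibrated probabilities instead.
calibrated = CalibratedClassifierCV(SVC(), method="sigmoid", cv=3)
calibrated.fit(X_tr, y_tr)
proba = calibrated.predict_proba(X_te)   # shape (n_samples, n_classes)
print(proba[:3].round(3))
# ---------------------------------------------------------------------------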
+ # re_plot_sub = re_plot[4] + re_plot_sub = re_plot + if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'): + os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png') + # re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png') + + uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + selection_scores=y_t_score, + attributes=None, + risk_func=accuracy_score,subgroup_ids=None, return_counts=False, + num_bins=10) + uq_aurrrc=uq_aurrrc + test_accuracy_perc=round(test_accuracy_perc) + #metric_all=compute_classification_metrics(y_test, y_prob, option='all') + metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') + #expected_calibration_error + uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False) + # uq_aurrrc=uq_aurrrc + confidence_score=acc_score_base-uq_ece + ece_confidence_score=round(confidence_score,2) + # Model uncertainty using ECE score + # model_uncertainty_ece = 1-ece_confidence_score + #Uncertainty Using model inherent predict probability + + mean_predprob_total=np.mean(y_t_score) + model_confidence=mean_predprob_total + model_uncertainty = 1-mean_predprob_total + + model_confidence = round(model_confidence,2) + # To get each class values and uncertainty + if (aionstatus.lower() == 'aionuq'): + y_val = np.append(y_train,y_test) + else: + y_val = self.y + self.log.info('------------------> Model Confidence Score '+str(model_confidence)) + outuq = self.classUncertainty(y_t_pred,y_t_score) + + # Another way to get conf score + model_uncertainty_per=round((model_uncertainty*100),2) + model_confidence_per=round((model_confidence*100),2) + acc_score_per = round((acc_score*100),2) + uq_ece_per=round((uq_ece*100),2) + output={} + recommendation = """" + if (uq_ece > 0.5): + # RED text + recommendation = 'Model has high ece (expected calibration error) score compare to threshold (0.5),not good to be deploy. need to be add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' + else: + # self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.') + if (uq_ece <= 0.1 and model_confidence >= 0.9): + # Green Text + recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' + else: + # Orange + recommendation = 'Model has good ECE score (between 0.1-0.5), but less confidence score compare to threshold (90%). If user wants,model can be improve by adding more input data across all feature ranges and could be evaluate with different algorithms/ensembling. 
' + #Adding each class uncertainty value + classoutput = {} + for k,v in outuq.items(): + classoutput[k]=(str(round((v*100),2))) + output['classes'] = classoutput + output['ModelConfidenceScore']=(str(model_confidence_per)) + output['ExpectedCalibrationError']=str(uq_ece_per) + output['ModelUncertainty']=str(model_uncertainty_per) + output['Recommendation']=recommendation + # output['user_msg']='Please check the plot for more understanding of model uncertainty' + #output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4) + output['Accuracy']=str(acc_score_per) + output['Problem']= 'Classification' + #self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %')) + # #print(""Prediction mean for the given model:"",np.mean(y_hat),""\\n"") + #self.log.info(recommendation) + #self.log.info(""Model_confidence_score: "" +str(confidence_score)) + #self.log.info(""Model_uncertainty: "" +str(round(model_uncertainty,2))) + #self.log.info('Please check the plot for more understanding of model uncertainty.\\n.') + uq_jsonobject = json.dumps(output) + with open(str(self.Deployment)+""/uq_classification_log.json"", ""w"") as f: + json.dump(output, f) + return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per + except Exception as inst: + self.log.info('\\n < ---------- UQ Model Execution Failed Start--------->') + self.log.info('\\n<------Model Execution failed!!!.' + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + self.log.info('\\n < ---------- Model Execution Failed End --------->') + def aion_confidence_plot(self,df): + df=df + df = df.sort_values(by=self.selectedfeature) + best_values=df.Best_values.to_list() + best_upper=df.Best__upper.to_list() + best_lower=df.Best__lower.to_list() + Total_Upper_PI=df.Total_Upper_PI.to_list() + Total_Low_PI=df.Total_Low_PI.to_list() + Obseved = df.Observed.to_list() + + plt.plot(df[x_feature], df['Observed'], 'o', label='Observed') + plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey') + plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey') + plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') + plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) + plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) + plt.legend() + plt.xlabel(self.selectedfeature) + plt.ylabel(self.targetFeature) + plt.title('UQ Best & Good Area Plot') + if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'): + os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png') + plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png') + plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') + + + def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus): + aionstatus = str(aionstatus) + # if (aionstatus.lower() == 'aionuq'): + # X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + # else: + # X_train, X_test, y_train, y_test = train_test_split(self.X, self.y," +"test_size=0.3, random_state=0) + + # modelName = """" + self.log.info(' ') + 
try: + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression + import pandas as pd + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + model_params=self.basemodel.get_params() + # #print(""model_params['criterion']: \\n"",model_params['criterion']) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + # modelname='sklearn.linear_model'+'.'+model_name + # X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest + #Geeting trained model name and to use the model in BlackboxMetamodelRegression + from sklearn.tree import DecisionTreeRegressor + from sklearn.linear_model import LinearRegression,Lasso,Ridge + from sklearn.ensemble import RandomForestRegressor + if (model_name == ""DecisionTreeRegressor""): + basemodel=DecisionTreeRegressor + elif (model_name == ""LinearRegression""): + basemodel=LinearRegression + elif (model_name == ""Lasso""): + basemodel=Lasso + elif (model_name == ""Ridge""): + basemodel=Ridge + elif(model_name == ""RandomForestRegressor""): + basemodel=RandomForestRegressor + else: + basemodel=LinearRegression + + if (aionstatus.lower() == 'aionuq'): + X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + else: + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus) + + + + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + # #print(""X_train.shape: \\n"",X_train.shape) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + self.log.info(' '+str(observed_alphas_picp)) + self.log.info(' '+str(observed_widths_mpiw)) + # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” + #metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss. 
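# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original method): uqMain_BBMRegression()
# above obtains (y_hat, y_hat_lb, y_hat_ub) from uq360's
# BlackboxMetamodelRegression. As a point of comparison only, a sketch of
# producing the same kind of triple with plain scikit-learn quantile gradient
# boosting; this is NOT the approach the class uses, and the dataset and
# quantiles are illustrative.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=(400, 1))
y = np.sin(X[:, 0]) + rng.normal(scale=0.3, size=400)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)

# One model per quantile: 5% and 95% give roughly a 90% prediction interval;
# the default squared-error model gives the point prediction y_hat.
lower = GradientBoostingRegressor(loss="quantile", alpha=0.05).fit(X_tr, y_tr)
upper = GradientBoostingRegressor(loss="quantile", alpha=0.95).fit(X_tr, y_tr)
point = GradientBoostingRegressor().fit(X_tr, y_tr)

y_hat, y_lb, y_ub = point.predict(X_te), lower.predict(X_te), upper.predict(X_te)
coverage = np.mean((y_te >= y_lb) & (y_te <= y_ub))   # empirical PICP
print(f"coverage={coverage:.2f}  mean width={np.mean(y_ub - y_lb):.2f}")
# ---------------------------------------------------------------------------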
+ metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) + metric_used='' + for k,v in metric_all.items(): + metric_used=str(round(v,2)) + self.log.info(' '+str(metric_all)) + # Determine the confidence level and recommentation to the tester + # test_data=y_test + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + #Calculate total uncertainty for all features + # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) + # df1=self.data + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) + recommendation="""" + output={} + + if (observed_alphas_picp >= 0.95 and total_picp >= 0.75): + # Add GREEN text + self.log.info('Model has good confidence for the selected feature, ready to deploy.\\n.') + recommendation = ""Model has good confidence score, ready to deploy."" + elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)): + # Orange + recommendation = ""Model has average confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."" + self.log.info('Model has average confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') + + else: + # RED text + recommendation = ""Model has less confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."" + self.log.info('Model has less confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') + + #Build uq json info dict + output['ModelConfidenceScore']=(str(total_picp_percentage)+'%') + output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%') + output['SelectedFeatureConfidence']=(str(picp_percentage)+'%') + output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%') + output['PredictionIntervalCoverageProbability']=observed_alphas_picp + output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw) + output['DesirableMPIWRange: ']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) + output['Recommendation']=str(recommendation) + output['Metric']=uq_scoring_param + output['Score']=metric_used + output['Problemtype']= 'Regression' + self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %')) + self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %')) + #self.log.info('Please check the plot for more understanding of model uncertainty.\\n.') + #self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub) + uq_jsonobject = json.dumps(output) + with open(str(self.Deployment)+""/uq_reg_log.json"", ""w"") as f: + json.dump(output, f) + #To get best and medium UQ range of values from total predict interval + y_hat_m=y_hat.tolist() + y_hat_lb=y_hat_lb.tolist() + upper_bound=y_hat_ub.tolist() + y_hat_ub=y_hat_ub.tolist() + for x in y_hat_lb: + y_hat_ub.append(x) + total_pi=y_hat_ub + medium_UQ_range = y_hat_ub + best_UQ_range= y_hat.tolist() + ymean_upper=[] + ymean_lower=[] + y_hat_m=y_hat.tolist() + for i in y_hat_m: + y_hat_m_range= (i*20/100) + 
x=i+y_hat_m_range + y=i-y_hat_m_range + ymean_upper.append(x) + ymean_lower.append(y) + min_best_uq_dist=round(min(best_UQ_range)) + max_best_uq_dist=round(max(best_UQ_range)) + # initializing ranges + list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) + list_best = y_hat_m + X_test = np.squeeze(X_test) + ''' + uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, + 'Best__upper':ymean_upper, + 'Best__lower':ymean_lower, + 'Total_Low_PI': y_hat_lb, + 'Total_Upper_PI': upper_bound, + } + + print(uq_dict) + uq_pred_df = pd.DataFrame(data=uq_dict) + uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') + uq_pred_df_sorted.to_csv(str(self.Deployment)+""/uq_pred_df.csv"",index = False) + csv_path=str(self.Deployment)+""/uq_pred_df.csv"" + df=pd.read_csv(csv_path) + self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.') + #Callconfidence olot fn only for UQTest interface + + if (aionstatus.lower() == 'aionuq'): + #No need to showcase confidence plot for aion main + pass + else: + self.aion_confidence_plot(df) + ''' + return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject) + + except Exception as inst: + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) + + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. 
+* +''' + +import logging +logging.getLogger('tensorflow').disabled = True +import json +#from nltk.corpus import stopwords +from collections import Counter +from matplotlib import pyplot +import sys +import os +import matplotlib.pyplot as plt +from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression +from sklearn import datasets +from sklearn.model_selection import train_test_split +import pandas as pd +from uq360.metrics.regression_metrics import compute_regression_metrics +import numpy as np +from sklearn" +".metrics import accuracy_score +from sklearn.metrics import precision_score +from sklearn.metrics import recall_score +from sklearn.metrics import f1_score +from sklearn.metrics import roc_curve +from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error +from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature +import sys +import time +from sklearn.metrics import confusion_matrix +from pathlib import Path +import logging +import logging.config +from os.path import expanduser +import platform +from sklearn.utils import shuffle + +class aionUQ: + # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): + def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature): + try: + self.data=df + self.dfFeatures=dfp + self.uqconfig_base=Params + self.uqconfig_meta=Params + self.targetFeature=targetfeature + self.log = logging.getLogger('aionUQ') + self.target=target + self.selectedfeature=modelfeatures + self.y=self.target + self.X=self.dfFeatures + from appbe.dataPath import DEPLOY_LOCATION + self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time())))) + os.makedirs(self.Deployment,exist_ok=True) + self.basemodel=model + self.model_name=ProblemName + # self.X, self.y = shuffle(self.X, self.y) + X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0) + self.xtrain = X_train + self.xtest = X_test + self.ytrain = y_train + self.ytest = y_test + # self.deployLocation=deployLocation + + + except Exception as e: + # self.log.info(' '+str(e)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) + # self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) + + def totalUncertainty(self,df,basemodel,model_params): + try: + # from sklearn.model_selection import train_test_split + # df=self.data + # y=df[self.targetFeature] + # X = df.drop(self.targetFeature, axis=1) + if (isinstance(self.selectedfeature,list)): + selectedfeature=[self.selectedfeature[0]] + selectedfeature=' '.join(map(str,selectedfeature)) + if (isinstance(self.targetFeature,list)): + targetFeature=[self.targetFeature[0]] + targetFeature=' '.join(map(str,targetFeature)) + X = self.data[selectedfeature] + y = self.data[targetFeature] + X = X.values.reshape((-1,1)) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + # from sklearn.tree import DecisionTreeRegressor + # from sklearn.linear_model import 
LinearRegression,Lasso,Ridge + # from sklearn import linear_model + # from sklearn.ensemble import RandomForestRegressor + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='picp' + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + y_hat_total_mean=np.mean(y_hat) + y_hat_lb_total_mean=np.mean(y_hat_lb) + y_hat_ub_total_mean=np.mean(y_hat_ub) + mpiw_20_per=(y_hat_total_mean*20/100) + mpiw_lower_range = y_hat_total_mean - mpiw_20_per + mpiw_upper_range = y_hat_total_mean + mpiw_20_per + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + # self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) + # self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) + # self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) + # self.log.info('Model total picp_percentage : '+str(picp_percentage)) + except Exception as e: + print(""totalUncertainty fn error: \\n"",e) + + return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range + + def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): + try: + global x_feature,y_feature + if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): + x_feature=','.join(map(str, self.selectedfeature)) + else: + x_feature= str(self.selectedfeature) + # self.selectedfeature=str(self.selectedfeature) + + X_test=np.squeeze(X_test) + y_feature=str(self.targetFeature) + pred_dict = {x_feature: X_test, + 'y': y_test, + 'y_mean': y_mean, + 'y_upper': y_upper, + 'y_lower': y_lower + } + pred_df = pd.DataFrame(data=pred_dict) + x_feature1 = x_feature.split(',') + pred_df_sorted = pred_df.sort_values(by=x_feature1) + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') + plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') + plt.legend() + plt.xlabel(x_feature1[0]) + plt.ylabel(y_feature) + plt.title('UQ Confidence Interval Plot.') + # plt.savefig('uq_test_plt.png') + ''' + if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'): + os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') + ''' + plt.savefig(str(self.Deployment)+'/uq_test_plt.png') + #plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') + confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + pltreg=plot_picp_by_feature(X_test, y_test, + y_lower, y_upper, + xlabel=x_feature) + #pltreg.savefig('x.png') + pltr=pltreg.figure + ''' + if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'): + os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') + ''' + pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') + 
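# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original class): display_results() and
# aion_confidence_plot() above draw the observed points, the mean prediction
# and the interval bounds with matplotlib. A minimal standalone sketch of that
# plot on synthetic data; the output filename is a placeholder, not the
# deployment path used by the class.
import matplotlib
matplotlib.use("Agg")   # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 10, 100)
y_mean = np.sin(x)
y_lower, y_upper = y_mean - 0.4, y_mean + 0.4
y_obs = y_mean + np.random.default_rng(0).normal(scale=0.3, size=x.size)

plt.plot(x, y_obs, "o", markersize=3, label="Observed")
plt.plot(x, y_mean, "-", lw=2, label="Predicted")
plt.fill_between(x, y_lower, y_upper, color="lightblue", alpha=0.5, label="Interval")
plt.legend()
plt.xlabel("feature")
plt.ylabel("target")
plt.title("UQ Confidence Interval Plot (sketch)")
plt.savefig("uq_sketch_plot.png")   # placeholder path for illustration
plt.close()
# ---------------------------------------------------------------------------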
picpPlot = os.path.join(self.Deployment,'picp_per_feature.png') + #pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') + plt.clf() + plt.cla() + plt.close() + except Exception as e: + print(""display exception: \\n"",e) + # self.log.info(' '+str(e)) + return confidencePlot,picpPlot + + def classUncertainty(self,predprob_base): + # from collections import Counter + predc=""Class_"" + classes = np.unique(self.y) + total = len(self.y) + list_predprob=[] + counter = Counter(self.y) + #for loop for test class purpose + for k,v in counter.items(): + n_samples = len(self.y[self.y==k]) + per = ((v/total) * 100) + prob_c=predprob_base[:,int(k)] + list_predprob.append(prob_c) + # #print(""Class_{} : {}/{} percentage={}% \\n"".format(k,n_samples,total,per )) + outuq={} + for k in classes: + predc += str(k) + mean_predprob_class=np.mean(list_predprob[int(k)]) + uncertainty=1-mean_predprob_class + predc+='_Uncertainty' + outuq[predc]=uncertainty + predc=""Class_"" + return outuq + + + def uqMain_BBMClassification(self): + # self.log.info(' ') + # import matplotlib.pyplot as plt + try: + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification + except: + ##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification. + from uq360.algorithms.blackbox_metamodel import MetamodelClassification + + # from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics + # from sklearn import datasets + # from sklearn.model_selection import train_test_split + # from sklearn.metrics import accuracy_score + from sklearn.ensemble import GradientBoostingClassifier + from sklearn.linear_model import LogisticRegression + from sklearn.linear_model import SGDClassifier + from sklearn.naive_bayes import GaussianNB + from sklearn.tree import DecisionTreeClassifier + from sklearn.ensemble import RandomForestClassifier + from sklearn.svm import SVC + from sklearn.neighbors import KNeighborsClassifier + # from sklearn.linear_model import LogisticRegression + + # import pandas as pd + base_modelname=__class__.__name__ + base_config = self.uqconfig_base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + model_params=self.basemodel.get_params() + try: + #geting used features + model_used_features=self.basemodel.feature_names_in_ + except: + pass + X_train, X_test, y_train, y_test = self.xt" +"rain,self.xtest,self.ytrain,self.ytest + uq_scoring_param='accuracy' + basemodel=None + if (model_name == ""GradientBoostingClassifier""): + basemodel=GradientBoostingClassifier + elif (model_name == ""SGDClassifier""): + basemodel=SGDClassifier + elif (model_name == ""GaussianNB""): + basemodel=GaussianNB + elif (model_name == ""DecisionTreeClassifier""): + basemodel=DecisionTreeClassifier + elif(model_name == ""RandomForestClassifier""): + basemodel=RandomForestClassifier + elif (model_name == ""SVC""): + basemodel=SVC + elif(model_name == ""KNeighborsClassifier""): + basemodel=KNeighborsClassifier + elif(model_name == ""LogisticRegression""): + basemodel=LogisticRegression + else: + basemodel=LogisticRegression + + try: + try: + ##Removed meta_config because leave meta model config as default ml model params + uq_model = 
BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) + except: + uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) + except: + ##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification + try: + ##Removed meta_config because leave meta model config as default ml model params + uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) + except: + uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) + + # this will fit both the base and the meta model + try: + X_train=X_train[model_used_features] + X_test=X_test[model_used_features] + except: + pass + + uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) + # uqmodel_fit = uq_model.fit(X_train, y_train) + #Test data pred, score + y_t_pred, y_t_score = uq_model.predict(X_test) + #predict probability + # uq_pred_prob=uq_model.predict_proba(X_test) + # predprob_base=basemodel.predict_proba(X_test)[:, :] + #if (model_name == ""SVC"" or model_name == ""SGDClassifier""): + # if model_name in ['SVC','SGDClassifier']: + if (model_name == ""SVC""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SVC(**model_params) + calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_svc.fit(X_train, y_train) + basepredict = basemodel.predict(X_test) + predprob_base = calibrated_svc.predict_proba(X_test)[:, :] + elif (model_name == ""SGDClassifier""): + from sklearn.calibration import CalibratedClassifierCV + basemodel=SGDClassifier(**model_params) + calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) + calibrated_svc.fit(X_train, y_train) + basepredict = basemodel.predict(X_test) + predprob_base = calibrated_svc.predict_proba(X_test)[:, :] + else: + base_mdl = basemodel(**model_params) + basemodelfit = base_mdl.fit(X_train, y_train) + basepredict = base_mdl.predict(X_test) + predprob_base=base_mdl.predict_proba(X_test)[:, :] + + acc_score=accuracy_score(y_test, y_t_pred) + test_accuracy_perc=round(100*acc_score) + + ''' + bbm_c_plot = plot_risk_vs_rejection_rate( + y_true=y_test, + y_prob=predprob_base, + selection_scores=y_t_score, + y_pred=y_t_pred, + plot_label=['UQ_risk_vs_rejection'], + risk_func=accuracy_score, + num_bins = 10 ) + + # This done by kiran, need to uncomment for GUI integration. + try: + bbm_c_plot_sub = bbm_c_plot[4] + bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png') + riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png') + except Exception as e: + print(e) + pass + riskPlot = '' + ''' + riskPlot = '' + ''' + try: + re_plot=plot_reliability_diagram(y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + plot_label=['UQModel reliability_diagram'], + num_bins=10) + # This done by kiran, need to uncomment for GUI integration. 
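# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original method): the nested try/except
# around the metamodel construction above exists because, per the comment in
# this file, newer uq360 releases renamed BlackboxMetamodelClassification to
# MetamodelClassification. A sketch of handling that rename once, at import
# time; whether either name is available depends on the installed uq360.
try:
    from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification as MetaClassifier
except ImportError:
    try:
        from uq360.algorithms.blackbox_metamodel import MetamodelClassification as MetaClassifier
    except ImportError:
        MetaClassifier = None   # uq360 missing entirely; callers must check

if MetaClassifier is None:
    print("uq360 is not installed; UQ classification is unavailable")
# ---------------------------------------------------------------------------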
+ re_plot_sub = re_plot[4] + # re_plot_sub = re_plot + re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png') + reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png') + except Exception as e: + print(e) + pass + reliability_plot = '' + ''' + reliability_plot = '' + uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, + y_prob=predprob_base, + y_pred=y_t_pred, + selection_scores=y_t_score, + attributes=None, + risk_func=accuracy_score,subgroup_ids=None, return_counts=False, + num_bins=10) + uq_aurrrc=uq_aurrrc + test_accuracy_perc=round(test_accuracy_perc) + + #metric_all=compute_classification_metrics(y_test, y_prob, option='all') + metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') + #expected_calibration_error + uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False) + uq_aurrrc=uq_aurrrc + confidence_score=acc_score-uq_ece + + ece_confidence_score=round(confidence_score,2) + # Model uncertainty using ECE score + # model_uncertainty_ece = 1-ece_confidence_score + # #print(""model_uncertainty1: \\n"",model_uncertainty_ece) + + #Uncertainty Using model inherent predict probability + mean_predprob_total=np.mean(predprob_base) + model_uncertainty = 1-mean_predprob_total + model_confidence=mean_predprob_total + model_confidence = round(model_confidence,2) + + + # To get each class values and uncertainty + outuq = self.classUncertainty(predprob_base) + # Another way to get conf score + model_uncertainty_per=round((model_uncertainty*100),2) + # model_confidence_per=round((model_confidence*100),2) + model_confidence_per=round((ece_confidence_score*100),2) + acc_score_per = round((acc_score*100),2) + uq_ece_per=round((uq_ece*100),2) + + output={} + recommendation = """" + + if (uq_ece > 0.5): + # RED text + recommendation = 'Model has high ece (expected calibration error) score compare to threshold (50%),not good to deploy. Add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' + msg = 'Bad' + else: + # self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.') + + if (uq_ece <= 0.1 and model_confidence >= 0.9): + # Green Text + recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' + msg = 'Best' + else: + # Orange + recommendation = 'Model has average confidence score (ideal is >90% confidence) and good ECE score (ideal is <10% error).Model can be improved by adding more training data across all feature ranges and re-training the model.' 
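# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original method): the recommendation
# above is driven by the expected calibration error (ECE) returned by
# uq360.metrics. A NumPy-only sketch of the usual binned ECE definition
# (average |accuracy - confidence| per confidence bin, weighted by bin size),
# on made-up probabilities, for readers without uq360.
import numpy as np


def expected_calibration_error_sketch(y_true, y_prob, num_bins=10):
    confidence = np.max(y_prob, axis=1)      # predicted-class probability
    predictions = np.argmax(y_prob, axis=1)
    correct = (predictions == y_true).astype(float)
    bins = np.linspace(0.0, 1.0, num_bins + 1)
    ece = 0.0
    for lo, hi in zip(bins[:-1], bins[1:]):
        in_bin = (confidence > lo) & (confidence <= hi)
        if in_bin.any():
            gap = abs(correct[in_bin].mean() - confidence[in_bin].mean())
            ece += in_bin.mean() * gap       # weight by fraction of samples in bin
    return ece


rng = np.random.default_rng(0)
probs = rng.dirichlet(alpha=[2, 2, 2], size=500)   # fake 3-class scores
labels = rng.integers(0, 3, size=500)              # fake ground truth
print(f"ECE ~ {expected_calibration_error_sketch(labels, probs):.3f}")
# ---------------------------------------------------------------------------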
+ msg = 'Good' + #Adding each class uncertainty value + output['Problem']= 'Classification' + output['recommend']= 'recommend' + output['msg']= msg + output['UQ_Area_Under_Risk_Rejection_Rate_Curve']=round(uq_aurrrc,4) + + output['Model_Total_Confidence']=(str(model_confidence_per)+str('%')) + output['Expected_Calibration_Error']=(str(uq_ece_per)+str('%')) + output['Model_Total_Uncertainty']=(str(model_uncertainty_per)+str('%')) + # output['Risk Plot'] = str(riskPlot) + # output['Reliability Plot'] = str(reliability_plot) + for k,v in outuq.items(): + output[k]=(str(round((v*100),2))+str(' %')) + output['Recommendation']=recommendation + # output['user_msg']='Please check the plot for more understanding of model uncertainty' + output['Metric_Accuracy_Score']=(str(acc_score_per)+str(' %')) + + outputs = json.dumps(output) + + with open(str(self.Deployment)+""/uq_classification_log.json"", ""w"") as f: + json.dump(output, f) + return test_accuracy_perc,uq_ece,outputs + + def aion_confidence_plot(self,df): + try: + global x_feature + df=df + df = df.sort_values(by=self.selectedfeature) + best_values=df.Best_values.to_list() + best_upper=df.Best__upper.to_list() + best_lower=df.Best__lower.to_list() + Total_Upper_PI=df.Total_Upper_PI.to_list() + Total_Low_PI=df.Total_Low_PI.to_list() + Obseved = df.Observed.to_list() + x_feature1 = x_feature.split(',') + plt.plot(df[x_feature1[0]], df['Observed'], 'o', label='Observed') + plt.plot(df[x_feature1[0]], df['Best__upper'],'r--', lw=2, color='grey') + plt.plot(df[x_feature1[0]], df['Best__lower'],'r--', lw=2, color='grey') + plt.plot(df[x_feature1[0]], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') + + plt.fill_between(df[x_feature1[0]], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) + plt.fill_between(df[x_feature1[0]],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) + plt.legend() + plt.xlabel(x_feature1[0]) + plt.ylabel(self.targetFeature) + plt.title('UQ Best & Good Area Plot') + ''' + if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png'): + os.remove(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') + + plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') + ''' + plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') + uq_confidence_plt = os.path.join(str(self.Deployment),'uq_confidence_plt.png') + except Exception as inst: + print('-----------dsdas->',inst) + uq_confidence_plt = '' + return uq_confidence_plt + def uqMain_BBMRegression(self): + # modelName = """" + # self.log.info(' ') + try: + + from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression + import pandas as pd + base_modelname=__class__.__name__ + base_config = self.uqconfig_" +"base + meta_config = self.uqconfig_base + model_name=self.basemodel.__class__.__name__ + model_params=self.basemodel.get_params() + # #print(""model_params['criterion']: \\n"",model_params['criterion']) + key = 'criterion' + #if key in model_params: + try: + #if model_params.has_key(key): + if key in model_params: + if (model_params['criterion']): + uq_scoring_param=model_params.get('criterion') + elif(model_params['criterion'] == None): + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + else: + uq_scoring_param='picp' + pass + except Exception as inst: + uq_scoring_param='picp' + + # modelname='sklearn.linear_model'+'.'+model_name + # self.xtrain = self.xtrain.values.reshape((-1,1)) + # self.xtest = self.xtest.values.reshape((-1,1)) + if 
(isinstance(self.selectedfeature,list)): + selectedfeature=[self.selectedfeature[0]] + selectedfeature=' '.join(map(str,selectedfeature)) + if (isinstance(self.targetFeature,list)): + targetFeature=[self.targetFeature[0]] + targetFeature=' '.join(map(str,targetFeature)) + + X = self.data[selectedfeature] + y = self.data[targetFeature] + X = X.values.reshape((-1,1)) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) + #Geeting trained model name and to use the model in BlackboxMetamodelRegression + from sklearn.tree import DecisionTreeRegressor + from sklearn.linear_model import LinearRegression,Lasso,Ridge + from sklearn.ensemble import RandomForestRegressor + if (model_name == ""DecisionTreeRegressor""): + basemodel=DecisionTreeRegressor + elif (model_name == ""LinearRegression""): + basemodel=LinearRegression + elif (model_name == ""Lasso""): + basemodel=Lasso + elif (model_name == ""Ridge""): + basemodel=Ridge + elif(model_name == ""RandomForestRegressor""): + basemodel=RandomForestRegressor + else: + basemodel=LinearRegression + + if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: + if (uq_scoring_param.lower() == 'picp'): + uq_scoring_param='prediction interval coverage probability score (picp)' + else: + uq_scoring_param=uq_scoring_param + else: + uq_scoring_param='prediction interval coverage probability score (picp)' + uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) + # this will fit both the base and the meta model + uqmodel_fit = uq_model.fit(X_train, y_train) + y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) + from uq360.metrics import picp, mpiw + observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) + observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) + picp_percentage= round(observed_alphas_picp*100) + Uncertainty_percentage=round(100-picp_percentage) + # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” + metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) + metric_used='' + for k,v in metric_all.items(): + metric_used=str(round(v,2)) + + # Determine the confidence level and recommentation to the tester + # test_data=y_test + observed_alphas_picp=round(observed_alphas_picp,2) + observed_widths_mpiw=round(observed_widths_mpiw,2) + #Calculate total uncertainty for all features + # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) + # df1=self.data + total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params) + recommendation="""" + observed_widths_mpiw = round((observed_widths_mpiw/1000000)*100) + if observed_widths_mpiw > 100: + observed_widths_mpiw = 100 + output={} + + if (observed_alphas_picp >= 0.90 and total_picp >= 0.75): + # GREEN text + recommendation = ""Model has good confidence and MPIW score, ready to deploy."" + msg='Good' + elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.90) and (total_picp >= 0.50)): + # Orange + recommendation = "" Model has average confidence compare to threshold (ideal is both model confidence and MPIW should be >90%) .Model can be improved by adding more training data across all feature ranges and re-training the model."" + msg = 'Average' + + else: + # RED text + recommendation = ""Model has less confidence 
compare to threshold (ideal is both model confidence and MPIW should be >90%), need to be add more input data across all feature ranges and retrain base model, also try with different regression algorithms/ensembling."" + msg = 'Bad' + + + #Build uq json info dict + output['Model_total_confidence']=(str(total_picp_percentage)+'%') + output['Model_total_Uncertainty']=(str(total_Uncertainty_percentage)+'%') + output['Selected_feature_confidence']=(str(picp_percentage)+'%') + output['Selected_feature_Uncertainty']=(str(Uncertainty_percentage)+'%') + output['Prediction_Interval_Coverage_Probability']=observed_alphas_picp + output['Mean_Prediction_Interval_Width']=str(observed_widths_mpiw)+'%' + output['Desirable_MPIW_range']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) + output['Recommendation']=str(recommendation) + output['Metric_used']=uq_scoring_param + output['Metric_value']=metric_used + output['Problem']= 'Regression' + output['recommend']= 'recommend' + output['msg'] = msg + + with open(str(self.Deployment)+""/uq_reg_log.json"", ""w"") as f: + json.dump(output, f) + #To get best and medium UQ range of values from total predict interval + y_hat_m=y_hat.tolist() + y_hat_lb=y_hat_lb.tolist() + upper_bound=y_hat_ub.tolist() + y_hat_ub=y_hat_ub.tolist() + for x in y_hat_lb: + y_hat_ub.append(x) + total_pi=y_hat_ub + medium_UQ_range = y_hat_ub + best_UQ_range= y_hat.tolist() + ymean_upper=[] + ymean_lower=[] + y_hat_m=y_hat.tolist() + for i in y_hat_m: + y_hat_m_range= (i*20/100) + x=i+y_hat_m_range + y=i-y_hat_m_range + ymean_upper.append(x) + ymean_lower.append(y) + + + min_best_uq_dist=round(min(best_UQ_range)) + max_best_uq_dist=round(max(best_UQ_range)) + # initializing ranges + list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) + list_best = y_hat_m + ''' + print(X_test) + print(X_test) + X_test = np.squeeze(X_test) + print(x_feature) + ''' + uq_dict = pd.DataFrame(X_test) + #print(uq_dict) + uq_dict['Observed'] = y_test + uq_dict['Best_values'] = y_hat_m + uq_dict['Best__upper'] = ymean_upper + uq_dict['Best__lower'] = ymean_lower + uq_dict['Total_Low_PI'] = y_hat_lb + uq_dict['Total_Upper_PI'] = upper_bound + ''' + uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, + 'Best__upper':ymean_upper, + 'Best__lower':ymean_lower, + 'Total_Low_PI': y_hat_lb, + 'Total_Upper_PI': upper_bound, + }''' + uq_pred_df = pd.DataFrame(data=uq_dict) + uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') + uq_pred_df_sorted.to_csv(str(self.Deployment)+""/uq_pred_df.csv"",index = False) + csv_path=str(self.Deployment)+""/uq_pred_df.csv"" + df=pd.read_csv(csv_path) + + # self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.') + # confidenceplot = self.aion_confidence_plot(df) + # output['Confidence Plot']= confidenceplot + uq_jsonobject = json.dumps(output) + print(""UQ regression problem training completed...\\n"") + return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject + + except Exception as inst: + print('-------',inst) + exc = {""status"":""FAIL"",""message"":str(inst).strip('""')} + out_exc = json.dumps(exc) + + + + ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. 
All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import pandas as pd +import numpy as np +import os +import datetime, time, timeit +from sklearn.model_selection import KFold +from sklearn.metrics import confusion_matrix +from sklearn.metrics import accuracy_score +from sklearn.metrics import classification_report +import pickle +import logging + +class recommendersystem(): + def __init__(self,features,svd_params): + self.features = features + self.svd_input = svd_params + self.log = logging.getLogger('eion') + print (""recommendersystem starts \\n"") + + #To extract dict key,values + def extract_params(self,dict): + self.dict=dict + for k,v in self.dict.items(): + return k,v + + + + def recommender_model(self,df,outputfile): + from sklearn.metrics.pairwise import cosine_similarity + from utils.file_ops import save_csv + USER_ITEM_MATRIX = 'user_item_matrix' + ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix' + selectedColumns = self.features.split(',') + data = pd.DataFrame() + for i in range(0,len(selectedColumns)): + data[selectedColumns[i]] = df[selectedColumns[i]] + dataset = data + self.log.info('-------> Top(5) Rows') + self.log.info(data.head(5)) + start = time.time() + self.log.info('\\n----------- Recommender System Training Starts -----------') + #--------------- Task 11190:recommender system changes Start ---Usnish------------------# + # selectedColumns = ['userId', 'movieId', 'rating'] + df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns" +"[2], 'count')).reset_index() + + self.log.info('-------> Top 10 most rated Items:') + self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10)) + + matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2]) + relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv') + matrix.to_csv(relative_file) + item_similarity_cosine = cosine_similarity(matrix.fillna(0)) + item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId')) + self.log.info('---------> Item-Item Similarity matrix created:') + self.log.info(item_similarity_cosine.head(5)) + + relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv') + save_csv(item_similarity_cosine,relative_file) + + + # --------------- recommender system changes End ---Usnish------------------# + + + + executionTime=time.time() - start + self.log.info(""------->Execution Time: ""+str(executionTime)) + self.log.info('----------- Recommender System Training End -----------\\n') + + return ""filename"",matrix,""NA"","""","""" ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. 
Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +''' +import numpy as np +import pickle +import pandas as pd +import sys +import time +import os +from os.path import expanduser +import platform +from sklearn.preprocessing import binarize +import logging +import tensorflow as tf +from sklearn.model_selection import train_test_split +from tensorflow.keras import preprocessing +from sklearn.metrics import roc_auc_score +from sklearn.metrics import accuracy_score +from tensorflow.keras.preprocessing.sequence import pad_sequences +from tensorflow.keras.layers import Input, Embedding, LSTM, Lambda +import tensorflow.keras.backend as K +from tensorflow.keras.models import Model +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.layers import Concatenate +from tensorflow.keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D +from sklearn.metrics.pairwise import cosine_similarity, cosine_distances + +import tensorflow.keras.backend as K +from tensorflow.keras.models import Model, Sequential +from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers + +## Keras subclassing based siamese network +class siameseNetwork(Model): + def __init__(self, activation,inputShape, num_iterations): + self.activation=activation + self.log = logging.getLogger('eion') + super(siameseNetwork, self).__init__() + i1 = layers.Input(shape=inputShape) + i2 = layers.Input(shape=inputShape) + featureExtractor = self.build_feature_extractor(inputShape, num_iterations) + f1 = featureExtractor(i1) + f2 = featureExtractor(i2) + #distance vect + distance = layers.Concatenate()([f1, f2]) + cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) + c_loss=cosine_loss(f1, f2) + similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2]) + outputs = layers.Dense(1, activation=""sigmoid"")(distance) + self.model = Model(inputs=[i1, i2], outputs=outputs) + ##Build dense sequential layers + def build_feature_extractor(self, inputShape, num_iterations): + layers_config = [layers.Input(inputShape)] + for i, n_units in enumerate(num_iterations): + layers_config.append(layers.Dense(n_units)) + layers_config.append(layers.Dropout(0.2)) + layers_config.append(layers.BatchNormalization()) + layers_config.append(layers.Activation(self.activation)) + model = Sequential(layers_config, name='feature_extractor') + + return model + + def call(self, x): + return self.model(x) + +def euclidean_distance(vectors): + (f1, f2) = vectors + sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True) + return K.sqrt(K.maximum(sumSquared, K.epsilon())) + +def cosine_similarity(vectors): + (f1, f2) = vectors + f1 = K.l2_normalize(f1, axis=-1) + f2 = K.l2_normalize(f2, axis=-1) + return K.mean(f1 * f2, axis=-1, keepdims=True) + + + +def cos_dist_output_shape(shapes): + shape1, shape2 = shapes + return (shape1[0],1) + + +class eion_similarity_siamese: + def __init__(self): + self.log = logging.getLogger('eion') + + def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file): + try: + self.log.info('-------> Read Embedded File') + home = expanduser(""~"") + if platform.system() == 'Windows': + modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity') + else: + modelsPath = 
os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity') + if os.path.isdir(modelsPath) == False: + os.makedirs(modelsPath) + embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt') + if not os.path.exists(embedding_file_path): + from pathlib import Path + import urllib.request + import zipfile + location = modelsPath + local_file_path = os.path.join(location,""glove.6B.zip"") + file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path) + with zipfile.ZipFile(local_file_path, 'r') as zip_ref: + zip_ref.extractall(location) + os.unlink(os.path.join(location,""glove.6B.zip"")) + if os.path.isfile(os.path.join(location,""glove.6B.50d.txt"")): + os.unlink(os.path.join(location,""glove.6B.50d.txt"")) + if os.path.isfile(os.path.join(location,""glove.6B.300d.txt"")): + os.unlink(os.path.join(location,""glove.6B.300d.txt"")) + if os.path.isfile(os.path.join(location,""glove.6B.200d.txt"")): + os.unlink(os.path.join(location,""glove.6B.200d.txt"")) + X = df[[col1,col2]] + Y = df[targetColumn] + testPercentage = testPercentage + self.log.info('\\n-------------- Test Train Split ----------------') + if testPercentage == 0: + xtrain=X + ytrain=Y + xtest=X + ytest=Y + else: + testSize=testPercentage/100 + self.log.info('-------> Split Type: Random Split') + self.log.info('-------> Train Percentage: '+str(testSize)) + X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize) + self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->') + self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->') + self.log.info('-------------- Test Train Split End ----------------\\n') + self.log.info('\\n-------------- Train Validate Split ----------------') + X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42) + self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->') + self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->') + self.log.info('-------------- Train Validate Split End----------------\\n') + self.log.info('Status:- |... 
Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') + train_sentence1 = pipe.texts_to_sequences(X_train[col1].values) + train_sentence2 = pipe.texts_to_sequences(X_train[col2].values) + val_sentence1 = pipe.texts_to_sequences(X_val[col1].values) + val_sentence2 = pipe.texts_to_sequences(X_val[col2].values) + len_vec = [len(sent_vec) for sent_vec in train_sentence1] + max_len = np.max(len_vec) + len_vec = [len(sent_vec) for sent_vec in train_sentence2] + if (max_len < np.max(len_vec)): + max_len = np.max(len_vec) + train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post') + train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post') + val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post') + val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post') + y_train = y_train.values + y_val = y_val.values + activation = str(conf['activation']) + model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10]) + model.compile( + loss=""binary_crossentropy"", + optimizer=optimizers.Adam(learning_rate=0.0001), + metrics=[""accuracy""]) + es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True) + rlp = callbacks.ReduceLROnPlateau( + monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1 + ) + x_valid=X_val + y_valid=y_val + n_epoch = int(conf['num_epochs']) + batch_size = int(conf['batch_size']) + similarityIndex = conf['similarityIndex'] + model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size, + validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp]) + scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0) + self.log.info('-------> Model Score Matrix: Accuracy') + self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) + self.log.info('Status:- |... Algorithm applied: SIAMESE') + test_sentence1 = pipe.texts_to_sequences(X_test[col1].values) + test_sentence2 = pipe.texts_to_sequences(X_test[col2].values) + test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post') + test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post') + prediction = model.predict([test_sentence1, test_sentence2 ]) + n_epoch = conf['num_epochs'] + batch_size = conf['batch_size'] + activation = conf['activation'] + similarityIndex = conf['similarityIndex'] + self.log.info('-------> similarityIndex : '+str(similarityIndex)) + prediction = np.where(prediction > similarityIndex,1,0) + rocauc_sco = roc_auc_score(y_test,prediction) + acc_sco = accuracy_score(y_test, prediction) + predict_df = pd.DataFrame() + predict_df['actual'] = y_test + predict_df['predict'] = prediction + predict_df.to_csv(predicted_data_file) + self.log.info('-------> Model Score Matrix: Accuracy') + self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) + self.log.info('Status:- |... 
Algorithm applied: SIAMESE') + test_sentence1 = pipe.texts_to_sequences(X_test[col1].values) + test_sentence2 = pipe.texts_to_sequences(X_test[col2].values) + test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post') + test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post') + prediction = model.predict([test_sentence1, test_sentence2 ]) + prediction = np.where(prediction > similarityIndex,1,0) + rocauc_sco = roc_auc_score(y_test,prediction) + acc_sco = accuracy_score(y_test, prediction) + predict_df = pd.DataFrame() + predict_df['actual'] = y_test + predict_df['predict'] = prediction + predict_df.to_csv(predicted_data_file) + self.log.info(""predict_df: \\n""+str(predict_df)) + sco = acc_sco + self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco)) + self.log.info('Status:- |... Testing Score: '+str(acc_sco)) + self." +"log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco)) + matrix = '""Accuracy"":'+str(acc_sco)+',""ROC AUC"":'+str(rocauc_sco) + + prediction = model.predict([train_sentence1, train_sentence2]) + prediction = np.where(prediction > similarityIndex,1,0) + train_rocauc_sco = roc_auc_score(y_train,prediction) + train_acc_sco = accuracy_score(y_train, prediction) + self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco)) + self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco)) + trainmatrix = '""Accuracy"":'+str(train_acc_sco)+',""ROC AUC"":'+str(train_rocauc_sco) + model_tried = '{""Model"":""SIAMESE"",""Score"":'+str(sco)+'}' + saved_model = 'textsimilarity_'+iterName+'_'+iterVersion + # filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.sav') + # filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.h5') + ## Because we are using subclassing layer api, please use dir (as below) to store deep learn model instead of .h5 model. + filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion) + model.save(filename) + # model.save_weights(filename) + model_name = 'SIAMESE MODEL' + + return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex) + except Exception as inst: + self.log.info(""SIAMESE failed "" + str(inst)) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) ''' +* +* ============================================================================= +* COPYRIGHT NOTICE +* ============================================================================= +* @ Copyright HCL Technologies Ltd. 2021, 2022,2023 +* Proprietary and confidential. All information contained herein is, and +* remains the property of HCL Technologies Limited. Copying or reproducing the +* contents of this file, via any medium is strictly prohibited unless prior +* written permission is obtained from HCL Technologies Limited. +* +'''"
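+ # ---------------------------------------------------------------------------
+ # Editor's illustrative sketch (assumption, not AION/uq360 code): minimal
+ # NumPy versions of the uncertainty metrics the UQ trainers above read from
+ # uq360 (expected_calibration_error for classification, picp/mpiw for
+ # regression). The helper names ece_score, picp_score and mpiw_score are
+ # hypothetical stand-ins and only approximate the uq360 implementations.
+ import numpy as np
+
+ def ece_score(y_true, y_prob, n_bins=10):
+     # Expected calibration error: bin samples by predicted confidence and
+     # average the |accuracy - confidence| gap, weighted by bin population.
+     conf = np.max(y_prob, axis=1)
+     pred = np.argmax(y_prob, axis=1)
+     edges = np.linspace(0.0, 1.0, n_bins + 1)
+     ece = 0.0
+     for lo, hi in zip(edges[:-1], edges[1:]):
+         in_bin = (conf > lo) & (conf <= hi)
+         if in_bin.any():
+             acc = np.mean(pred[in_bin] == y_true[in_bin])
+             ece += np.mean(in_bin) * abs(acc - np.mean(conf[in_bin]))
+     return ece
+
+ def picp_score(y_true, y_lower, y_upper):
+     # Prediction interval coverage probability: fraction of observations that
+     # fall inside [lower, upper]; the regression trainer treats >= 0.90 as good.
+     return float(np.mean((y_true >= y_lower) & (y_true <= y_upper)))
+
+ def mpiw_score(y_lower, y_upper):
+     # Mean prediction interval width: at equal coverage, narrower intervals
+     # indicate a more informative (less uncertain) model.
+     return float(np.mean(y_upper - y_lower))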
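+ # Editor's illustrative sketch (assumption, not AION code): one way the
+ # user_item_matrix / item_similarity_matrix CSVs written by
+ # recommendersystem.recommender_model above could be consumed at inference
+ # time. recommend_for_user is a hypothetical helper and assumes the
+ # similarity matrix shares the item index of the ratings pivot.
+ import numpy as np
+ import pandas as pd
+
+ def recommend_for_user(user_item, item_sim, user_id, top_n=5):
+     # user_item: items x users rating pivot (NaN = unrated), as saved above.
+     # item_sim : items x items cosine-similarity matrix, as saved above.
+     rated = user_item[user_id].dropna()
+     sims = item_sim.loc[:, rated.index]
+     # Predicted score per item = similarity-weighted average of the ratings
+     # the user has already given.
+     scores = (sims * rated).sum(axis=1) / sims.abs().sum(axis=1).replace(0, np.nan)
+     # Hide items the user already rated and return the strongest suggestions.
+     return scores.drop(index=rated.index).sort_values(ascending=False).head(top_n)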
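+ # Editor's illustrative sketch (assumption, not AION code): scoring a new
+ # sentence pair with the SIAMESE model trained by eion_similarity_siamese
+ # above. is_similar is a hypothetical helper; it assumes the fitted Keras
+ # tokenizer (pipe), the saved model directory, and the max_len and
+ # similarityIndex values that siamese_model() returns.
+ import tensorflow as tf
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
+
+ def is_similar(model_dir, pipe, text_a, text_b, max_len, similarity_index):
+     # Tokenize and pad both sentences exactly as done during training.
+     seq_a = pad_sequences(pipe.texts_to_sequences([text_a]), maxlen=max_len, padding='post')
+     seq_b = pad_sequences(pipe.texts_to_sequences([text_b]), maxlen=max_len, padding='post')
+     # The subclassed model was stored with model.save(<dir>), so reload it
+     # from that SavedModel directory.
+     model = tf.keras.models.load_model(model_dir)
+     score = float(model.predict([seq_a, seq_b])[0][0])
+     # Same thresholding rule as training: above the index means 'similar'.
+     return score, int(score > similarity_index)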