import argparse
import sys
import os
import subprocess
INSTALL = 'install'
LINUXINSTALL = 'linuxinstall'
FE_MIGRATE = 'migrateappfe'
LAUNCH_KAFKA = 'launchkafkaconsumer'
RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal'
BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal'
CONVERT_MODEL = 'convertmodel'
START_MLFLOW = 'mlflow'
COMMON_SERVICE = 'service'
TRAINING = 'training'
TRAINING_AWS = 'trainingonaws'
TRAINING_DISTRIBUTED = 'distributedtraining'
START_APPF = 'appfe'
ONLINE_TRAINING = 'onlinetraining'
TEXT_SUMMARIZATION = 'textsummarization'
GENERATE_MLAC = 'generatemlac'
AWS_TRAINING = 'awstraining'
LLAMA_7B_TUNING = 'llama7btuning'
LLM_PROMPT = 'llmprompt'
LLM_TUNING = 'llmtuning'
LLM_PUBLISH = 'llmpublish'
LLM_BENCHMARKING = 'llmbenchmarking'
TELEMETRY_PUSH = 'pushtelemetry'
def aion_aws_training(confFile):
from hyperscalers.aion_aws_training import awsTraining
status = awsTraining(confFile)
print(status)
def aion_training(confFile):
from bin.aion_pipeline import aion_train_model
status = aion_train_model(confFile)
print(status)
def aion_awstraining(config_file):
from hyperscalers import aws_instance
print(config_file)
aws_instance.training(config_file)
def aion_generatemlac(ConfFile):
from bin.aion_mlac import generate_mlac_code
status = generate_mlac_code(ConfFile)
print(status)
def aion_textsummarization(confFile):
from bin.aion_text_summarizer import aion_textsummary
status = aion_textsummary(confFile)
print(status)
def aion_oltraining(confFile):
from bin.aion_online_pipeline import aion_ot_train_model
status = aion_ot_train_model(confFile)
print(status)
def do_telemetry_sync():
from appbe.telemetry import SyncTelemetry
SyncTelemetry()
def aion_llm_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
from llm.llm_inference import LLM_publish
LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image)
def aion_migratefe(operation):
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append(operation)
execute_from_command_line(argi)
def aion_appfe(url,port):
#manage_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'manage.py')
#subprocess.check_call([sys.executable,manage_location, "runserver","%s:%s"%(url,port)])
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append('runaion')
argi.append("%s:%s"%(url,port))
execute_from_command_line(argi)
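# Note: aion_appfe above hands control to Django's command runner; the call is
# roughly equivalent to running (assuming this script is saved as aion.py and
# 'runaion' is the project's management command):
#
#   DJANGO_SETTINGS_MODULE=appfe.ux.settings python aion.py runaion <ip>:<port>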
def aion_linux_install(version):
from install import linux_dependencies
linux_dependencies.process(version)
def aion_install(version):
from install import dependencies
dependencies.process(version)
def aion_service(ip,port,username,password):
from bin.aion_service import start_server
start_server(ip,port,username,password)
def aion_distributedLearning(confFile):
from distributed_learning import learning
learning.training(confFile)
def aion_launchkafkaconsumer():
from mlops import kafka_consumer
kafka_consumer.launch_kafka_consumer()
def aion_start_mlflow():
from appbe.dataPath import DEPLOY_LOCATION
import platform
import shutil
from os.path import expanduser
mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','Scripts','mlflow.exe'))
print(mlflowpath)
home = expanduser("~")
if platform.system() == 'Windows':
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
outputStr = subprocess.Popen([sys.executable, mlflowpath,"ui", "--backend-store-uri","file:///"+DEPLOY_LOCATION])
else:
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
subprocess.check_call(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri","file:///"+DEPLOY_LOCATION])
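# For reference, a sketch of the shell command issued by the non-Windows branch
# above; DEPLOY_LOCATION is resolved at runtime from appbe.dataPath, so the path
# shown here is only a placeholder:
#
#   mlflow ui -h 0.0.0.0 --backend-store-uri file:///<DEPLOY_LOCATION>/mlruns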
def aion_model_conversion(config_file):
from conversions import model_convertions
model_convertions.convert(config_file)
def aion_model_buildMLaCContainer(config):
from mlops import build_container
build_container.local_docker_build(config)
def aion_model_runpipelinelocal(config):
from mlops import local_pipeline
local_pipeline.run_pipeline(config)
def aion_llm_tuning(config):
from llm.llm_tuning import run
run(config)
def aion_llm_prompt(cloudconfig,instanceid,prompt):
from llm.aws_instance_api import LLM_predict
LLM_predict(cloudconfig,instanceid,prompt)
def llm_bench_marking(hypervisor,instanceid,model,usecaseid,eval):
print(eval)
from llm.bench_marking import bench_mark
bench_mark(hypervisor,instanceid,model,usecaseid,eval)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configPath', help='Config File Path')
parser.add_argument('-i', '--instanceid', help='instanceid')
parser.add_argument('-hv', '--hypervisor', help='hypervisor')
parser.add_argument('-md', '--model', help='model')
parser.add_argument('-uc', '--usecase', help='usecase')
parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path')
parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING,DISTRIBUTEDTRAINING')
parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method ')
parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method ')
parser.add_argument('-ac', '--appfecommand', help='APP Front End Command ')
parser.add_argument('-un','--username', help="USERNAME")
parser.add_argument('-passw','--password', help="PASSWORD")
parser.add_argument('-j', '--jsoninput', help='JSON Input')
parser.add_argument('-v', '--version', help='Installer Version')
parser.add_argument('-pf', '--prompt', help='Prompt File')
parser.add_argument('-r', '--region', help='REGION NAME')
parser.add_argument('-im', '--image', help='IMAGE NAME')
parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc')
args = parser.parse_args()
if args.module.lower() == TRAINING:
aion_training(args.configPath)
elif args.module.lower() == TRAINING_AWS:
aion_awstraining(args.configPath)
elif args.module.lower() == TRAINING_DISTRIBUTED:
aion_distributedLearning(args.configPath)
elif args.module.lower() == START_APPF:
aion_appfe(args.ipaddress,args.port)
elif args.module.lower() == ONLINE_TRAINING:
aion_oltraining(args.configPath)
elif args.module.lower() == TEXT_SUMMARIZATION:
aion_textsummarization(args.configPath)
elif args.module.lower() == GENERATE_MLAC:
aion_generatemlac(args.configPath)
elif args.module.lower() == COMMON_SERVICE:
aion_service(args.ipaddress,args.port,args.username,args.password)
elif args.module.lower() == START_MLFLOW:
aion_start_mlflow()
elif args.module.lower() == CONVERT_MODEL:
aion_model_conversion(args.configPath)
elif args.module.lower() == BUILD_MLAC_CONTAINER:
aion_model_buildMLaCContainer(args.jsoninput)
elif args.module.lower() == RUN_LOCAL_MLAC_PIPELINE:
aion_model_runpipelinelocal(args.jsoninput)
elif args.module.lower() == LAUNCH_KAFKA:
aion_launchkafkaconsumer()
elif args.module.lower() == INSTALL:
aion_install(args.version)
elif args.module.lower() == LINUXINSTALL:
aion_linux_install(args.version)
elif args.module.lower() == FE_MIGRATE:
aion_migratefe('makemigrations')
aion_migratefe('migrate')
elif args.module.lower() == AWS_TRAINING:
aion_aws_training(args.configPath)
elif args.module.lower() == LLAMA_7B_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_PROMPT:
aion_llm_prompt(args.cloudConfigPath,args.instanceid,args.prompt)
elif args.module.lower() == LLM_PUBLISH:
aion_llm_publish(args.cloudConfigPath,args.instanceid,args.hypervisor,args.model,args.usecase,args.region,args.image)
elif args.module.lower() == LLM_BENCHMARKING:
llm_bench_marking(args.hypervisor,args.instanceid,args.model,args.usecase, args.eval)
elif args.module.lower() == TELEMETRY_PUSH:
do_telemetry_sync()
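
# A minimal usage sketch for the dispatcher above, assuming this script is saved
# as aion.py; the config paths and identifiers are placeholders, not shipped files:
#
#   python aion.py -m training -c /path/to/usecase_config.json
#   python aion.py -m appfe -ip 127.0.0.1 -p 8008
#   python aion.py -m llmprompt -cc cloud.json -i <instanceid> -pf prompt.txt
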
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from .bin.aion_pipeline import aion_train_model
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import argparse
import logging
logging.getLogger('tensorflow').disabled = True
import math
import shutil
import re
from datetime import datetime as dt
import warnings
from config_manager.pipeline_config import AionConfigManager
import pandas as pd
import numpy as np
import sklearn
import string
from records import pushrecords
import logging
from pathlib import Path
from pytz import timezone
from config_manager.config_gen import code_configure
import joblib
from sklearn.model_selection import train_test_split
from config_manager.check_config import config_validate
from utils.file_ops import save_csv_compressed,save_csv,save_chromadb
LOG_FILE_NAME = 'model_training_logs.log'
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = True
def getversion():
configFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config')
version = 'NA'
for file in os.listdir(configFolder):
if file.endswith(".var"):
version = file.rsplit('.', 1)
version = version[0]
break
return version
AION_VERSION = getversion()
def pushRecordForTraining():
try:
status,msg = pushrecords.enterRecord(AION_VERSION)
except Exception as e:
print("Exception", e)
status = False
msg = str(e)
return status,msg
def mlflowSetPath(path,experimentname):
import mlflow
url = "file:" + str(Path(path).parent.parent) + "/mlruns"
mlflow.set_tracking_uri(url)
mlflow.set_experiment(str(experimentname))
def set_log_handler( basic, mode='w'):
deploy_loc = Path(basic.get('deployLocation'))
log_file_parent = deploy_loc/basic['modelName']/basic['modelVersion']/'log'
log_file_parent.mkdir(parents=True, exist_ok=True)
log_file = log_file_parent/LOG_FILE_NAME
filehandler = logging.FileHandler(log_file, mode,'utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
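# A minimal sketch of how set_log_handler is expected to be called; the 'basic'
# dict keys mirror the ones read above and the values are placeholders only:
#
#   log = set_log_handler({'deployLocation': '/tmp/aion',
#                          'modelName': 'usecase1', 'modelVersion': '1'})
#   # log file: <deployLocation>/<modelName>/<modelVersion>/log/model_training_logs.log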
class server():
def __init__(self):
self.response = None
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.textFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.dfcols=0
self.dfrows=0
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj, codeConfigure, log):
oldStdout = sys.stdout
model_training_details = ''
model_tried=''
learner_type = ''
topics = {}
pred_filename = ''
numericContinuousFeatures=''
discreteFeatures=''
sessonal_freq = ''
additional_regressors = ''
threshold=-1
targetColumn = ''
numericalFeatures =''
nonNumericFeatures=''
categoricalFeatures=''
dataFolderLocation = ''
featureReduction = 'False'
original_data_file = ''
normalizer_pickle_file = ''
pcaModel_pickle_file = ''
bpca_features= []
apca_features = []
lag_order = 1
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
dictDiffCount={}
cleaning_kwargs = {}
grouperbyjson = ''
rowfilterexpression=''
featureEngineeringSelector = 'false'
conversion_method = ''
params={}
loss_matrix='binary_crossentropy'
optimizer='Nadam'
numericToLabel_json='[]'
preprocessing_pipe=''
firstDocFeature = ''
secondDocFeature = ''
padding_length = 30
pipe = None
scalertransformationFile=None
column_merge_flag = False
merge_columns = []
score = 0
profilerObj = None
imageconfig=''
labelMaps={}
featureDataShape=[]
normFeatures = []
preprocess_out_columns = []
preprocess_pipe = None
label_encoder = None
unpreprocessed_columns = []
import pickle
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
inlierLabels=config_obj.getEionInliers()
scoreParam = config_obj.getScoringCreteria()
noofforecasts = config_obj.getNumberofForecasts()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
filter_expression = config_obj.getFilterExpression()
refined_filter_expression = ""
sa_images = []
model_tried = ''
deploy_config = {}
iterName = iterName.replace(" ", "_")
deployFolder = deployLocation
usecaseLocation,deployLocation,dataFolderLocation,imageFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile,reduction_data_file = config_obj.createDeploymentFolders(deployFolder,iterName,iterVersion)
outputLocation=deployLocation
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
# mlflowSetPath shuts down the logger, so set it up again
set_log_handler( config_obj.basic, mode='a')
xtrain=pd.DataFrame()
xtest=pd.DataFrame()
log.info('Status:-|... AION Training Configuration started')
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textsummarizationStatus,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus, objectDetectionStatus,stateTransitionStatus, similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus = config_obj.getModulesDetails()
status, error_id, msg = config_obj.validate_config()
if not status:
if error_id == 'fasttext':
raise ValueError(msg)
VideoProcessing = False
if(problem_type.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
from transformations.dataReader import dataReader
objData = dataReader()
DataIsFolder = False
folderdetails = config_obj.getFolderSettings()
if os.path.isfile(dataLocation):
log.info('Status:-|... AION Loading Data')
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
status,msg = save_csv_compressed(dataFrame,original_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
elif os.path.isdir(dataLocation):
if problem_type.lower() == 'summarization':
from document_summarizer import summarize
keywords, pretrained_type, embedding_sz = summarize.get_params()
dataFrame = summarize.to_dataframe(dataLocation,keywords, deployLocation, pretrained_type, embedding_sz)
problem_type = 'classification'
targetFeature = 'label'
scoreParam = 'Accuracy'
elif folderdetails['fileType'].lower() == 'document':
dataFrame, error = objData.documentsTodf(dataLocation, folderdetails['labelDataFile'])
if error:
log.info(error)
elif folderdetails['fileType'].lower() == 'object':
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
intermediateLocation = os.path.join(deployLocation,'intermediate')
os.mkdir(intermediateLocation)
AugEnabled,keepAugImages,operations,augConf = config_obj.getEionImageAugmentationConfiguration()
dataFrame, n_class = objData.createTFRecord(dataLocation, intermediateLocation, folderdetails['labelDataFile'], testPercentage,AugEnabled,keepAugImages,operations, "objectdetection",augConf) #Unnati
DataIsFolder = True
else:
datafilelocation = os.path.join(dataLocation,folderdetails['labelDataFile'])
dataFrame = objData.csvTodf(datafilelocation,delimiter,textqualifier)
DataIsFolder = True
if textSimilarityStatus or similarityIdentificationStatus or contextualSearchStatus:
similaritydf = dataFrame
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
if timeseriesStatus or anomalyDetectionStatus:
from utils.validate_inputs import dataGarbageValue
status,msg = dataGarbageValue(dataFrame,datetimeFeature)
if status.lower() == 'error':
raise ValueError(msg)
if not DataIsFolder:
if timeseriesStatus:
if(modelFeatures != 'NA' and datetimeFeature != ''):
if datetimeFeature:
if isinstance(datetimeFeature, list): #to handle if time series having multiple time column
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature
else:
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
elif survival_analysis_status or anomalyDetectionStatus:
if(modelFeatures != 'NA'):
if datetimeFeature != 'NA' and datetimeFeature != '':
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
else:
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfrows=dataFrame.shape[0]
self.dfcols=dataFrame.shape[1]
log.info('\\n-------> Rows: '+str(self.dfrows))
log.info('\\n-------> Columns: '+str(self.dfcols))
topFeatures=[]
profilerObj = None
normalizer=None
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if timeseriesStatus:
if datetimeFeature != 'NA' and datetimeFeature != '':
preproces_config = config_obj.basic.get('preprocessing',{}).get('timeSeriesForecasting',{})
if preproces_config:
from transformations.preprocess import timeSeries as ts_preprocess
preprocess_obj = ts_preprocess( preproces_config,datetimeFeature, log)
dataFrame = preprocess_obj.run( dataFrame)
log.info('-------> Input dataFrame(5 Rows) after preprocessing: ')
log.info(dataFrame.head(5))
deploy_config['preprocess'] = {}
deploy_config['preprocess']['code'] = preprocess_obj.get_code()
if profiler_status:
log.info('\\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
from transformations.dataProfiler import profiler as dataProfiler
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
if DataIsFolder:
if folderdetails['type'].lower() != 'objectdetection':
profilerObj = dataProfiler(dataFrame)
topFeatures,VideoProcessing,tfrecord_directory = profilerObj.folderPreprocessing(dataLocation,folderdetails,deployLocation)
elif textSimilarityStatus:
firstDocFeature = config_obj.getFirstDocumentFeature()
secondDocFeature = config_obj.getSecondDocumentFeature()
profilerObj = dataProfiler(dataFrame,targetFeature, data_path=dataFolderLocation)
dataFrame,pipe,targetColumn,topFeatures = profilerObj.textSimilarityStartProfiler(firstDocFeature,secondDocFeature)
elif recommenderStatus:
profilerObj = dataProfiler(dataFrame)
dataFrame = profilerObj.recommenderStartProfiler(modelFeatures)
else:
if deeplearner_status or learner_status:
if (problem_type.lower() != 'clustering') and (problem_type.lower() != 'topicmodelling'):
if targetFeature != '':
try:
biasingDetail = config_obj.getDebiasingDetail()
if len(biasingDetail) > 0:
if biasingDetail['FeatureName'] != 'None':
protected_feature = biasingDetail['FeatureName']
privileged_className = biasingDetail['ClassName']
target_feature = biasingDetail['TargetFeature']
algorithm = biasingDetail['Algorithm']
from debiasing.DebiasingManager import DebiasingManager
mgrObj = DebiasingManager()
log.info('Status:-|... Debiasing transformation started')
transf_dataFrame = mgrObj.Bias_Mitigate(dataFrame, protected_feature, privileged_className, target_feature, algorithm)
log.info('Status:-|... Debiasing transformation completed')
dataFrame = transf_dataFrame
except Exception as e:
print(e)
pass
# ---------------------------------------------- ----------------------------------------------
targetData = dataFrame[targetFeature]
featureData = dataFrame[dataFrame.columns.difference([targetFeature])]
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
xtrain,ytrain,xtest,ytest = self.split_into_train_test_data(featureData,targetData,testPercentage,log,problem_type.lower())
xtrain.reset_index(drop=True,inplace=True)
ytrain.reset_index(drop=True,inplace=True)
xtest.reset_index(drop=True,inplace=True)
ytest.reset_index(drop=True,inplace=True)
dataFrame = xtrain
dataFrame[targetFeature] = ytrain
encode_target_problems = ['classification','anomalyDetection', 'timeSeriesAnomalyDetection'] #task 11997
if problem_type == 'survivalAnalysis' and dataFrame[targetFeature].nunique() > 1:
encode_target_problems.append('survivalAnalysis')
if timeseriesStatus: #task 12627 calling data profiler without target feature specified separately (i.e) profiling is done for model features along with target features
profilerObj = dataProfiler(dataFrame, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation)
else:
profilerObj = dataProfiler(dataFrame, target=targetFeature, encode_target= problem_type in encode_target_problems, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation) #task 12627
dataFrame, preprocess_pipe, label_encoder = profilerObj.transform()
preprocess_out_columns = dataFrame.columns.tolist()
if not timeseriesStatus: #task 12627 preprocess_out_columns goes as output_columns in target folder script/input_profiler.py, It should contain the target feature also as it is what is used for forecasting
if targetFeature in preprocess_out_columns:
preprocess_out_columns.remove(targetFeature)
for x in unpreprocessed_columns:
preprocess_out_columns.remove(x)
if label_encoder:
joblib.dump(label_encoder, Path(deployLocation)/'model'/'label_encoder.pkl')
labelMaps = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
codeConfigure.update_config('train_features',list(profilerObj.train_features_type.keys()))
codeConfigure.update_config('text_features',profilerObj.text_feature)
self.textFeatures = profilerObj.text_feature
deploy_config['profiler'] = {}
deploy_config['profiler']['input_features'] = list(profilerObj.train_features_type.keys())
deploy_config['profiler']['output_features'] = preprocess_out_columns
deploy_config['profiler']['input_features_type'] = profilerObj.train_features_type
deploy_config['profiler']['word2num_features'] = profilerObj.wordToNumericFeatures
deploy_config['profiler']['unpreprocessed_columns'] = unpreprocessed_columns
deploy_config['profiler']['force_numeric_conv'] = profilerObj.force_numeric_conv
if self.textFeatures:
deploy_config['profiler']['conversion_method'] = config_obj.get_conversion_method()
if anomalyDetectionStatus and datetimeFeature != 'NA' and datetimeFeature != '':
if unpreprocessed_columns:
dataFrame.set_index( unpreprocessed_columns[0], inplace=True)
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
if not xtest.empty:
if targetFeature != '':
non_null_index = ytest.notna()
ytest = ytest[non_null_index]
xtest = xtest[non_null_index]
if profilerObj.force_numeric_conv:
xtest[ profilerObj.force_numeric_conv] = xtest[profilerObj.force_numeric_conv].apply(pd.to_numeric,errors='coerce')
xtest.astype(profilerObj.train_features_type)
if unpreprocessed_columns:
xtest_unprocessed = xtest[unpreprocessed_columns]
xtest = preprocess_pipe.transform(xtest)
if not isinstance(xtest, np.ndarray):
xtest = xtest.toarray()
xtest = pd.DataFrame(xtest, columns=preprocess_out_columns)
if unpreprocessed_columns:
xtest[unpreprocessed_columns] = xtest_unprocessed
if survival_analysis_status:
xtest.astype({x:'float' for x in unpreprocessed_columns})
xtrain.astype({x:'float' for x in unpreprocessed_columns})
#task 11997 removed setting datetime column as index of dataframe code as it is already done before
if label_encoder:
ytest = label_encoder.transform(ytest)
if preprocess_pipe:
if self.textFeatures:
from text.textProfiler import reset_pretrained_model
reset_pretrained_model(preprocess_pipe) # pickle is not possible for fasttext model ( binary)
joblib.dump(preprocess_pipe, Path(deployLocation)/'model'/'preprocess_pipe.pkl')
self.features=topFeatures
if targetColumn in topFeatures:
topFeatures.remove(targetColumn)
self.topFeatures=topFeatures
if normalizer != None:
normalizer_file_path = os.path.join(deployLocation,'model','normalizer_pipe.sav')
normalizer_pickle_file = 'normalizer_pipe.sav'
pickle.dump(normalizer, open(normalizer_file_path,'wb'))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\\n')
else:
datacolumns=list(dataFrame.columns)
if targetFeature in datacolumns:
datacolumns.remove(targetFeature)
if not timeseriesStatus and not anomalyDetectionStatus and not inputDriftStatus and not outputDriftStatus and not imageClassificationStatus and not associationRuleStatus and not objectDetectionStatus and not stateTransitionStatus and not textsummarizationStatus:
self.textFeatures,self.vectorizerFeatures,pipe,column_merge_flag,merge_columns = profilerObj.checkForTextClassification(dataFrame)
self.topFeatures =datacolumns
if(pipe is not None):
preprocessing_pipe = 'pppipe'+iterName+'_'+iterVersion+'.sav'
ppfilename = os.path.join(deployLocation,'model','pppipe'+iterName+'_'+iterVersion+'.sav')
pickle.dump(pipe, open(ppfilename, 'wb'))
status, msg = save_csv_compressed(dataFrame,profiled_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
if selector_status:
log.info("\\n================== Feature Selector has started ==================")
log.info("Status:-|... AION feature engineering started")
fs_mlstart = time.time()
selectorJson = config_obj.getEionSelectorConfiguration()
if self.textFeatures:
config_obj.updateFeatureSelection(selectorJson, codeConfigure, self.textFeatures)
log.info("-------> For vectorizer 'feature selection' is disabled and all the features will be used for training")
from feature_engineering.featureSelector import featureSelector
selectorObj = featureSelector()
dataFrame,targetColumn,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,self.similarGroups,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pcaModel,bpca_features,apca_features,featureEngineeringSelector = selectorObj.startSelector(dataFrame, selectorJson,self.textFeatures,targetFeature,problem_type)
if(str(pcaModel) != 'None'):
featureReduction = 'True'
status, msg = save_csv(dataFrame,reduction_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
pcaFileName = os.path.join(deployLocation,'model','pca'+iterName+'_'+iterVersion+'.sav')
pcaModel_pickle_file = 'pca'+iterName+'_'+iterVersion+'.sav'
pickle.dump(pcaModel, open(pcaFileName, 'wb'))
if not xtest.empty:
xtest = pd.DataFrame(pcaModel.transform(xtest),columns= apca_features)
if targetColumn in self.topFeatures:
self.topFeatures.remove(targetColumn)
fs_mlexecutionTime=time.time() - fs_mlstart
log.info('-------> COMPUTING: Total Feature Selection Execution Time '+str(fs_mlexecutionTime))
log.info('================== Feature Selection completed ==================\\n')
log.info("Status:-|... AION feature engineering completed")
if deeplearner_status or learner_status:
log.info('Status:-|... AION training started')
ldp_mlstart = time.time()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problem_type.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
else:
modelType='clustering'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if(modelType == 'clustering') or (modelType == 'topicmodelling'):
xtrain = featureData
ytrain = pd.DataFrame()
xtest = featureData
ytest = pd.DataFrame()
elif (targetColumn!=''):
xtrain = dataFrame[features]
ytrain = dataFrame[targetColumn]
else:
pass
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
if learner_status:
base_model_score=0
log.info('\\n================== ML Started ==================')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
anomalyDetectionStatus = False
anomalyMethod =config_obj.getEionanomalyModels()
if modelType.lower() == "anomalydetection" or modelType.lower() == "timeseriesanomalydetection": #task 11997
anomalyDetectionStatus = True
if anomalyDetectionStatus == True :
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
if datetimeFeature in datacolumns:
datacolumns.remove(datetimeFeature)
self.features = datacolumns
from learner.anomalyDetector import anomalyDetector
anomalyDetectorObj=anomalyDetector()
model_type ="anomaly_detection"
saved_model = model_type+'_'+iterName+'_'+iterVersion+'.sav'
if problem_type.lower() == "timeseriesanomalydetection": #task 11997
anomalyconfig = config_obj.getAIONTSAnomalyDetectionConfiguration()
modelType = "TimeSeriesAnomalyDetection"
else:
anomalyconfig = config_obj.getAIONAnomalyDetectionConfiguration()
testPercentage = config_obj.getAIONTestTrainPercentage()
##Multivariate feature based anomaly detection status from gui (true/false)
mv_featurebased_selection = config_obj.getMVFeaturebasedAD()
mv_featurebased_ad_status=str(mv_featurebased_selection['uniVariate'])
model,estimator,matrix,trainmatrix,score,labelMaps=anomalyDetectorObj.startanomalydetector(dataFrame,targetColumn,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status) #Unnati
score = 'NA'
if(self.matrix != '{'):
self.matrix += ','
self.matrix += matrix
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
scoreParam = 'NA'
scoredetails = f'{{"Model":"{model}","Score":"{score}"}}'
if model_tried != '':
model_tried += ','
model_tried += scoredetails
model = anomalyMethod
else:
log.info('-------> Target Problem Type:'+ self.targetType)
log.info('-------> Target Model Type:'+ modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
codeConfigure.update_config('scoring_criteria',scoreParam)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=mlobj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,'MB',codeConfigure,featureEngineeringSelector,config_obj.getModelEvaluationConfig(),imageFolderLocation)
#Getting model,data for ensemble calculation
e_model=loaded_model
base_model_score=score
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('================== ML Completed ==================\\n')
if deeplearner_status:
learner_type = 'DL'
log.info('Status:- |... AION DL training started')
from dlearning.deeplearning import deeplearning
dlobj = deeplearning()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
log.info('\\n================== DL Started ==================')
dlstart = time.time()
deeplearnerJson = config_obj.getEionDeepLearnerConfiguration()
targetColumn = targetFeature
method = deeplearnerJson['optimizationMethod']
optimizationHyperParameter = deeplearn
_inv[:, targetColIndx]
predout = predout.reshape(len(pred_1d),1)
#y_future.append(predout)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(predout)),columns=col)
for i in range(0, len(predout)):
pred.iloc[i] = predout[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
elif (model.lower() == 'mlp' or model.lower() == 'lstm'):
sfeatures.remove(datetimeFeature)
self.features = sfeatures
if len(sfeatures) == 1:
xt = xtrain[self.features].values
else:
xt = xtrain[self.features].values
with open(scalertransformationFile, 'rb') as f:
loaded_scaler_model = pickle.load(f)
f.close()
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
pred_data = xt
y_future = []
for i in range(no_of_prediction):
pdata = pred_data[-lag_order:]
if model.lower() == 'mlp':
pdata = pdata.reshape((1,lag_order))
else:
pdata = pdata.reshape((1,lag_order, len(sfeatures)))
if (len(sfeatures) > 1):
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout)
pred_data=np.append(pred_data,pred,axis=0)
else:
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout.flatten()[-1])
pred_data = np.append(pred_data,pred)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(y_future)),columns=col)
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
else:
pass
log.info('Status:-|... AION TimeSeries Forecasting completed') #task 11997
log.info("------ Forecast Prediction End -------------\\n")
log.info('================ Time Series Forecasting Completed ================\\n') #task 11997
if recommenderStatus:
log.info('\\n================ Recommender Started ================ ')
log.info('Status:-|... AION Recommender started')
learner_type = 'RecommenderSystem'
model_type = 'RecommenderSystem'
modelType = model_type
model = model_type
targetColumn=''
datacolumns=list(dataFrame.columns)
self.features=datacolumns
svd_params = config_obj.getEionRecommenderConfiguration()
from recommender.item_rating import recommendersystem
recommendersystemObj = recommendersystem(modelFeatures,svd_params)
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
saved_model,rmatrix,score,trainingperformancematrix,model_tried = recommendersystemObj.recommender_model(dataFrame,outputLocation)
scoreParam = 'NA' #Task 11190
log.info('Status:-|... AION Recommender completed')
log.info('================ Recommender Completed ================\\n')
if textsummarizationStatus:
log.info('\\n================ text Summarization Started ================ ')
log.info('Status:-|... AION text Summarization started')
modelType = 'textsummarization'
model_type = 'textsummarization'
learner_type = 'Text Summarization'
modelName='TextSummarization'
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from scipy import spatial
model = model_type
dataLocationTS,deployLocationTS,KeyWordsTS,pathForKeywordFileTS = config_obj.getEionTextSummarizationConfig()
#print("dataLocationTS",dataLocationTS)
#print("deployLocationTS",deployLocationTS)
#print("KeyWordsTS",KeyWordsTS)
#print("pathForKeywordFileTS",pathForKeywordFileTS)
#PreTrained Model Download starts-------------------------
from appbe.dataPath import DATA_DIR
preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
models = {'glove':{50:'glove.6B.50d.w2vformat.txt'}}
supported_models = [x for y in models.values() for x in y.values()]
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
Path(modelsPath).mkdir(parents=True, exist_ok=True)
p = Path(modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
selected_model="glove.6B.50d.w2vformat.txt"
if selected_model not in modelsDownloaded:
print("Model not in folder, downloading")
import urllib.request
location = Path(modelsPath)
local_file_path = location/f"glove.6B.50d.w2vformat.txt"
urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.50d.w2vformat.txt', local_file_path)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")
tokenizer.save_pretrained(preTrainedModellocation)
model.save_pretrained(preTrainedModellocation)
#PreTrained Model Download ends-----------------------
deployLocationData=deployLocation+"\\\\data\\\\"
modelLocation=Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'/'glove.6B.50d.w2vformat.txt'
KeyWordsTS=KeyWordsTS.replace(",", " ")
noOfKeyword = len(KeyWordsTS.split())
keywords = KeyWordsTS.split()
embeddings = {}
word = ''
with open(modelLocation, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
readData=pd.read_csv(pathForKeywordFileTS,encoding='utf-8',encoding_errors= 'replace')
for i in range(noOfKeyword):
terms=(sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6]
readData = readData.append({'Keyword': keywords[i]}, ignore_index=True)
for j in range(len(terms)):
readData = readData.append({'Keyword': terms[j]}, ignore_index=True)
deployLocationDataKwDbFile=deployLocationData+"keywordDataBase.csv"
readData.to_csv(deployLocationDataKwDbFile,encoding='utf-8',index=False)
datalocation_path=dataLocationTS
path=Path(datalocation_path)
fileList=os.listdir(path)
textExtraction = pd.DataFrame()
textExtraction['Sentences']=""
rowIndex=0
for i in range(len(fileList)):
fileName=str(datalocation_path)+"\\\\"+str(fileList[i])
if fileName.endswith(".pdf"):
print("\\n files ",fileList[i])
from pypdf import PdfReader
reader = PdfReader(fileName)
number_of_pages = len(reader.pages)
text=""
textOutputForFile=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
sentence=sentence.replace("\\n", " ")
if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) :
continue
textExtraction.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
if fileName.endswith(".txt"):
print("\\n txt files",fileList[i])
data=[]
with open(fileName, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
sentences=str1.split(".")
count=0
for sentence in sentences:
count += 1
textExtraction.at[rowIndex+i,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
df=textExtraction
#print("textExtraction",textExtraction)
deployLocationDataPreProcessData=deployLocationData+"preprocesseddata.csv"
save_csv_compressed(df, deployLocationDataPreProcessData, encoding='utf-8')
df['Label']=0
kw=pd.read_csv(deployLocationDataKwDbFile,encoding='utf-8',encoding_errors= 'replace')
Keyword_list = kw['Keyword'].tolist()
for i in df.index:
for x in Keyword_list:
if (str(df["Sentences"][i])).find(x) != -1:
df['Label'][i]=1
break
deployLocationDataPostProcessData=deployLocationData+"postprocesseddata.csv"
#df.to_csv(deployLocationDataPostProcessData,encoding='utf-8')
save_csv_compressed(df, deployLocationDataPostProcessData, encoding='utf-8')
labelledData=df
train_df=labelledData
labelencoder = LabelEncoder()
train_df['Sentences'] = labelencoder.fit_transform(train_df['Sentences'])
X = train_df.drop('Label',axis=1)
y = train_df['Label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
Classifier = RandomForestClassifier(n_estimators = 10, random_state = 42)
modelTs=Classifier.fit(X, y)
import pickle
deployLocationTS=deployLocation+"\\\\model\\\\"+iterName+'_'+iterVersion+'.sav'
deployLocationTS2=deployLocation+"\\\\model\\\\"+"classificationModel.sav"
pickle.dump(modelTs, open(deployLocationTS, 'wb'))
pickle.dump(modelTs, open(deployLocationTS2, 'wb'))
print("\\n trainModel Ends")
saved_model = 'textsummarization_'+iterName+'_'+iterVersion
log.info('Status:-|... AION text summarization completed')
model = learner_type
log.info('================ text summarization Completed ================\\n')
if survival_analysis_status:
sa_method = config_obj.getEionanomalyModels()
labeldict = {}
log.info('\\n================ SurvivalAnalysis Started ================ ')
log.info('Status:-|... AION SurvivalAnalysis started')
log.info('\\n================ SurvivalAnalysis DataFrame ================ ')
log.info(dataFrame)
from survival import survival_analysis
from learner.machinelearning import machinelearning
sa_obj = survival_analysis.SurvivalAnalysis(dataFrame, preprocess_pipe, sa_method, targetFeature, datetimeFeature, filter_expression, profilerObj.train_features_type)
if sa_obj != None:
predict_json = sa_obj.learn()
if sa_method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']:
predicted = sa_obj.models[0].predict(dataFrame[datetimeFeature])
status, msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
self.features = [datetimeFeature]
elif sa_method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']:
predicted = sa_obj.models[0].predict_cumulative_hazard(dataFrame)
datacolumns = list(dataFrame.columns)
targetColumn = targetFeature
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
self.features = datacolumns
score = sa_obj.score
scoreParam = 'Concordance_Index'
status,msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
model = sa_method
modelType = "SurvivalAnalysis"
model_type = "SurvivalAnalysis"
modelName = sa_method
i = 1
for mdl in sa_obj.models:
saved_model = "%s_%s_%s_%d.sav"%(model_type,sa_method,iterVersion,i)
pickle.dump(mdl, open(os.path.join(deployLocation,'model',saved_model), 'wb'))
i+=1
p = 1
for plot in sa_obj.plots:
img_name = "%s_%d.png"%(sa_method,p)
img_location = os.path.join(imageFolderLocation,img_name)
plot.savefig(img_location,bbox_inches='tight')
sa_images.append(img_location)
p+=1
log.info('Status:-|... AION SurvivalAnalysis completed')
log.info('\\n================ SurvivalAnalysis Completed ================ ')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('\\n================== Visualization Recommendation Started ==================')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfrows,self.dfcols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('================== Visualization Recommendation Completed ==================\\n')
if similarityIdentificationStatus or contextualSearchStatus:
datacolumns=list(dataFrame.columns)
features = modelFeatures.split(",")
if indexFeature != '' and indexFeature != 'NA':
iFeature = indexFeature.split(",")
for ifea in iFeature:
if ifea not in features:
features.append(ifea)
for x in features:
dataFrame[x] = similaritydf[x]
#get vectordb(chromadb) status selected
if similarityIdentificationStatus:
learner_type = 'similarityIdentification'
else:
learner_type = 'contextualSearch'
vecDBCosSearchStatus = config_obj.getVectorDBCosSearchStatus(learner_type)
if vecDBCosSearchStatus:
status, msg = save_chromadb(dataFrame, config_obj, trained_data_file, modelFeatures)
if not status:
log.info('Vector DB File Error: '+str(msg))
else:
status, msg = save_csv(dataFrame,trained_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
self.features = datacolumns
model_type = config_obj.getAlgoName(problem_type)
model = model_type #bug 12833
model_tried = '{"Model":"'+model_type+'","FeatureEngineering":"NA","Score":"NA","ModelUncertainty":"NA"}'
modelType = learner_type
saved_model = learner_type
score = 'NA'
if deploy_status:
if str(model) != 'None':
log.info('\\n================== Deployment Started ==================')
log.info('Status:-|... AION Creating Prediction Service Start')
deployer_mlstart = time.time()
deployJson = config_obj.getEionDeployerConfiguration()
deploy_name = iterName+'_'+iterVersion
from prediction_package.model_deploy import DeploymentManager
if textsummarizationStatus :
deploy = DeploymentManager()
deploy.deployTSum(deployLocation,preTrainedModellocation)
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Deployer completed')
log.info('================== Deployment Completed ==================')
else:
deploy = DeploymentManager()
deploy.deploy_model(deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deployLocation,self.features,self.profilerAction,dataLocation,labelMaps,column_merge_flag,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer_pickle_file,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,self.method,deployFolder,iterName,iterVersion,self.wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,noofforecasts,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,deploy_config)
codeConfigure.update_config('deploy_path',os.path.join(deployLocation,'publish'))
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Creating Prediction Service completed')
log.info('================== Deployment Completed ==================')
if not outputDriftStatus and not inputDriftStatus:
from transformations.dataProfiler import set_features
self.features = set_features(self.features,profilerObj)
self.matrix += '}'
self.trainmatrix += '}'
print(model_tried)
model_tried = eval('['+model_tried+']')
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
deployPath = deployLocation.replace(os.sep, '/')
if survival_analysis_status:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"survivalProbability":json.loads(predict_json),"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,"imageLocation":str(sa_images),"LogFile":logFileName}}
elif not timeseriesStatus:
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
else:
if config_obj.summarize:
modelType = 'Summarization'
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,'forecasts':json.loads(forecast_output),"LogFile":logFileName}}
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w',encoding='utf-8') as f:
json.dump(output_json, f)
f.close()
output_json = json.dumps(output_json)
log.info('\\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfrows)+','+str(self.dfcols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
for i in range(0,len(self.similarGroups)):
log.info('------->Similar Groups '+str(i+1)+' '+str(self.similarGroups[i]))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\\n')
log.info('Status:-|... AION Model Training Successfully Done')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
log.error(inst, exc_info = True)
output_json = {"status":"FAIL","message":str(inst).strip('"'),"LogFile":logFileName}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\\nTotal execution time(sec) :'+str(executionTime))
log.info('\\n------------- Output JSON ------------')
log.info('aion_learner_status:'+str(output_json))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def split_into_train_test_data(self,featureData,targetData,testPercentage,log,modelType='classification'): #Unnati
log.info('\\n-------------- Test Train Split ----------------')
if testPercentage == 0 or testPercentage == 100: #Unnati
xtrain=featureData
ytrain=targetData
xtest=pd.DataFrame()
ytest=pd.DataFrame()
else:
testSize= testPercentage/100 #Unnati
if modelType == 'regression':
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
else:
try:
log.info('-------> Split Type: Stratify Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,random_state=42)
except ValueError as e:
count_unique = targetData.value_counts()
feature_with_single_count = count_unique[ count_unique == 1].index.tolist()
error = f"The least populated class in {feature_with_single_count} has only 1 member, which is too few. The minimum number of groups for any class cannot be less than 2"
raise Exception(error) from e
except:
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') #Unnati
log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->')
log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->')
log.info('-------------- Test Train Split End ----------------\\n')
return(xtrain,ytrain,xtest,ytest)
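# A small illustrative call (hypothetical data, not part of AION) showing the
# behaviour of split_into_train_test_data: classification targets are stratified
# when possible, otherwise a shuffled random split is used.
#
#   import logging, pandas as pd
#   srv = server()
#   X = pd.DataFrame({'f1': range(100)})
#   y = pd.Series([0, 1] * 50)
#   xtr, ytr, xte, yte = srv.split_into_train_test_data(
#       X, y, 20, logging.getLogger('eion'), 'classification')
#   # -> 80 training rows, 20 test rows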
def aion_train_model(arg):
warnings.filterwarnings('ignore')
config_path = Path( arg)
with open( config_path, 'r') as f:
config = json.load( f)
log = set_log_handler(config['basic'])
log.info('************* Version - v'+AION_VERSION+' *************** \\n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
log.info(msg)
try:
config_validate(arg)
valid, msg = pushRecordForTraining()
if valid:
serverObj = server()
configObj = AionConfigManager()
codeConfigure = code_configure()
codeConfigure.create_config(config)
readConfistatus,msg = configObj.readConfigurationFile(config)
if(readConfistatus == False):
raise ValueError( msg)
output = serverObj.startScriptExecution(configObj, codeConfigure, log)
else:
output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
output = json.dumps(output)
print( f"\\naion_learner_status:{output}\\n")
log.info( f"\\naion_learner_status:{output}\\n")
except Exception as inst:
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(f"\\naion_learner_status:{output}\\n")
log.info( f"\\naion_learner_ |
status:{output}\\n")
return output
if __name__ == "__main__":
aion_train_model( sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import glob
import json
import time
def publish(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
model = jsonData['modelName']
version = jsonData['modelVersion']
deployFolder = jsonData['deployLocation']
model = model.replace(" ", "_")
deployedPath = os.path.join(deployFolder,model+'_'+version)
deployedPath = os.path.join(deployedPath,'WHEELfile')
whlfilename='na'
if os.path.isdir(deployedPath):
for file in os.listdir(deployedPath):
if file.endswith(".whl"):
whlfilename = os.path.join(deployedPath,file)
if whlfilename != 'na':
subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model])
subprocess.check_call([sys.executable, "-m", "pip", "install", whlfilename])
status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status == 'Running':
service_stop(json.dumps(jsonData))
service_start(json.dumps(jsonData))
output_json = {'status':"SUCCESS"}
output_json = json.dumps(output_json)
else:
output_json = {'status':'Error','Msg':'Installation Package not Found'}
output_json = json.dumps(output_json)
return(output_json)
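# --- Illustrative usage (values are placeholders, not real paths) ------------
# publish() accepts either a path to a JSON file or a JSON string containing
# the keys read above (modelName, modelVersion, deployLocation, serviceFolder,
# plus ip/port consumed by service_start). A minimal sketch:
#
#   import json
#   payload = {"modelName": "usecase 1", "modelVersion": "1",
#              "deployLocation": "/path/to/target",
#              "serviceFolder": "/path/to/services",
#              "ip": "127.0.0.1", "port": "60050"}
#   print(publish(json.dumps(payload)))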
def check_service_running(model,serviceFolder):
model = model.replace(" ", "_")
filename = model+'_service.py'
modelservicefile = os.path.join(serviceFolder,filename)
status = 'File Not Exist'
ip = ''
port = ''
pid = ''
if os.path.exists(modelservicefile):
status = 'File Exist'
import psutil
for proc in psutil.process_iter():
pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections'])
if 'python' in pinfo['name']:
if filename in pinfo['cmdline'][1]:
status = 'Running'
pid = pinfo['pid']
for x in pinfo['connections']:
ip = x.laddr.ip
port = x.laddr.port
return(status,pid,ip,port)
def service_stop(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status == 'Running':
import psutil
p = psutil.Process(int(pid))
p.terminate()
time.sleep(2)
output_json = {'status':'SUCCESS'}
output_json = json.dumps(output_json)
return(output_json)
def service_start(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
model = jsonData['modelName']
version = jsonData['modelVersion']
ip = jsonData['ip']
port = jsonData['port']
deployFolder = jsonData['deployLocation']
serviceFolder = jsonData['serviceFolder']
model = model.replace(" ", "_")
deployLocation = os.path.join(deployFolder,model+'_'+version)
org_service_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'model_service.py'))
filename = model+'_service.py'
modelservicefile = os.path.join(serviceFolder,filename)
status = 'File Not Exist'
if os.path.exists(modelservicefile):
status = 'File Exist'
r = ([line.split() for line in subprocess.check_output("tasklist").splitlines()])
for i in range(len(r)):
if filename in r[i]:
status = 'Running'
if status == 'File Not Exist':
shutil.copy(org_service_file,modelservicefile)
with open(modelservicefile, 'r+') as file:
content = file.read()
file.seek(0, 0)
line = 'from '+model+' import aion_performance'
file.write(line+"\\n")
line = 'from '+model+' import aion_drift'
file.write(line+ "\\n")
line = 'from '+model+' import featureslist'
file.write(line+ "\\n")
line = 'from '+model+' import aion_prediction'
file.write(line+ "\\n")
file.write(content)
file.close()
status = 'File Exist'
if status == 'File Exist':
status,pid,ipold,portold = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status != 'Running':
command = "python "+modelservicefile+' '+str(port)+' '+str(ip)
os.system('start cmd /c "'+command+'"')
time.sleep(2)
status = 'Running'
output_json = {'status':'SUCCESS','Msg':status}
output_json = json.dumps(output_json)
return(output_json)
if __name__ == "__main__":
publish(sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.core.utils.utils import setup_outputdir
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
import datetime, time, timeit
from datetime import datetime as dt
import os.path
import json
import io
import shutil
import sys
#from Gluon_MultilabelPredictor import MultilabelPredictor
class MultilabelPredictor():
""" Tabular Predictor for predicting multiple columns in table.
Creates multiple TabularPredictor objects which you can also use individually.
You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
Parameters
----------
labels : List[str]
The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
path : str
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
problem_types : List[str]
The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
eval_metrics : List[str]
The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
consider_labels_correlation : bool
Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
kwargs :
Arguments passed into the initialization of each TabularPredictor.
"""
multi_predictor_file = 'multilabel_predictor.pkl'
def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs):
if len(labels) < 2:
raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).")
self.path = setup_outputdir(path, warn_if_exist=False)
self.labels = labels
#print(self.labels)
self.consider_labels_correlation = consider_labels_correlation
self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
if eval_metrics is None:
self.eval_metrics = {}
else:
self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))}
problem_type = None
eval_metric = None
for i in range(len(labels)):
label = labels[i]
path_i = self.path + "Predictor_" + label
if problem_types is not None:
problem_type = problem_types[i]
if eval_metrics is not None:
eval_metric = self.eval_metrics[label]
self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs)
def fit(self, train_data, tuning_data=None, **kwargs):
""" Fits a separate TabularPredictor to predict each of the labels.
Parameters
----------
train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
See documentation for `TabularPredictor.fit()`.
kwargs :
Arguments passed into the `fit()` call for each TabularPredictor.
"""
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
train_data_og = train_data.copy()
if tuning_data is not None:
tuning_data_og = tuning_data.copy()
save_metrics = len(self.eval_metrics) == 0
for i in range(len(self.labels)):
label = self.labels[i]
predictor = self.get_predictor(label)
if not self.consider_labels_correlation:
labels_to_drop = [l for l in self.labels if l!=label]
else:
labels_to_drop = [self.labels[j] for j in range(i+1,len(self.labels))]
train_data = train_data_og.drop(labels_to_drop, axis=1)
if tuning_data is not None:
tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
print(f"Fitting TabularPredictor for label: {label} ...")
predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
self.predictors[label] = predictor.path
if save_metrics:
self.eval_metrics[label] = predictor.eval_metric
self.save()
def get_eval_metrics(self):
""" Returns the per-label eval_metric mapping (the `eval_metrics` attribute set in `__init__` would otherwise shadow a method of the same name). """
return self.eval_metrics
def predict(self, data, **kwargs):
""" Returns DataFrame with label columns containing predictions for each label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
kwargs :
Arguments passed into the predict() call for each TabularPredictor.
"""
return self._predict(data, as_proba=False, **kwargs)
def predict_proba(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
kwargs :
Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
"""
return self._predict(data, as_proba=True, **kwargs)
def evaluate(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to evaluate predictions of all labels for; must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
kwargs :
Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
"""
data = self._get_data(data)
eval_dict = {}
for label in self.labels:
print(f"Evaluating TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
eval_dict[label] = predictor.evaluate(data, **kwargs)
if self.consider_labels_correlation:
data[label] = predictor.predict(data, **kwargs)
return eval_dict
def save(self):
""" Save MultilabelPredictor to disk. """
for label in self.labels:
if not isinstance(self.predictors[label], str):
self.predictors[label] = self.predictors[label].path
save_pkl.save(path=self.path+self.multi_predictor_file, object=self)
print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path |
}')")
@classmethod
def load(cls, path):
""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """
path = os.path.expanduser(path)
if path[-1] != os.path.sep:
path = path + os.path.sep
return load_pkl.load(path=path+cls.multi_predictor_file)
def get_predictor(self, label):
""" Returns TabularPredictor which is used to predict this label. """
predictor = self.predictors[label]
if isinstance(predictor, str):
return TabularPredictor.load(path=predictor)
return predictor
def _get_data(self, data):
if isinstance(data, str):
return TabularDataset(data)
return data.copy()
def _predict(self, data, as_proba=False, **kwargs):
data = self._get_data(data)
if as_proba:
predproba_dict = {}
for label in self.labels:
print(f"Predicting with TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
if as_proba:
predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs)
data[label] = predictor.predict(data, **kwargs)
if not as_proba:
return data[self.labels]
else:
return predproba_dict
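# --- Illustrative usage of MultilabelPredictor (sketch, mirrors the AutoGluon
# multilabel example; file names and column names below are assumptions) ------
#
#   train_data = TabularDataset('train.csv')
#   labels = ['label_a', 'label_b']                  # columns to predict
#   mlp = MultilabelPredictor(labels=labels, path='ag_models/')
#   mlp.fit(train_data, time_limit=60)               # seconds per label
#   test_data = TabularDataset('test.csv')
#   predictions = mlp.predict(test_data)             # DataFrame with one column per label
#   scores = mlp.evaluate(test_data)                 # dict: label -> evaluation metrics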
def aion_train_gluon(arg):
configFile = arg
with open(configFile, 'rb') as cfile:
data = json.load(cfile)
cfile.close()
rootElement = data['basic']
modelname = rootElement['modelName']
version = rootElement['modelVersion']
dataLocation = rootElement['dataLocation']
deployFolder = rootElement['deployLocation']
analysisType = rootElement['analysisType']
testPercentage = data['advance']['testPercentage']
deployLocation = os.path.join(deployFolder,modelname+'_'+version)
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
os.makedirs(deployLocation)
logLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logLocation)
except OSError as e:
pass
etcLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcLocation)
except OSError as e:
pass
logFileName=os.path.join(deployLocation,'log','model_training_logs.log')
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('************* Version - v1.2.0 *************** \\n')
msg = '-------> Execution Start Time: '+ dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
log.info(msg)
dataLabels = rootElement['targetFeature'].split(',')
# Create and Write the config file used in Prediction
# ----------------------------------------------------------------------------#
tdata = TabularDataset(dataLocation)
#train_data = tdata
train_data = tdata.sample(frac = 0.8)
test_data = tdata.drop(train_data.index)
if rootElement['trainingFeatures'] != '':
trainingFeatures = rootElement['trainingFeatures'].split(',')
else:
trainingFeatures = list(train_data.columns)
features = trainingFeatures
for x in dataLabels:
if x not in features:
features.append(x)
indexFeature = rootElement['indexFeature']
if indexFeature != '':
indexFeature = indexFeature.split(',')
for x in indexFeature:
if x in features:
features.remove(x)
dateTimeFeature = rootElement['dateTimeFeature']
if dateTimeFeature != '':
dateTimeFeature = dateTimeFeature.split(',')
for x in dateTimeFeature:
if x in features:
features.remove(x)
train_data = train_data[features]
test_data = test_data[features]
configJsonFile = {"targetFeature":dataLabels,"features":",".join([feature for feature in features])}
configJsonFilePath = os.path.join(deployLocation,'etc','predictionConfig.json')
if len(dataLabels) == 1 and analysisType['multiLabelPrediction'] == "False":
dataLabels = rootElement['targetFeature']
with io.open(configJsonFilePath, 'w', encoding='utf8') as outfile:
str_ = json.dumps(configJsonFile, ensure_ascii=False)
outfile.write(str_)
# ----------------------------------------------------------------------------#
if analysisType['multiLabelPrediction'] == "True":
# Copy and write the prediction script file into the deployment location
# ----------------------------------------------------------------------------#
srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiLabelPrediction.py')
dstFile = os.path.join(deployLocation,'aion_predict.py')
shutil.copy(srcFile,dstFile)
# ----------------------------------------------------------------------------#
labels = dataLabels # which columns to predict based on the others
#problem_types = dataProblem_types # type of each prediction problem
save_path = os.path.join(deployLocation,'ModelPath') # specifies folder to store trained models
time_limit = 5 # how many seconds to train the TabularPredictor for each label
log.info('Status:-|... AION Gluon Start')
try:
if len(labels) < 2:
log.info('Status:-|... AION Evaluation Error: Target should contain multiple columns')
# ----------------------------------------------------------------------------#
output = {'status':'FAIL','message':'Number of target variables should be 2 or more'}
else:
multi_predictor = MultilabelPredictor(labels=labels, path=save_path)
multi_predictor.fit(train_data, time_limit=time_limit)
log.info('Status:-|... AION Gluon Stop')
log.info('Status:-|... AION Evaluation Start')
trainevaluations = multi_predictor.evaluate(train_data)
testevaluations = multi_predictor.evaluate(test_data)
best_model = {}
for label in labels:
predictor_class = multi_predictor.get_predictor(label)
predictor_class.get_model_best()
best_model[label] = predictor_class.get_model_best()
log.info('Status:-|... AION Evaluation Stop')
# ----------------------------------------------------------------------------#
output = {'status':'SUCCESS','data':{'ModelType':'MultiLabelPrediction','EvaluatedModels':'','featuresused':'','BestScore': '0', 'ScoreType': 'ACCURACY','deployLocation':deployLocation,'matrix':trainevaluations,'testmatrix':testevaluations,'BestModel':best_model, 'LogFile':logFileName}}
except Exception as inst:
log.info('Status:-|... AION Gluon Error')
output = {"status":"FAIL","message":str(inst).strip('"')}
if analysisType['multiModalLearning'] == "True":
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.text import TextPredictor
# check the system and then set the equivalent flag
# ----------------------------------------------------------------------------#
os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "0"
if get_gpu_count() == 0:
os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "1"
# ----------------------------------------------------------------------------#
# Copy and write the prediction script file into the deployment location
# ----------------------------------------------------------------------------#
srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiModalPrediction.py')
dstFile = os.path.join(deployLocation,'aion_predict.py')
shutil.copy(srcFile,dstFile)
time_limit = None # set to larger value in your applications
save_path = os.path.join(deployLocation,'text_prediction')
predictor = TextPredictor(label=dataLabels, path=save_path)
predictor.fit(train_data, time_limit=time_limit)
log.info('Status:-|... AION Gluon Stop')
log.info('Status:-|... AION Evaluation Start')
trainevaluations = predictor.evaluate(train_data)
log.info('Status:-|... AION Evaluation Stop')
# ----------------------------------------------------------------------------#
output = {'status':'SUCCESS','data':{'ModelType':'MultiModelLearning','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'SCORE','deployLocation':deployLocation,'matrix':trainevaluations,'LogFile':logFileName}}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return(output)
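# --- Illustrative configuration (assumption; keys taken from the code above) --
# aion_train_gluon() expects the path of a JSON config file roughly of this shape:
#
#   {
#     "basic": {
#       "modelName": "usecase", "modelVersion": "1",
#       "dataLocation": "train.csv", "deployLocation": "target",
#       "targetFeature": "label_a,label_b",
#       "trainingFeatures": "", "indexFeature": "", "dateTimeFeature": "",
#       "analysisType": {"multiLabelPrediction": "True", "multiModalLearning": "False"}
#     },
#     "advance": {"testPercentage": 20}
#   }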
if __name__ == "__main__":
aion_train_gluon(sys.argv[1])
<s> import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
<s> #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
from functools import partial
from http.server import SimpleHTTPRequestHandler, test
import base64
from appbe.dataPath import DEPLOY_LOCATION
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
from functools import partial
import re
import cgi
from datetime import datetime
import json
import sys
from datetime import datetime
user_records = {}
class LocalModelData(object):
models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
username = kwargs.pop("username")
password = kwargs.pop("password")
self._auth = base64.b64encode(f"{username}:{password}".encode()).decode()
super().__init__(*args)
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="Test"')
self.send_header("Content-type", "text/html")
self.end_headers()
def do_POST(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
if self.headers.get("Authorization") == None:
self.do_AUTHHEAD()
resp = "Authentication Failed: Auth Header Not Present"
resp=resp.encode()
self.wfile.write(resp)
elif self.headers.get("Authorization") == "Basic " + self._auth:
length = int(self.headers.get('content-length'))
#data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
data = self.rfile.read(length)
#print(data)
#keyList = list(data.keys())
#print(keyList[0])
model = self.path.split('/')[-2]
operation = self.path.split('/')[-1]
home = expanduser("~")
#data = json.loads(data)
dataStr = data
model_path = os.path.join(DEPLOY_LOCATION,model)
isdir = os.path.isdir(model_path)
if isdir:
if operation.lower() == 'predict':
predict_path = os.path.join(model_path,'aion_predict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'spredict':
try:
predict_path = os.path.join(model_path,'aion_spredict.py')
print(predict_path)
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
except Exception as e:
print(e)
elif operation.lower() == 'features':
predict_path = os.path.join(model_path,'featureslist.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'explain':
predict_path = os.path.join(model_path,'explainable_ai.py')
outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'monitoring':
predict_path = os.path.join(model_path,'aion_ipdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'performance':
predict_path = os.path.join(model_path,'aion_opdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'pattern_anomaly_predict':
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
configfilename = os.path.join(model_path,'datadetails.json')
filename = os.path.join(model_path,'clickstream.json')
clusterfilename = os.path.join(model_path,'stateClustering.csv')
probfilename = os.path.join(model_path,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
user_records['probarry'] = []
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
avg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
outputStr = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
elif operation.lower() == 'pattern_anomaly_settings':
data = json.loads(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
filename = os.path.join(model_path,'clickstream.json')
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
outputStr = '{"Status":"SUCCESS"}'
else:
outputStr = "{'Status':'Error','Msg':'Operation not supported'}"
else:
outputStr = "{'Status':'Error','Msg':'Model Not Present'}"
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
self.do_AUTHHEAD()
self.wfile.write(self.headers.get("Authorization").encode())
resp = "Authentication Failed"
resp=resp.encode()
self.wfile.write(resp)
else:
print("python ==> else1")
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print("PYTHON ######## REQUEST ####### ENDED")
return
def getModelFeatures(self,modelSignature):
datajson = {'Body':'Gives the list of features'}
home = expanduser("~")
if platform.system() == 'Windows':
predict_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'featureslist.py')
else:
predict_path = os.path.join(home,'HCLT','AION','target',modelSignature,'featureslist.py')
if(os.path.isfile(predict_path)):
outputStr = subprocess.check_output([sys.executable,predict_path])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'features:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
displaymsg = outputStr
#displaymsg = json.dumps(displaymsg)
return(True,displaymsg)
else:
displaymsg = "{'status':'ERROR','msg':'Unable to fetch featuers'}"
return(False,displaymsg)
def getFeatures(self,modelSignature):
datajson = {'Body':'Gives the list of features'}
urltext = '/AION/UseCase_Version/features'
if modelSignature != '':
status, displaymsg = self.getModelFeatures(modelSignature)
if status:
urltext = '/AION/'+modelSignature+'/features'
else:
displaymsg = json.dumps(datajson)
else:
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Output: {displaymsg}.
""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def features_help(self,modelSignature):
home = expanduser("~")
if platform.system() == 'Windows':
display_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'display.json')
else:
display_path = os.path.join(home,'HCLT','AION','target',modelSignature,'display.json')
#display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
datajson = {'Body':'Data Should be in JSON Format'}
if(os.path.isfile(display_path)):
with open(display_path) as file:
config = json.load(file)
file.close()
datajson={}
for feature in config['numericalFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Numeric Value'
for feature in config['nonNumericFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Category Value'
for feature in config['textFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Category Value'
displaymsg = json.dumps(datajson)
return(displaymsg)
def predict_help(self,modelSignature):
if modelSignature != '':
displaymsg = self.features_help(modelSignature)
urltext = '/AION/'+modelSignature+'/predict'
else:
datajson = {'Body':'Data Should be in JSON Format'}
displaymsg = json.dumps(datajson)
urltext = '/AION/UseCase_Version/predict'
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.
""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def performance_help(self,modelSignature):
if modelSignature != '':
urltext = '/AION/'+modelSignature+'/performance'
else:
urltext = '/AION/UseCase_Version/performance'
datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def monitoring_help(self,modelSignature):
if modelSignature != '':
urltext = '/AION/'+modelSignature+'/monitoring'
else:
urltext = '/AION/UseCase_Version/monitoring'
datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: Affected Columns. HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def explain_help(self,modelSignature):
if modelSignature != '':
displaymsg = self.features_help(modelSignature)
urltext = '/AION/'+modelSignature+'/explain'
else:
datajson = {'Body':'Data Should be in JSON Format'}
displaymsg = json.dumps(datajson)
urltext = '/AION/UseCase_Version/explain'
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: anchor (Local Explanation),prediction,forceplot,multidecisionplot.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def help_text(self,modelSignature):
predict_help = self.predict_help(modelSignature)
explain_help = self.explain_help(modelSignature)
features_help = self.getFeatures(modelSignature)
monitoring_help = self.monitoring_help(modelSignature)
performance_help = self.performance_help(modelSignature)
msg="""
Following URL:
Prediction
{predict_help}
Local Explanation
{explain_help}
Features
{features_help}
Monitoring
{monitoring_help}
Performance
{performance_help}
""".format(predict_help=predict_help,explain_help=explain_help,features_help=features_help,monitoring_help=monitoring_help,performance_help=performance_help)
return msg
def do_GET(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
helplist = self.path.split('/')[-1]
print(helplist)
if helplist.lower() == 'help':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.help_text(model)
elif helplist.lower() == 'predict':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.predict_help(model)
elif helplist.lower() == 'explain':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.explain_help(model)
elif helplist.lower() == 'monitoring':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.monitoring_help(model)
elif helplist.lower() == 'performance':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.performance_help(model)
elif helplist.lower() == 'features':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
status,msg = self.getModelFeatures(model)
else:
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =helplist
msg = self.help_text(model)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer():
def __init__(self, ip, port,username,password):
handler_class = partial(HTTPRequestHandler,username=username,password=password,)
self.server = ThreadedHTTPServer((ip,port), handler_class)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
def start_server(ip,port,username,password):
server = SimpleHttpServer(ip,int(port),username,password)
print('HTTP Server Running...........')
server.start()
server.waitForThread()
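# --- Illustrative client call (host, port, credentials and payload are placeholders) --
# The service exposes POST /AION/<usecase_version>/<operation> with HTTP Basic
# auth and a JSON body, and GET /AION/<usecase_version>/help for usage text.
def _example_client_call():
    import requests  # assumption: the requests package is available on the client side
    url = "http://127.0.0.1:60050/AION/usecase_1/predict"
    payload = {"feature1": 1.0, "feature2": "A"}  # placeholder feature values
    resp = requests.post(url, json=payload, auth=("user", "password"))
    print(resp.status_code, resp.text)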
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import os
from pathlib import Path
os.chdir(Path(__file__).parent)
import json
import shutil
from mlac.timeseries import app as ts_app
from mlac.ml import app as ml_app
import traceback
def create_test_file(config):
code_file = 'aionCode.py'
text = """
from pathlib import Path
import subprocess
import sys
import json
import argparse
def run_pipeline(data_path):
print('Data Location:', data_path)
cwd = Path(__file__).parent
monitor_file = str(cwd/'ModelMonitoring'/'{code_file}')
load_file = str(cwd/'DataIngestion'/'{code_file}')
transformer_file = str(cwd/'DataTransformation'/'{code_file}')
selector_file = str(cwd/'FeatureEngineering'/'{code_file}')
train_folder = cwd
register_file = str(cwd/'ModelRegistry'/'{code_file}')
deploy_file = str(cwd/'ModelServing'/'{code_file}')
print('Running modelMonitoring')
cmd = [sys.executable, monitor_file, '-i', data_path]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running dataIngestion')
cmd = [sys.executable, load_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running DataTransformation')
cmd = [sys.executable, transformer_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running FeatureEngineering')
cmd = [sys.executable, selector_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
train_models = [f for f in train_folder.iterdir() if 'ModelTraining' in f.name]
for model in train_models:
print('Running',model.name)
cmd = [sys.executable, str(model/'{code_file}')]
train_result = subprocess.check_output(cmd)
train_result = train_result.decode('utf-8')
print(train_result)
print('Running ModelRegistry')
cmd = [sys.executable, register_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running ModelServing')
cmd = [sys.executable, deploy_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputPath', help='path of the input data')
args = parser.parse_args()
if args.inputPath:
filename = args.inputPath
else:
filename = r"{filename}"
try:
print(run_pipeline(filename))
except Exception as e:
print(e)
""".format(filename=config['dataLocation'],search='{"Status":',code_file=code_file)
deploy_path = Path(config["deploy_path"])/'MLaC'
deploy_path.mkdir(parents=True, exist_ok=True)
py_file = deploy_path/"run_pipeline.py"
with open(py_file, "w") as f:
f.write(text)
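# The generated MLaC/run_pipeline.py can then be executed on its own (paths are
# placeholders); it runs each stage's aionCode.py in sequence and stops at the
# first stage that reports Failure:
#
#   python <deploy_path>/MLaC/run_pipeline.py -i /path/to/new_data.csv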
def is_module_in_req_file(mod, folder):
status = False
if (Path(folder)/'requirements.txt').is_file():
with open(folder/'requirements.txt', 'r') as f:
status = mod in f.read()
return status
def copy_local_modules(config):
deploy_path = Path(config["deploy_path"])
local_modules_location = config.get("local_modules_location", None)
if local_modules_location:
folder_loc = local_modules_location
else:
folder_loc = Path(__file__).parent/'local_modules'
if not folder_loc.exists():
folder_loc = None
if folder_loc:
file = folder_loc/'config.json'
if file.exists():
with open(file, 'r') as f:
data = json.load(f)
for key, values in data.items():
local_module = folder_loc/key
if local_module.exists():
for folder in values:
target_folder = Path(deploy_path)/'MLaC'/folder
if target_folder.is_dir():
if is_module_in_req_file(key, target_folder):
shutil.copy(local_module, target_folder)
def validate(config):
error = ''
if 'error' in config.keys():
error = config['error']
return error
def generate_mlac_code(config):
with open(config, 'r') as f:
config = json.load(f)
error = validate(config)
if error:
raise ValueError(error)
if config['problem_type'] in ['classification','regression']:
return generate_mlac_ML_code(config)
elif config['problem_type'].lower() == 'timeseriesforecasting': #task 11997
return generate_mlac_TS_code(config)
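# --- Illustrative configuration (assumption; only the keys referenced in this file) --
# generate_mlac_code() takes the path of a JSON config. The keys used directly
# here are problem_type, deploy_path and dataLocation (plus the optional
# local_modules_location and error); the mlac generators consume the rest.
#
#   {
#     "problem_type": "classification",
#     "deploy_path": "/path/to/usecase_1",
#     "dataLocation": "/path/to/train.csv"
#   }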
def generate_mlac_ML_code(config):
try:
ml_app.run_loader(config)
ml_app.run_transformer(config)
ml_app.run_selector(config)
ml_app.run_trainer(config)
ml_app.run_register(config)
ml_app.run_deploy(config)
ml_app.run_drift_analysis(config)
copy_local_modules(config)
create_test_file(config)
status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')}
except Exception as Inst:
status = {'Status':'Failure','msg':str(Inst)}
traceback.print_exc()
status = json.dumps(status)
return(status)
def generate_mlac_TS_code(config):
try:
ts_app.run_loader(config)
ts_app.run_transformer(config)
ts_app.run_selector(config)
ts_app.run_trainer(config)
ts_app.run_register(config)
ts_app.run_deploy(config)
ts_app.run_drift_analysis(config)
create_test_file(config)
status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')}
except Exception as Inst:
status = {'Status':'Failure','msg':str(Inst)}
traceback.print_exc()
status = json.dumps(status)
return(status)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from uncertainties.uq_main import aionUQ
import os
from datetime import datetime
from os.path import expanduser
import platform
import logging
class run_uq:
def __init__(self,modelfeatures,modelFile,csvFile,target):
self.modelfeatures=modelfeatures
self.modelFile=modelFile
self.csvFile=csvFile
self.target=target
##UQ classification fn
def getUQclassification(self,model,ProblemName,Params):
df = pd.read_csv(self.csvFile)
# # object_cols = [col for col, col_type in df.dtypes.iteritems() if col_type == 'object'] -- Fix for python 3.8.11 update (in 2.9.0.8)
object_cols = [col for col, col_type in zip(df.columns,df.dtypes) if col_type == 'object']
df = df.drop(object_cols, axis=1)
df = df.dropna(axis=1)
df = df.reset_index(drop=True)
modelfeatures = self.modelfeatures
#tar = args.target
# target = df[tar]
y=df[self.target].values
y = y.flatten()
X = df.drop(self.target, axis=1)
try:
uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,self.target)
accuracy,uq_ece,output_jsonobject=uqObj.uqMain_BBMClassification()
except Exception as e:
print("uq error",e)
# print("UQ Classification: \\n",output_jsonobject)
# print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per)
#print(output_jsonobject)
return accuracy,uq_ece,output_jsonobject
##UQ regression fn
def getUQregression(self,model,ProblemName,Params):
df = pd.read_csv(self.csvFile)
modelfeatures = self.modelfeatures
dfp = df[modelfeatures]
tar = self.target
target = df[tar]
uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
return total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject
def uqMain(self,model):
#print("inside uq main.\\n")
reg_status=""
class_status=""
algorithm_status=""
try:
model=model
if Path(self.modelFile).is_file():
ProblemName = model.__class__.__name__
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
Problemtype = 'Classification'
elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor']:
Problemtype = 'Regression'
else:
Problemtype = "None"
if Problemtype.lower() == 'classification':
try:
Params = model.get_params()
accuracy,uq_ece,output = self.getUQclassification(model,ProblemName,Params)
class_status="SUCCESS"
#print(output)
except Exception as e:
print(e)
class_status="FAILED"
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
elif Problemtype.lower() == 'regression' :
try:
Params = model.get_params()
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,output = self.getUQregression(model,ProblemName,Params)
#print(uq_jsonobject)
reg_status="SUCCESS"
except Exception as e:
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
reg_status="FAILED"
else:
try:
output={}
output['Problem']="None"
output['msg']="Uncertainty Quantification not supported for this algorithm."
output = json.dumps(output)
algorithm_status="FAILED"
except:
algorithm_status="FAILED"
except Exception as e:
print(e)
reg_status="FAILED"
class_status="FAILED"
algorithm_status="FAILED"
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
return class_status,reg_status,algorithm_status,output
def aion_uq(modelFile,dataFile,features,targetfeatures):
try:
from appbe.dataPath import DEPLOY_LOCATION
uqLogLocation = os.path.join(DEPLOY_LOCATION,'logs')
try:
os.makedirs(uqLogLocation)
except OSError as e:
if (os.path.exists(uqLogLocation)):
pass
else:
raise OSError('uqLogLocation error.')
filename_uq = 'uqlog_'+str(int(time.time()))
filename_uq=filename_uq+'.log'
filepath = os.path.join(uqLogLocation, filename_uq)
print(filepath)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionUQ')
log.setLevel(logging.INFO)
log.info('************* Version - v1.7.0 *************** \\n')
if isinstance(features, list):
modelfeatures = features
else:
if ',' in features:
modelfeatures = [x.strip() for x in features.split(',')]
else:
modelfeatures = features.split(',')
model = joblib.load(modelFile)
uqobj = run_uq(modelfeatures,modelFile,dataFile,targetfeatures)
class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
if (class_status.lower() == 'failed'):
log.info('uq classification failed.\\n')
elif (class_status.lower() == 'success'):
log.info('uq classification success.\\n')
else:
log.info('uq classification not used.\\n')
if (reg_status.lower() == 'failed'):
log.info('uq regression failed.\\n')
elif (reg_status.lower() == 'success'):
log.info('uq regression success.\\n')
else:
log.info('uq regression not used.\\n')
if (algorithm_status.lower() == 'failed'):
log.info('Problem type issue: UQ only supports classification and regression. The selected algorithm may not be supported by Uncertainty Quantification currently.\\n')
except Exception as e:
log.info('uq test failed.\\n'+str(e))
#print(e)
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
return(output)
#Sagemaker main fn call
if __name__=='__main__':
try:
parser = argparse.ArgumentParser()
parser.add_argument('savFile')
parser.add_argument('csvFile')
parser.add_argument('features')
parser.add_argument('target')
args = parser.parse_args()
home = expanduser("~")
if platform.system() == 'Windows':
uqLogLocation = os.path.join(home,'AppData','Local','HCLT','AION','uqLogs')
else:
uqLogLocation = os.path.join(home,'HCLT','AION','uqLogs')
try:
os.makedirs(uqLogLocation)
except OSError as e:
if (os.path.exists(uqLogLocation)):
pass
else:
raise OSError('uqLogLocation error.')
# self.sagemakerLogLocation=str(sagemakerLogLocation)
filename_uq = 'uqlog_'+str(int(time.time()))
filename_uq=filename_uq+'.log'
# filename = 'mlopsLog_'+Time()
filepath = os.path.join(uqLogLocation, filename_uq)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionUQ')
log.setLevel(logging.DEBUG)
if ',' in args.features:
args.features = [x.strip() for x in args.features.split(',')]
else:
args.features = args.features.split(',')
modelFile = args.savFile
modelfeatures = args.features
csvFile = args.csvFile
target=args.target
model = joblib.load(args.savFile)
##Main uq function call
uqobj = run_uq(modelfeatures,modelFile,csvFile,target)
class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
if (class_status.lower() == 'failed'):
log.info('uq classification failed.\\n')
elif (class_status.lower() == 'success'):
log.info('uq classification success.\\n')
else:
log.info('uq classification not used.\\n')
if (reg_status.lower() == 'failed'):
log.info('uq regression failed.\\n')
elif (reg_status.lower() == 'success'):
log.info('uq regression success.\\n')
else:
log.info('uq regression not used.\\n')
if (algorithm_status.lower() == 'failed'):
msg = 'Uncertainty Quantification not supported for this algorithm'
log.info('Algorithm not supported by Uncertainty Quantification.\\n')
output = {'Problem':'None','msg':str(msg)}
output = json.dumps(output)
except Exception as e:
log.info('uq test failed.\\n'+str(e))
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
#print(e)
print(output)
<s> import json
import logging
import os
import shutil
import time
import sys
from sys import platform
from distutils.util import strtobool
from config_manager.pipeline_config import AionConfigManager
from summarizer import Summarizer
# Base class for the EION configuration manager: reads the required parameters from eion.json, initializes the parameter list, stores the respective values in variables, and returns them to the caller or external modules.
class AionTextManager:
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
def readTextfile(self,dataPath):
#dataPath=self.[baisc][]
file = open(dataPath, "r")
data = file.read()
return data
#print(data)
def generateSummary(self,data,algo,stype):
bert_model = Summarizer()
if stype == "large":
bert_summary = ''.join(bert_model(data, min_length=300))
return(bert_summary)
elif stype == "medium":
bert_summary = ''.join(bert_model(data, min_length=150))
return(bert_summary)
elif stype == "small":
bert_summary = ''.join(bert_model(data, min_length=60))
return(bert_summary)
def aion_textsummary(arg):
Obj = AionTextManager()
configObj = AionConfigManager()
readConfistatus,msg = configObj.readConfigurationFile(arg)
dataPath = configObj.getTextlocation()
text_data = Obj.readTextfile(dataPath)
getAlgo, getMethod = configObj.getTextSummarize()
summarize = Obj.generateSummary(text_data, getAlgo, getMethod)
output = {'status':'Success','summary':summarize}
output_json = json.dumps(output)
return(output_json)
if __name__ == "__main__":
aion_textsummary(sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import logging
logging.getLogger('tensorflow').disabled = True
import shutil
import warnings
from config_manager.online_pipeline_config import OTAionConfigManager
from records import pushrecords
import logging
import mlflow
from pathlib import Path
from pytz import timezone
def pushRecordForOnlineTraining():
try:
from appbe.pages import getversion
status,msg = pushrecords.enterRecord(AION_VERSION)
except Exception as e:
print("Exception", e)
status = False
msg = str(e)
return status,msg
def mlflowSetPath(path,experimentname):
import mlflow
url = "file:" + str(Path(path).parent.parent) + "/mlruns"
mlflow.set_tracking_uri(url)
mlflow.set_experiment(str(experimentname))
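# --- Illustrative follow-up usage (sketch; parameter and metric names are placeholders) --
# Once mlflowSetPath() has pointed MLflow at the local file store and selected
# the experiment, runs can be logged in the usual way:
#
#   with mlflow.start_run(run_name="training"):
#       mlflow.log_param("algorithm", "LogisticRegression")
#       mlflow.log_metric("accuracy", 0.93)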
class server():
def __init__(self):
self.response = None
self.dfNumCols=0
self.dfNumRows=0
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj):
rowfilterexpression = ''
grouperbyjson = ''
model_tried=''
learner_type = ''
topics = {}
numericContinuousFeatures=''
discreteFeatures=''
threshold=-1
targetColumn = ''
categoricalFeatures=''
dataFolderLocation = ''
original_data_file = ''
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
featureReduction = 'False'
reduction_data_file=''
params={}
score = 0
labelMaps={}
featureDataShape=[]
self.riverModels = []
self.riverAlgoNames = ['Online Logistic Regression', 'Online Softmax Regression', 'Online Decision Tree Classifier', 'Online KNN Classifier', 'Online Linear Regression', 'Online Bayesian Linear Regression', 'Online Decision Tree Regressor','Online KNN Regressor']
#ConfigSettings
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
scoreParam = config_obj.getScoringCreteria()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
iterName = iterName.replace(" ", "_")
deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile = config_obj.createDeploymentFolders(deployLocation,iterName,iterVersion)
#Mlflow
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
#Logger
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('************* Version - v2.2.5 *************** \\n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
log.info(msg)
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
#ConfigSetting
problemType,targetFeature,profilerStatus,selectorStatus,learnerStatus,visualizationstatus,deployStatus = config_obj.getModulesDetails()
selectorStatus = False
if(problemType.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
#DataReading
from transformations.dataReader import dataReader
objData = dataReader()
if os.path.isfile(dataLocation):
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
dataFrame.rename(columns=lambda x:x.strip(), inplace=True)
#FilterDataframe
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
#GroupDataframe
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
#KeepOnlyModelFtrs
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfNumRows=dataFrame.shape[0]
self.dfNumCols=dataFrame.shape[1]
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if profilerStatus:
log.info('\\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
from incremental.incProfiler import incProfiler
incProfilerObj = incProfiler()
dataFrame,targetColumn,self.mFeatures,self.numericalFeatures,self.nonNumericFeatures,labelMaps,self.configDict,self.textFeatures,self.emptyFeatures,self.wordToNumericFeatures = incProfilerObj.startIncProfiler(dataFrame,profilerJson,targetFeature,deployLocation,problemType)
self.features = self.configDict['allFtrs']
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\\n')
dataFrame.to_csv(profiled_data_file,index=False)
selectorStatus = False
if learnerStatus:
log.info('Status:-|... AION Learner data preparation started')
ldp_mlstart = time.time()
testPercentage = config_obj.getAIONTestTrainPercentage()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problemType.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if targetColumn != '':
targetData = dataFrame[targetColumn]
xtrain,ytrain,xtest,ytest = mlobj.split_into_train_test_data(featureData,targetData,testPercentage,modelType)
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
log.info('Status:-|... AION Learner data preparation completed')
if learnerStatus:
log.info('\\n================== ML Started ==================')
log.info('Status:-|... AION training started')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
log.info('-------> Target Model Type:'+ modelType)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
from incremental.incMachineLearning import incMachineLearning
incMlObj = incMachineLearning(mlobj)
self.configDict['riverModel'] = False
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=incMlObj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,self.features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps)
if model in self.riverAlgoNames:
self.configDict['riverModel'] = True
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('Status:-|... AION training completed')
log.info('================== ML Completed ==================\\n')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('Status:-|... AION Visualizer started')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfNumRows,self.dfNumCols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('Status:-|... AION Visualizer completed')
try:
os.remove(os.path.join(deployLocation,'aion_xai.py'))
except:
pass
if deployStatus:
if str(model) != 'None':
log.info('\\n================== Deployment Started ==================')
log.info('Status:-|... AION Deployer started')
deployPath = deployLocation
deployer_mlstart = time.time()
src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','useCaseFiles')
shutil.copy2(os.path.join(src,'incBatchLearning.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchLearning.py'),os.path.join(deployPath,'aion_inclearning.py'))
shutil.copy2(os.path.join(src,'incBatchPrediction.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchPrediction.py'),os.path.join(deployPath,'aion_predict.py'))
self.configDict['modelName'] = str(model)
self.configDict['modelParams'] = params
self.configDict['problemType'] = problemType.lower()
self.configDict['score'] = score
self.configDict['metricList'] = []
self.configDict['metricList'].append(score)
self.configDict['trainRowsList'] = []
self.configDict['trainRowsList'].append(featureDataShape[0])
self.configDict['scoreParam'] = scoreParam
self.configDict['partialFit'] = 0
with open(os.path.join(deployLocation,'production', 'Config.json'), 'w', encoding='utf8') as f:
json.dump(self.configDict, f, ensure_ascii=False)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Batch Deployment completed')
log.info('================== Deployment Completed ==================')
# self.features = profilerObj.set_features(self.features,self.textFeatures,self.vectorizerFeatures)
self.matrix += '}'
self.trainmatrix += '}'
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
model_tried = eval('['+model_tried+']')
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
print(output_json)
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w') as f:
json.dump(output_json, f)
output_json = json.dumps(output_json)
log.info('\\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfNumRows)+','+str(self.dfNumCols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\\n')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
output_json = {"status":"FAIL","message":str(inst).strip('"')}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\\nTotal execution time(sec) :'+str(executionTime))
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output_json))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def aion_ot_train_model(arg):
warnings.filterwarnings('ignore')
try:
valid, msg = pushRecordForOnlineTraining()
if valid:
serverObj = server()
configObj = OTAionConfigManager()
jsonPath = arg
readConfStatus,msg = configObj.readConfigurationFile(jsonPath)
if(readConfStatus == False):
output = {"status":"FAIL","message":str(msg).strip('"')}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
output = serverObj.startScriptExecution(configObj)
else:
output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
except Exception as inst:
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
if __name__ == "__main__":
aion_ot_train_model(sys.argv[1])
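# Assumed command-line invocation (the online training configuration path is the only argument):
#   python <this module> <online_training_config>.json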
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
from pandas import json_normalize
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from sagemaker.aionMlopsService import aionMlopsService
import logging
import os.path
from os.path import expanduser
import platform,sys
from pathlib import Path
from sklearn.model_selection import train_test_split
def getAWSConfiguration(mlops_params,log):
awsId=mlops_params['awsSagemaker']['awsID']
if ((not awsId) or (awsId is None)):
awsId=""
log.info('awsId error. ')
awsAccesskeyid=mlops_params['awsSagemaker']['accesskeyID']
if ((not awsAccesskeyid) or (awsAccesskeyid is None)):
awsAccesskeyid=""
log.info('awsAccesskeyid error. ')
awsSecretaccesskey=mlops_params['awsSagemaker']['secretAccesskey']
if ((not awsSecretaccesskey) or (awsSecretaccesskey is None)):
awsSecretaccesskey=""
log.info('awsSecretaccesskey error. ')
awsSessiontoken=mlops_params['awsSagemaker']['sessionToken']
if ((not awsSessiontoken) or (awsSessiontoken is None)):
awsSessiontoken=""
log.info('awsSessiontoken error. ')
awsRegion=mlops_params['awsSagemaker']['region']
if ((not awsRegion) or (awsRegion is None)):
awsRegion=""
log.info('awsRegion error. ')
IAMSagemakerRoleArn=mlops_params['awsSagemaker']['IAMSagemakerRoleArn']
if ((not IAMSagemakerRoleArn) or (IAMSagemakerRoleArn is None)):
IAMSagemakerRoleArn=""
log.info('IAMSagemakerRoleArn error. ')
return awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn
def getMlflowParams(mlops_params,log):
modelInput = mlops_params['modelInput']
data = mlops_params['data']
mlflowtosagemakerDeploy=mlops_params['sagemakerDeploy']
if ((not mlflowtosagemakerDeploy) or (mlflowtosagemakerDeploy is None)):
mlflowtosagemakerDeploy="True"
mlflowtosagemakerPushOnly=mlops_params['deployExistingModel']['status']
if ((not mlflowtosagemakerPushOnly) or (mlflowtosagemakerPushOnly is None)):
mlflowtosagemakerPushOnly="False"
mlflowtosagemakerPushImageName=mlops_params['deployExistingModel']['dockerImageName']
if ((not mlflowtosagemakerPushImageName) or (mlflowtosagemakerPushImageName is None)):
mlflowtosagemakerPushImageName="mlops_image"
mlflowtosagemakerdeployModeluri=mlops_params['deployExistingModel']['deployModeluri']
if ((not mlflowtosagemakerdeployModeluri) or (mlflowtosagemakerdeployModeluri is None)):
mlflowtosagemakerdeployModeluri="None"
log.info('mlflowtosagemakerdeployModeluri error. ')
cloudInfrastructure = mlops_params['modelOutput']['cloudInfrastructure']
if ((not cloudInfrastructure) or (cloudInfrastructure is None)):
cloudInfrastructure="Sagemaker"
endpointName=mlops_params['endpointName']
if ((not endpointName) or (endpointName is None)):
endpointName="aion-demo-app"
log.info('endpointName not given, using the default one. ')
experimentName=str(endpointName)
mlflowContainerName=str(endpointName)
return modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName
def getPredictionParams(mlops_params,log):
predictStatus=mlops_params['prediction']['status']
if ((not predictStatus) or (predictStatus is None)):
predictStatus="False"
modelInput = mlops_params['modelInput']
data = mlops_params['data']
if (predictStatus == "True" or predictStatus.lower()== "true"):
if ((not modelInput) or (modelInput is None)):
log.info('prediction model input error. Please check the given model file or its path for prediction. ')
if ((not data) or (data is None)):
log.info('prediction data input error. Please check the given data file or its path for prediction. ')
targetFeature=mlops_params['prediction']['target']
return predictStatus,targetFeature
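# Illustrative mlops_params layout read by the helpers above (keys taken from the accessors,
# values are placeholders, not working credentials):
# {
#   "modelInput": "<path to a joblib-serialized model>",
#   "data": <record(s) to score when prediction.status is "True">,
#   "sagemakerDeploy": "True",
#   "deployExistingModel": {"status": "False", "dockerImageName": "mlops_image", "deployModeluri": "None"},
#   "modelOutput": {"cloudInfrastructure": "Sagemaker"},
#   "endpointName": "aion-demo-app",
#   "awsSagemaker": {"awsID": "", "accesskeyID": "", "secretAccesskey": "", "sessionToken": "",
#                    "region": "", "IAMSagemakerRoleArn": ""},
#   "prediction": {"status": "False", "target": ""}
# }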
def sagemakerPrediction(mlopsobj,data,log):
df = json_normalize(data)
model=None
predictionStatus=False
try:
endpointPrediction=mlopsobj.predict_sm_app_endpoint(df)
if (endpointPrediction is None):
log.info('Sagemaker endpoint application prediction Issue.')
outputjson = {"status":"Error","msg":"Sagemaker endpoint application prediction Issue"}
outputjson = json.dumps(outputjson)
#print("predictions: "+str(outputjson))
predictionStatus=False
else:
log.info("sagemaker end point Prediction: \\n"+str(endpointPrediction))
df['prediction'] = endpointPrediction
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
outputjson = json.dumps(outputjson)
#print("predictions: "+str(outputjson))
predictionStatus=True
except Exception as e:
#log.info("sagemaker end point Prediction error: \\n")
outputjson = {"status":"Error","msg":str(e)}
outputjson=None
predictionStatus=False
return outputjson,predictionStatus
## Main aion sagemaker fn call
def sagemaker_exec(mlops_params,log):
#mlops_params = json.loads(config)
mlops_params=mlops_params
modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName = getMlflowParams(mlops_params,log)
mlflowModelname=None
awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn = getAWSConfiguration(mlops_params,log)
predictStatus,targetFeature = getPredictionParams(mlops_params,log)
sagemakerDeployOption='create'
deleteAwsecrRepository='False'
sagemakerAppName=str(endpointName)
ecrRepositoryName='aion-ecr-repo'
#aws ecr model app_name should contain only [A-Za-z0-9-], rechecking here.
import re
if sagemakerAppName:
pattern = re.compile("[A-Za-z0-9-]+")
# if found match (entire string matches pattern)
if pattern.fullmatch(sagemakerAppName) is not None:
#print("Found match: ")
pass
else:
log.info('Invalid sagemaker application name; the name should contain only [A-Za-z0-9-]. Using the default name.')
sagemakerAppName = 'aion-demo-app'
else:
sagemakerAppName = 'aion-demo-app'
#The following three AWS parameter values are hard coded because they are not currently used. If AION starts using these options, make sure to fetch the values from the GUI.
sagemakerDeployOption="create"
deleteAwsecrRepository="False"
ecrRepositoryName="aion_test_repo"
log.info('mlops parameter check done.')
# predictionStatus=False
deploystatus = 'SUCCESS'
try:
log.info('cloudInfrastructure: '+str(cloudInfrastructure))
if(cloudInfrastructure.lower() == "sagemaker"):
## sagemaker app prediction call
if (predictStatus.lower() == "true"):
# df = json_normalize(data)
model=None
mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName)
outputjson,predictionStatus = sagemakerPrediction(mlopsobj,data,log)
print("predictions: "+str(outputjson))
predictionStatus=predictionStatus
return(outputjson)
else:
if Path(modelInput).is_file():
msg = ''
model = joblib.load(modelInput)
ProblemName = model.__class__.__name__
mlflowModelname=str(ProblemName)
log.info('aion mlops Model name: '+str(mlflowModelname))
df=None
mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName)
mlflow2sm_status,localhost_container_status=mlopsobj.mlflow2sagemaker_deploy()
log.info('mlflow2sm_status: '+str(mlflow2sm_status))
log.info('localhost_container_status: '+str(localhost_container_status))
# Checking deploy status
if (mlflowtosagemakerPushOnly.lower() == "true" ):
if (mlflow2sm_status.lower() == "success"):
deploystatus = 'SUCCESS'
msg = 'Endpoint successfully deployed in sagemaker'
log.info('Endpoint successfully deployed in sagemaker (push existing model container).\\n ')
elif(mlflow2sm_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Endpoint failed to deploy in sagemaker'
log.info('Endpoint failed to deploy in sagemaker (push existing model container).\\n ')
else:
pass
elif(mlflowtosagemakerDeploy.lower() == "true"):
if (mlflow2sm_status.lower() == "success"):
deploystatus='SUCCESS'
msg = 'Endpoint successfully deployed in sagemaker'
log.info('Endpoint successfully deployed in sagemaker')
elif(mlflow2sm_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Endpoint failed to deploy in sagemaker'
log.info('Endpoint failed to deploy in sagemaker.\\n ')
elif (mlflow2sm_status.lower() == "notdeployed"):
deploystatus= 'ERROR'
msg = 'Sagemaker compatible container created'
log.info('sagemaker endpoint not deployed, check aws connection and credentials. \\n')
elif (mlflowtosagemakerDeploy.lower() == "false"):
if(localhost_container_status.lower() == "success"):
deploystatus = 'SUCCESS'
msg = 'Localhost mlops docker created successfully'
log.info('Localhost mlops docker created successfully. \\n')
elif(localhost_container_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Localhost mlops docker creation failed'
log.info('Localhost mlops docker creation failed. \\n')
elif (localhost_container_status.lower() == "notdeployed"):
deploystatus= 'ERROR'
log.info('Localhost mlops docker not deployed, check local docker status. \\n')
else:
pass
else:
pass
else:
deploystatus = 'ERROR'
msg = 'Model Path not Found'
print('Error: Model Path not Found')
outputjson = {"status":str(deploystatus),"data":str(msg)}
outputjson = json.dumps(outputjson)
print("predictions: "+str(outputjson))
return(outputjson)
except Exception as inst:
deploystatus = 'ERROR'
outputjson = {"status":str(deploystatus),"data":str(inst)}
outputjson = json.dumps(outputjson)
print("predictions: "+str(outputjson))
return(outputjson)
def aion_sagemaker(config):
try:
mlops_params = config
print(mlops_params)
from appbe.dataPath import LOG_LOCATION
sagemakerLogLocation = LOG_LOCATION
try:
os.makedirs(sagemakerLogLocation)
except OSError as e:
if (os.path.exists(sagemakerLogLocation)):
pass
else:
raise OSError('sagemakerLogLocation error.')
filename_mlops = 'mlopslog_'+str(int(time.time()))
filename_mlops=filename_mlops+'.log'
filepath = os.path.join(sagemakerLogLocation, filename_mlops)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionMLOps')
log.setLevel(logging.DEBUG)
output = sagemaker_exec(mlops_params,log)
return output
except Exception as inst:
print(inst)
deploystatus = 'ERROR'
output = {"status":str(deploystatus),"data":str(inst)}
output = json.dumps(output)
print("predictions: "+str(output))
return(output)
#Sagemaker main fn call
if __name__=='__main__':
json_config = str(sys.argv[1])
output = aion_sagemaker(json.loads(json_config))
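# Assumed shell usage (the whole mlops configuration is passed as one JSON string argument):
#   python <this module> "<mlops_params json>"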
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import requests
import json
import os
from datetime import datetime
import socket
import getmac
def telemetry_data(operation,Usecase,data):
now = datetime.now()
ID = datetime.timestamp(now)
record_date = now.strftime("%y-%m-%d %H:%M:%S")
try:
user = os.getlogin()
except:
user = 'NA'
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
item = {}
item['ID'] = str(int(ID))
item['record_date'] = record_date
item['UseCase'] = Usecase
item['user'] = str(user)
item['operation'] = operation
item['remarks'] = data
item['hostname'] = computername
item['macaddress'] = macaddress
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
record = {}
record['TableName'] = 'AION_OPERATION'
record['Item'] = item
record = json.dumps(record)
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
check_telemetry_file()
except Exception as inst:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
f=open(filename, "a+")
f.write(record+'\\n')
f.close()
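# Illustrative record posted above (placeholder values only, not real telemetry):
# {"TableName": "AION_OPERATION",
#  "Item": {"ID": "1700000000", "record_date": "24-01-01 10:00:00", "UseCase": "usecase_1",
#           "user": "analyst", "operation": "training", "remarks": "completed",
#           "hostname": "host.example.com", "macaddress": "00:11:22:33:44:55"}}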
def check_telemetry_file():
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
if(os.path.isfile(file_path)):
f = open(file_path, 'r')
file_content = f.read()
f.close()
matched_lines = file_content.split('\\n')
write_lines = []
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
for record in matched_lines:
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
except:
write_lines.append(record)
f = open(file_path, "a")
f.seek(0)
f.truncate()
for record in write_lines:
f.write(record+'\\n')
f.close()
return True
else:
return True
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from statsmodels.tsa.stattools import adfuller
from scipy.stats.stats import pearsonr
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from numpy.random import randn
from sklearn.metrics import normalized_mutual_info_score
from sklearn.feature_selection import mutual_info_regression
import logging
#SDP1 class import
from feature_engineering.featureImportance import featureImp
from feature_engineering.featureReducer import featureReducer
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition import FastICA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
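# Worked example (computed by hand): ranking([3.0, 1.0, 2.0], ['a', 'b', 'c'], order=-1)
# min-max scales the negated ranks and returns {'a': 0.0, 'b': 1.0, 'c': 0.5},
# i.e. RFE rank 1 (the best feature) maps to the highest score after inversion.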
# noinspection PyPep8Naming
class featureSelector():
def __init__(self):
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.log = logging.getLogger('eion')
def startSelector(self,df,conf_json,textFeatures,targetFeature,problem_type):
try:
categoricalMaxLabel = int(conf_json['categoryMaxLabel'])
pca='None'
pcaReducerStatus = conf_json['featureEngineering']['PCA']
svdReducerStatus = conf_json['featureEngineering']['SVD']
factorReducerStatus = conf_json['featureEngineering']['FactorAnalysis']
icaReducerStatus = conf_json['featureEngineering']['ICA']
nfeatures=float(conf_json['featureEngineering']['numberofComponents'])
statisticalConfig = conf_json['statisticalConfig']
corrThresholdInput = float(statisticalConfig.get('correlationThresholdFeatures',0.50))
corrThresholdTarget = float(statisticalConfig.get('correlationThresholdTarget',0.85))
pValThresholdInput = float(statisticalConfig.get('pValueThresholdFeatures',0.05))
pValThresholdTarget = float(statisticalConfig.get('pValueThresholdTarget',0.04))
varThreshold = float(statisticalConfig.get('varianceThreshold',0.01))
allFeaturesSelector = conf_json['featureSelection']['allFeatures']
correlationSelector = conf_json['featureSelection']['statisticalBased']
modelSelector = conf_json['featureSelection']['modelBased']
featureSelectionMethod = conf_json['selectionMethod']['featureSelection']
featureEngineeringSelector = conf_json['selectionMethod']['featureEngineering']
if featureSelectionMethod == 'True':
featureEngineeringSelector = 'False'
# if feature engineering is true then we check whether PCA or SVD is true. By default we will run PCA
if featureEngineeringSelector == 'True':
if pcaReducerStatus == 'True':
svdReducerStatus = 'False'
factorReducerStatus = 'False'
icaReducerStatus = 'False'
elif svdReducerStatus == 'True':
pcaReducerStatus = 'False'
factorReducerStatus = 'False'
icaReducerStatus = 'False'
elif factorReducerStatus == 'True':
pcaReducerStatus = 'False'
svdReducerStatus = 'False'
icaReducerStatus = 'False'
elif icaReducerStatus == 'True':
pcaReducerStatus = 'False'
svdReducerStatus = 'False'
factorReducerStatus = 'False'
else:
pcaReducerStatus = 'True'
if featureSelectionMethod == 'False' and featureEngineeringSelector == 'False':
featureSelectionMethod = 'True'
if featureSelectionMethod == 'True':
if modelSelector == 'False' and correlationSelector == 'False' and allFeaturesSelector == 'False':
modelSelector = 'True'
reductionMethod = 'na'
bpca_features = []
#nfeatures = 0
if 'maxClasses' in conf_json:
maxClasses = int(conf_json['maxClasses'])
else:
maxClasses = 20
target = targetFeature
self.log.info('-------> Feature: '+str(target))
dataFrame = df
pThresholdInput=pValThresholdInput
pThresholdTarget=pValThresholdTarget
cThresholdInput=corrThresholdInput
cThresholdTarget=corrThresholdTarget
numericDiscreteFeatures=[]
similarGruops=[]
numericContinuousFeatures=[]
categoricalFeatures=[]
nonNumericFeatures=[]
apca_features = []
dTypesDic={}
dataColumns = list(dataFrame.columns)
features_list = list(dataFrame.columns)
modelselectedFeatures=[]
topFeatures=[]
allFeatures=[]
targetType=""
# just to make sure feature engineering is false
#print(svdReducerStatus)
if featureEngineeringSelector.lower() == 'false' and correlationSelector.lower() == "true" and len(textFeatures) <= 0:
reducerObj=featureReducer()
self.log.info(featureReducer.__doc__)
self.log.info('Status:- |... Feature reduction started')
updatedNumericFeatures,updatedFeatures,similarGruops=reducerObj.startReducer(dataFrame,dataColumns,target,varThreshold)
if len(updatedFeatures) <= 1:
self.log.info('=======================================================')
self.log.info('Most of the features are of low variance. Use Model based feature engineering for better result')
self.log.info('=======================================================')
raise Exception('Most of the features are of low variance. Use Model based feature engineering for better result')
dataFrame=dataFrame[updatedFeatures]
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction completed')
elif (pcaReducerStatus.lower() == "true" or svdReducerStatus.lower() == 'true' or factorReducerStatus.lower() == 'true' or icaReducerStatus.lower()=='true') and featureEngineeringSelector.lower() == 'true':
# check is PCA or SVD is true
pcaColumns=[]
#print(svdReducerStatus.lower())
if target != "":
dataColumns.remove(target)
targetArray=df[target].values
targetArray.shape = (len(targetArray), 1)
if pcaReducerStatus.lower() == "true":
if nfeatures == 0:
pca = PCA(n_components='mle',svd_solver = 'full')
elif nfeatures < 1:
pca = PCA(n_components=nfeatures,svd_solver = 'full')
else:
pca = PCA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'PCA'
elif svdReducerStatus.lower() == 'true':
if nfeatures < 2:
nfeatures = 2
pca = TruncatedSVD(n_components=int(nfeatures), n_iter=7, random_state=42)
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'SVD'
elif factorReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FactorAnalysis()
else:
pca=FactorAnalysis(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'FactorAnalysis'
elif icaReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FastICA()
else:
pca=FastICA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'IndependentComponentAnalysis'
pcaDF=pd.DataFrame(pcaArray)
#print(pcaDF)
for i in range(len(pcaDF.columns)):
pcaColumns.append(method+str(i))
topFeatures=pcaColumns
apca_features= pcaColumns.copy()
if target != '':
pcaColumns.append(target)
scaledDf = pd.DataFrame(np.hstack((pcaArray, targetArray)),columns=pcaColumns)
else:
scaledDf = pd.DataFrame(pcaArray,columns=pcaColumns)
self.log.info("<--- dataframe after dimensionality reduction using "+method)
self.log.info(scaledDf.head())
dataFrame=scaledDf
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction started')
self.log.info('Status:- |... '+method+' done')
self.log.info('Status:- |... Feature reduction completed')
self.numofCols = dataFrame.shape[1]
self.numOfRows = dataFrame.shape[0]
dataFDtypes=[]
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
#Categoring datatypes
for item in dataFDtypes:
dTypesDic[item[0]] = item[1]
if item[0] != target:
if item[1] in ['int16', 'int32', 'int64'] :
numericDiscreteFeatures.append(item[0])
elif item[1] in ['float16', 'float32', 'float64']:
numericContinuousFeatures.append(item[0])
else:
nonNumericFeatures.append(item[0])
self.numOfRows = dataFrame.shape[0]
'''
cFRatio = 0.01
if(self.numOfRows < 1000):
cFRatio = 0.2
elif(self.numOfRows < 10000):
cFRatio = 0.1
elif(self.numOfRows < 100000):
cFRatio = 0.01
'''
for i in numericDiscreteFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
for i in numericContinuousFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
discreteFeatures=list(set(numericDiscreteFeatures)-set(categoricalFeatures))
numericContinuousFeatures=list(set(numericContinuousFeatures)-set(categoricalFeatures))
self.log.info('-------> Numerical continuous features :'+(str(numericContinuousFeatures))[:500])
self.log.info('-------> Numerical discrete features :'+(str(discreteFeatures))[:500])
self.log.info('-------> Non numerical features :'+(str(nonNumericFeatures))[:500])
self.log.info('-------> Categorical Features :'+(str(categoricalFeatures))[:500])
if target !="" and featureEngineeringSelector.lower() == "false" and correlationSelector.lower() == "true":
self.log.info('\\n------- Feature Based Correlation Analysis Start ------')
start = time.time()
featureImpObj = featureImp()
topFeatures,targetType= featureImpObj.FFImpNew(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThresholdInput,pThresholdTarget,cThresholdInput,cThresholdTarget,categoricalMaxLabel,problem_type,maxClasses)
#topFeatures,targetType= featureImpObj.FFImp(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThreshold,cThreshold,categoricalMaxLabel,problem_type,maxClasses)
self.log.info('-------> Highly Correlated Features Using Correlation Techniques'+(str(topFeatures))[:500])
executionTime=time.time() - start
self.log.info('-------> Time Taken: '+str(executionTime))
self.log.info('Status:- |... Correlation based feature selection done: '+str(len(topFeatures))+' out of '+str(len(dataColumns))+' selected')
self.log.info('------- Feature Based Correlation Analysis End ------>\\n')
if targetType == '':
if problem_type.lower() == 'classification':
targetType = 'categorical'
if problem_type.lower() == 'regression':
targetType = 'continuous'
if target !="" and featureEngineeringSelector.lower() == "false" and modelSelector.lower() == "true":
self.log.info('\\n------- Model Based Correlation Analysis Start -------')
start = time.time()
updatedFeatures = dataColumns
updatedFeatures.remove(target)
#targetType = problem_type.lower()
modelselectedFeatures=[]
if targetType == 'categorical':
try:
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
etc = ExtraTreesClassifier(n_estimators=100)
etc.fit(xtrain, ytrain)
rfe = RFE(etc, n_features_to_select=1, verbose =0 )
rfe.fit(xtrain, ytrain)
# total list of features
ranks = {}
ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1)
for item in ranks["RFE_LR"]:
if ranks["RFE_LR"][item]>0.30: #threshold as 30%
modelselectedFeatures.append(item)
modelselectedFeatures = list(modelselectedFe