file
stringlengths 6
44
| content
stringlengths 38
162k
|
---|---|
__init__.py | import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from .bin.aion_pipeline import aion_train_model
|
aion.py | import argparse
import sys
import os
import subprocess
# Sub-command keywords accepted via the -m/--module CLI flag; each value maps
# to one of the aion_* entry-point functions defined below and is compared
# case-insensitively in the __main__ dispatch block.
INSTALL = 'install'
LINUXINSTALL = 'linuxinstall'
FE_MIGRATE = 'migrateappfe'
LAUNCH_KAFKA = 'launchkafkaconsumer'
RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal'
BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal'
CONVERT_MODEL = 'convertmodel'
START_MLFLOW = 'mlflow'
COMMON_SERVICE = 'service'
TRAINING = 'training'
TRAINING_AWS = 'trainingonaws'
TRAINING_DISTRIBUTED = 'distributedtraining'
START_APPF = 'appfe'
ONLINE_TRAINING = 'onlinetraining'
TEXT_SUMMARIZATION = 'textsummarization'
GENERATE_MLAC = 'generatemlac'
AWS_TRAINING = 'awstraining'
LLAMA_7B_TUNING = 'llama7btuning'
LLM_PROMPT = 'llmprompt'
LLM_TUNING = 'llmtuning'
LLM_PUBLISH = 'llmpublish'
LLM_BENCHMARKING = 'llmbenchmarking'
TELEMETRY_PUSH = 'pushtelemetry'
def aion_aws_training(confFile):
    """Run an AWS-hosted training job described by *confFile* and print its status."""
    from hyperscalers.aion_aws_training import awsTraining
    print(awsTraining(confFile))
def aion_training(confFile):
    """Run the standard AION training pipeline for *confFile* and print its status."""
    from bin.aion_pipeline import aion_train_model
    print(aion_train_model(confFile))
def aion_awstraining(config_file):
    """Echo the config path, then launch training on a provisioned AWS instance."""
    from hyperscalers import aws_instance
    print(config_file)
    aws_instance.training(config_file)
def aion_generatemlac(ConfFile):
    """Generate MLaC (ML-as-Code) artifacts from *ConfFile* and print the status."""
    from bin.aion_mlac import generate_mlac_code
    print(generate_mlac_code(ConfFile))
def aion_textsummarization(confFile):
    """Run the text-summarization pipeline for *confFile* and print its status.

    Bug fix: the status was previously assigned but never reported, unlike
    every other entry point in this module (aion_training, aion_oltraining,
    aion_generatemlac, ...), which all print the pipeline result.
    """
    from bin.aion_text_summarizer import aion_textsummary
    status = aion_textsummary(confFile)
    print(status)
def aion_oltraining(confFile):
    """Run the online (incremental) training pipeline and print its status."""
    from bin.aion_online_pipeline import aion_ot_train_model
    print(aion_ot_train_model(confFile))
def do_telemetry_sync():
    """Push any queued telemetry records to the telemetry backend."""
    from appbe.telemetry import SyncTelemetry
    SyncTelemetry()
def aion_llm_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
    """Publish a tuned LLM image/endpoint; thin pass-through to llm.llm_inference.LLM_publish."""
    from llm.llm_inference import LLM_publish
    LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image)
def aion_migratefe(operation):
    """Run a single Django management *operation* (e.g. 'makemigrations',
    'migrate') for the AION app front end."""
    import os
    import sys
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Django expects argv-style input: [program, subcommand].
    execute_from_command_line([os.path.abspath(__file__), operation])
def aion_appfe(url,port):
    """Start the Django app front end, serving at url:port via the custom
    'runaion' management command."""
    import os
    import sys
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # argv-style: [program, subcommand, address:port]
    execute_from_command_line([os.path.abspath(__file__), 'runaion', "%s:%s"%(url,port)])
def aion_linux_install(version):
    """Install the Linux-specific dependency set for the given AION *version*."""
    from install import linux_dependencies
    linux_dependencies.process(version)
def aion_install(version):
    """Install the default dependency set for the given AION *version*."""
    from install import dependencies
    dependencies.process(version)
def aion_service(ip,port,username,password):
    """Start the common AION service endpoint bound to ip:port with the given credentials."""
    from bin.aion_service import start_server
    start_server(ip,port,username,password)
def aion_distributedLearning(confFile):
    """Run distributed training as described by *confFile*."""
    from distributed_learning import learning
    learning.training(confFile)
def aion_launchkafkaconsumer():
    """Launch the MLOps Kafka consumer loop."""
    from mlops import kafka_consumer
    kafka_consumer.launch_kafka_consumer()
def aion_start_mlflow():
    """Launch the MLflow tracking UI backed by <DEPLOY_LOCATION>/mlruns.

    On Windows the bundled Scripts/mlflow.exe is invoked through the current
    interpreter (non-blocking Popen); elsewhere the 'mlflow' executable on
    PATH is used (blocking check_call).

    Cleanup: removed unused imports/locals (shutil, expanduser/home,
    outputStr) and hoisted the DEPLOY_LOCATION/mlruns join that was
    duplicated in both branches.
    """
    from appbe.dataPath import DEPLOY_LOCATION
    import platform
    mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','Scripts','mlflow.exe'))
    print(mlflowpath)
    backend_store_uri = "file:///" + os.path.join(DEPLOY_LOCATION,'mlruns')
    if platform.system() == 'Windows':
        subprocess.Popen([sys.executable, mlflowpath,"ui", "--backend-store-uri", backend_store_uri])
    else:
        subprocess.check_call(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri", backend_store_uri])
def aion_model_conversion(config_file):
    """Convert a trained model to another runtime format per *config_file*."""
    from conversions import model_convertions
    model_convertions.convert(config_file)
def aion_model_buildMLaCContainer(config):
    """Build the MLaC docker container locally from the given JSON *config*."""
    from mlops import build_container
    build_container.local_docker_build(config)
def aion_model_runpipelinelocal(config):
    """Execute the MLaC pipeline locally using the given JSON *config*."""
    from mlops import local_pipeline
    local_pipeline.run_pipeline(config)
def aion_llm_tuning(config):
    """Run LLM fine-tuning as described by *config*."""
    from llm.llm_tuning import run
    run(config)
def aion_llm_prompt(cloudconfig,instanceid,prompt):
    """Send *prompt* to the LLM instance *instanceid* for prediction."""
    from llm.aws_instance_api import LLM_predict
    LLM_predict(cloudconfig,instanceid,prompt)
def llm_bench_marking(hypervisor,instanceid,model,usecaseid,eval):
    """Run LLM benchmarking for *usecaseid* on the given instance.

    Fix: removed the leftover debug `print(eval)` statement.
    Note: the parameter name `eval` shadows the builtin; it is kept unchanged
    for backward compatibility with keyword callers.
    """
    from llm.bench_marking import bench_mark
    bench_mark(hypervisor,instanceid,model,usecaseid,eval)
if __name__ == "__main__":
    # Command-line dispatcher: -m/--module selects one of the aion_* entry points.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--configPath', help='Config File Path')
    parser.add_argument('-i', '--instanceid', help='instanceid')
    parser.add_argument('-hv', '--hypervisor', help='hypervisor')
    parser.add_argument('-md', '--model', help='model')
    parser.add_argument('-uc', '--usecase', help='usecase')
    parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path')
    parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING,DISTRIBUTEDTRAINING')
    parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method ')
    parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method ')
    parser.add_argument('-ac', '--appfecommand', help='APP Front End Command ')
    parser.add_argument('-un','--username', help="USERNAME")
    parser.add_argument('-passw','--password', help="PASSWORD")
    parser.add_argument('-j', '--jsoninput', help='JSON Input')
    parser.add_argument('-v', '--version', help='Installer Version')
    parser.add_argument('-pf', '--prompt', help='Prompt File')
    parser.add_argument('-r', '--region', help='REGION NAME')
    parser.add_argument('-im', '--image', help='IMAGE NAME')
    parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc')
    args = parser.parse_args()
    # Robustness fix: a missing -m previously crashed with AttributeError on
    # None.lower(); fail with a proper usage message instead.
    if not args.module:
        parser.error('-m/--module is required')
    module = args.module.lower()  # normalize once instead of per-branch
    if module == TRAINING:
        aion_training(args.configPath)
    elif module == TRAINING_AWS:
        aion_awstraining(args.configPath)
    elif module == TRAINING_DISTRIBUTED:
        aion_distributedLearning(args.configPath)
    elif module == START_APPF:
        aion_appfe(args.ipaddress,args.port)
    elif module == ONLINE_TRAINING:
        aion_oltraining(args.configPath)
    elif module == TEXT_SUMMARIZATION:
        aion_textsummarization(args.configPath)
    elif module == GENERATE_MLAC:
        aion_generatemlac(args.configPath)
    elif module == COMMON_SERVICE:
        aion_service(args.ipaddress,args.port,args.username,args.password)
    elif module == START_MLFLOW:
        # Bug fix: previously called aion_mlflow(), an undefined name; the
        # function defined above is aion_start_mlflow.
        aion_start_mlflow()
    elif module == CONVERT_MODEL:
        aion_model_conversion(args.configPath)
    elif module == BUILD_MLAC_CONTAINER:
        aion_model_buildMLaCContainer(args.jsoninput)
    elif module == RUN_LOCAL_MLAC_PIPELINE:
        aion_model_runpipelinelocal(args.jsoninput)
    elif module == LAUNCH_KAFKA:
        aion_launchkafkaconsumer()
    elif module == INSTALL:
        aion_install(args.version)
    elif module == LINUXINSTALL:
        aion_linux_install(args.version)
    elif module == FE_MIGRATE:
        aion_migratefe('makemigrations')
        aion_migratefe('migrate')
    elif module == AWS_TRAINING:
        aion_aws_training(args.configPath)
    elif module == LLAMA_7B_TUNING:
        aion_llm_tuning(args.configPath)
    elif module == LLM_TUNING:
        aion_llm_tuning(args.configPath)
    elif module == LLM_PROMPT:
        aion_llm_prompt(args.cloudConfigPath,args.instanceid,args.prompt)
    elif module == LLM_PUBLISH:
        aion_llm_publish(args.cloudConfigPath,args.instanceid,args.hypervisor,args.model,args.usecase,args.region,args.image)
    elif module == LLM_BENCHMARKING:
        llm_bench_marking(args.hypervisor,args.instanceid,args.model,args.usecase, args.eval)
    elif module == TELEMETRY_PUSH:
        do_telemetry_sync()
aionMlopsService.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# -*- coding: utf-8 -*-
import logging
logging.getLogger('tensorflow').disabled = True
import json
import mlflow
import mlflow.sklearn
import mlflow.sagemaker as mfs
# from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# from sklearn import datasets
import time
import numpy as np
# Load dataset
# from sklearn.datasets import load_iris
import pickle
# Load the pickled model
# from matplotlib import pyplot
import sys
import os
import boto3
import subprocess
import os.path
from os.path import expanduser
import platform
from pathlib import Path
class aionMlopsService:
    """Pushes an AION-trained model through MLflow to AWS SageMaker: builds
    and pushes the serving container to ECR and creates a SageMaker endpoint,
    or builds the container locally only, depending on the constructor flags."""
    def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName):
        """Store deployment configuration and open a per-run log file under
        the shared sagemaker log location.

        model: trained model object (assumed sklearn-compatible — it is later
        logged via mlflow.sklearn.log_model; confirm for other flavors).
        mlflowtosagemakerDeploy / mlflowtosagemakerPushOnly: 'true'/'false'
        strings choosing between create+deploy, push-existing-only and
        local-build-only flows in mlflow2sagemaker_deploy().
        """
        try:
            self.model=model
            self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy
            self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly)
            self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName)
            self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri)
            self.experiment_name=experiment_name
            self.mlflow_modelname=mlflow_modelname
            self.awsaccesskey_id=awsaccesskey_id
            self.awssecretaccess_key=awssecretaccess_key
            self.aws_session_token=aws_session_token
            self.mlflow_container_name=mlflow_container_name
            self.aws_region=aws_region
            self.aws_id=aws_id
            self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn
            self.sm_app_name=sm_app_name
            self.sm_deploy_option=sm_deploy_option
            self.delete_ecr_repository=delete_ecr_repository
            self.ecrRepositoryName=ecrRepositoryName
            from appbe.dataPath import LOG_LOCATION
            sagemakerLogLocation = LOG_LOCATION
            try:
                os.makedirs(sagemakerLogLocation)
            except OSError as e:
                # An already-existing directory is fine; anything else is fatal.
                if (os.path.exists(sagemakerLogLocation)):
                    pass
                else:
                    raise OSError('sagemakerLogLocation error.')
            self.sagemakerLogLocation=str(sagemakerLogLocation)
            # One log file per run, timestamped to the second.
            filename_mlops = 'mlopslog_'+str(int(time.time()))
            filename_mlops=filename_mlops+'.log'
            filepath = os.path.join(self.sagemakerLogLocation, filename_mlops)
            logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
            self.log = logging.getLogger('aionMLOps')
            self.log.setLevel(logging.DEBUG)
        except Exception as e:
            # NOTE(review): if the failure happened before self.log was set,
            # this handler itself raises AttributeError — confirm intended.
            self.log.info('<!------------- mlflow model INIT Error ---------------> '+str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            # NOTE(review): os.path.split returns a (head, tail) tuple; the
            # whole tuple is logged here rather than just the filename.
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def mlflowSetPath(self,path):
track_dir=os.path.join(path,'mlruns')
uri="file:"+str(Path(track_dir))
return uri
#Currently not used this delete ecr repository option
def ecr_repository_delete(self,rep_name):
# import subprocess
client = boto3.client('ecr')
repositories = client.describe_repositories()
ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True)
mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true'])
self.log.info('Success: deleted aws ecr repository which contains mlops image.')
    def check_sm_deploy_status(self,app_name):
        """Return the SageMaker endpoint status string for *app_name*
        (e.g. 'InService', 'Failed', 'Creating'); logs the failure reason
        when the endpoint reports one.

        Raises whatever boto3 raises when the endpoint does not exist —
        callers wrap this in try/except.
        """
        sage_client = boto3.client('sagemaker', region_name=self.aws_region)
        endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)
        endpoint_status = endpoint_description["EndpointStatus"]
        try:
            # "FailureReason" is only present on failed endpoints.
            failure_reason=endpoint_description["FailureReason"]
            self.log.info("sagemaker end point creation failure reason is: "+str(failure_reason))
        except:
            pass
        endpoint_status=str(endpoint_status)
        return endpoint_status
def invoke_sm_endpoint(self,app_name, input_json):
client = boto3.session.Session().client("sagemaker-runtime", self.aws_region)
response = client.invoke_endpoint(
EndpointName=app_name,
Body=input_json,
ContentType='application/json; format=pandas-split',
)
# preds = response['Body'].read().decode("ascii")
preds = response['Body'].read().decode("ascii")
preds = json.loads(preds)
# print("preds: {}".format(preds))
return preds
    def predict_sm_app_endpoint(self,X_test):
        """Send *X_test* (anything pandas.DataFrame accepts) to the configured
        SageMaker endpoint and return the decoded prediction, or None when
        the invocation fails."""
        import pandas as pd
        prediction=None
        AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
        AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
        AWS_SESSION_TOKEN=str(self.aws_session_token)
        region = str(self.aws_region)
        try:
            # Best effort: write credentials into the local AWS CLI config so
            # later CLI/boto calls can pick them up.
            # NOTE(review): 'aws configure set region_name' — the standard
            # CLI key is 'region'; confirm 'region_name' is intentional.
            import subprocess
            cmd = 'aws configure set region_name '+region
            os.system(cmd)
            cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
            os.system(cmd)
            cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
            os.system(cmd)
        except:
            pass
        #Create a session for aws communication using aws boto3 lib
        # NOTE(review): session is never used below; invoke_sm_endpoint opens
        # its own boto3 session — confirm this is only kept for its side of
        # validating the credentials.
        session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
        try:
            # Endpoint expects pandas 'split' orientation JSON.
            query_input = pd.DataFrame(X_test).to_json(orient="split")
            prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input)
        except Exception as e:
            print(e)
        return prediction
def deleteSagemakerApp(self,app_name,region):
# import mlflow.sagemaker as mfs
# region = 'ap-south-1'
# app_name = 'aion-demo-app'
mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
# print("AION mlops sagemaker application endpoint is deleted....\n")
self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name))
    def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path):
        """Create (or skip, if already running) a SageMaker endpoint serving
        the ECR image <aws_id>.dkr.ecr.<region>.amazonaws.com/<container>:<tag_id>.

        Returns app_status: True when an endpoint with this name was already
        InService before the call (no new deploy is attempted), False otherwise.
        """
        region = str(self.aws_region)
        aws_id = str(self.aws_id)
        iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn)
        app_name = str(self.sm_app_name)
        model_uri = str(model_path)
        app_status=False
        mlflow_root_dir = None
        try:
            # mfs.deploy resolves a relative model_uri against the cwd.
            os.chdir(str(self.sagemakerLogLocation))
            mlflow_root_dir = os.getcwd()
            self.log.info('mlflow root dir: '+str(mlflow_root_dir))
        except:
            self.log.info("path issue.")
        try:
            c_status=self.check_sm_deploy_status(app_name)
            # A previously failed endpoint is deleted so it can be recreated.
            if ((c_status == "Failed") or (c_status.lower() == "failed")):
                app_status=False
                self.log.info("Sagemaker endpoint status: Failed.\n")
                mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
            elif ((c_status.lower() == "inservice") or (c_status == "InService")):
                app_status=True
                self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \n"+str(app_name))
            else:
                app_status=False
                pass
        except:
            # describe_endpoint raises when the endpoint does not exist yet.
            pass
        #aws ecr model app_name should contain only [[a-zA-Z0-9-]]
        import re
        if app_name:
            pattern = re.compile("[A-Za-z0-9-]+")
            # if found match (entire string matches pattern)
            if pattern.fullmatch(app_name) is not None:
                pass
            else:
                # Invalid characters — fall back to a safe default endpoint name.
                app_name = 'aion-demo-app'
        else:
            app_name = 'aion-demo-app'
        mlflow_image=mlflow_container_name+':'+tag_id
        image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
        deploy_option="create"  # hard-coded; the add/replace/delete branches below are placeholders
        self.log.info('deploy_option: \n'+str(deploy_option))
        if (deploy_option.lower() == "create"):
            # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE
            if not (app_status):
                try:
                    mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode="create",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url)
                    self.log.info('sagemaker endpoint created and model deployed. Application name is: \n'+str(app_name))
                except:
                    self.log.info('Creating end point application issue.Please check the connection and aws credentials \n')
            else:
                self.log.info('Sagemaker application with user endpoint name already running.Please check. Please delete the old endpoint with same name.\n')
        elif (deploy_option.lower() == "delete"):
            # Deletion is handled by deleteSagemakerApp(); placeholder here.
            pass
        elif (deploy_option.lower() == "add"):
            pass
        elif (deploy_option.lower() == "replace"):
            pass
        else:
            pass
        return app_status
    def mlflow2sagemaker_deploy(self):
        """Top-level deployment driver.

        Logs the trained model into a local MLflow run, then — depending on
        the mlflowtosagemakerPushOnly / mlflowtosagemakerDeploy flags —
        either pushes an existing image to ECR and deploys it, builds a new
        container then pushes and deploys it, or only builds the container
        locally.

        Returns:
            (mlflow2sm_deploy_status, localhost_container_status), each one of
            'success', 'failed' or 'Notdeployed'.  NOTE(review): on a
            top-level exception nothing is returned (implicit None) and the
            json error payload built in the handler is discarded — confirm.
        """
        self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. ---------------> ')
        deploy_status=False
        app_name = str(self.sm_app_name)
        self.log.info('Sagemaker Application Name: '+str(app_name))
        # Point MLflow tracking at <sagemakerLogLocation>/mlruns.
        uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation)
        mlflow.set_tracking_uri(uri_mlflow)
        mlops_trackuri=mlflow.get_tracking_uri()
        mlops_trackuri=str(mlops_trackuri)
        self.log.info('mlops tracking uri: '+str(mlops_trackuri))
        localhost_deploy=False
        try:
            #Loading aion model to deploy in sagemaker
            mlflow.set_experiment(self.experiment_name)
            self.log.info('Endpoint Name: '+str(self.experiment_name))
            # Assume, the model already loaded from joblib in aionmlflow2smInterface.py file.
            aionmodel2deploy=self.model
            # Log the already-trained model into a fresh MLflow run so the
            # sagemaker container build can find its artifacts on disk.
            with mlflow.start_run(run_name='AIONMLOps') as run:
                mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname)
                run_id = run.info.run_uuid
                experiment_id = run.info.experiment_id
                self.log.info('AION mlops experiment run_id: '+str(run_id))
                self.log.info('AION mlops experiment experiment_id: '+str(experiment_id))
                self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname))
            artifact_uri = {mlflow.get_artifact_uri()}  # NOTE(review): set literal, value unused
            mlflow.end_run()
            mlflow_modelname=str(self.mlflow_modelname)
            mlops_trackuri=mlops_trackuri.replace('file:','')
            mlops_trackuri=str(mlops_trackuri)
            # Move cwd to the log location so the relative 'mlruns/...' model
            # path below resolves.
            mlflow_root_dir = None
            try:
                os.chdir(str(self.sagemakerLogLocation))
                mlflow_root_dir = os.getcwd()
                self.log.info('mlflow root dir: '+str(mlflow_root_dir))
            except:
                self.log.info("path issue.")
            model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname)
            self.log.info("local host aion mlops model_path is: "+str(model_path))
            time.sleep(2)
            self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... \n ')
            AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
            AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
            AWS_SESSION_TOKEN=str(self.aws_session_token)
            region = str(self.aws_region)
            #Existing model deploy options
            mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName)
            mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri)
            import subprocess
            # NOTE(review): 'aws configure set region_name' — the standard AWS
            # CLI key is 'region'; confirm 'region_name' is intentional.
            cmd = 'aws configure set region_name '+region
            os.system(cmd)
            cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
            os.system(cmd)
            cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
            os.system(cmd)
            #Create a session for aws communication using aws boto3 lib
            session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
            self.log.info('aws environment variable setup done... \n')
            try:
                os.chdir(mlflow_root_dir)
            except FileNotFoundError:
                self.log.info('Directory does not exist. '+str(mlflow_root_dir))
            except NotADirectoryError:
                self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
            except PermissionError:
                self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
            mlflow_container_name=str(self.mlflow_container_name)
            mlflow_version=mlflow.__version__
            tag_id=mlflow_version  # the ECR image tag follows the mlflow version
            if (self.mlflowtosagemakerPushOnly.lower() == "true"):
                # Branch 1: push an EXISTING local image to ECR, then deploy it.
                self.log.info('Selected option is <Deploy existing model to sagemaker> \n')
                aws_id=str(self.aws_id)
                arn=str(self.iam_sagemakerfullaccess_arn)
                mlflow_image=mlflow_container_name+':'+tag_id
                image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image
                deploy_status=True
                try:
                    model_path=mlflowtosagemakerdeployModeluri
                    # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
                    self.log.info('Deploy existing model container-Model path given by user: '+str(model_path))
                    try:
                        os.chdir(model_path)
                    except FileNotFoundError:
                        self.log.info('Directory does not exist. '+str(model_path))
                    except NotADirectoryError:
                        self.log.info('model_path is not a directory. '+str(model_path))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. '+str(model_path))
                    try:
                        mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName)
                        deploy_status=True
                        self.log.info('AION mlops pushed the docker container to aws ecr. \n ')
                    except:
                        self.log.info("error in pushing existing container to ecr.\n")
                        deploy_status=False
                    time.sleep(2)
                    #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
                    try:
                        os.chdir(mlflow_root_dir)
                    except FileNotFoundError:
                        self.log.info('model path is not a directory. '+str(mlflow_root_dir))
                    except NotADirectoryError:
                        self.log.info('model path is not a directory. '+str(mlflow_root_dir))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
                    try:
                        if (deploy_status):
                            self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri)
                            self.log.info('AION creates docker container and push the container into aws ecr.. ')
                            time.sleep(2)
                    except:
                        self.log.info('AION deploy error.check connection and aws config parameters. ')
                        deploy_status=False
                except Exception as e:
                    self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \n'+str(e))
            elif (self.mlflowtosagemakerPushOnly.lower() == "false"):
                if (self.mlflowtosagemakerDeploy.lower() == "true"):
                    # Branch 2: build a NEW container, push it to ECR, then deploy.
                    self.log.info('Selected option is <Create and Deploy model> \n')
                    deploy_status=True
                    try:
                        # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
                        try:
                            os.chdir(model_path)
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. '+str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. '+str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. '+str(model_path))
                        try:
                            mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name])
                            self.log.info('AION mlops creates docker container and push the container into aws ecr.. ')
                            deploy_status=True
                            time.sleep(2)
                        except:
                            self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
                            deploy_status=False
                        self.log.info('Now deploying the model container to sagemaker starts....\n ')
                        # Once docker push completes, again going back to mlflow parent dir for deployment
                        #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
                        try:
                            os.chdir(mlflow_root_dir)
                        except FileNotFoundError:
                            self.log.info('model_path does not exist. '+str(mlflow_root_dir))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
                        try:
                            if (deploy_status):
                                self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path)
                        except:
                            self.log.info('mlops deploy error.check connection')
                            deploy_status=False
                    except Exception as e:
                        exc = {"status":"FAIL","message":str(e).strip('"')}
                        out_exc = json.dumps(exc)  # NOTE(review): payload built but never used
                        self.log.info('mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\n')
                elif(self.mlflowtosagemakerDeploy.lower() == "false"):
                    # Branch 3: build the container locally only; nothing is pushed or deployed.
                    deploy_status=False
                    localhost_deploy=True
                    self.log.info('Selected option is <Create AION mlops container in local host .> \n')
                    self.log.info("User selected create-Deploy sagemaker option as False,")
                    self.log.info("Creates the AION mlops-sagemaker container locally starting,but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. ")
                    try:
                        # ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns.
                        try:
                            os.chdir(model_path)
                            self.log.info('After change to AION mlops model dir, cwd: '+str(model_path))
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. '+str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. '+str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. '+str(model_path))
                        try:
                            if not (deploy_status):
                                mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name])
                                self.log.info('AION creates local host bsed docker container and push the container local docker repository. Check with <docker images> command.\n ')
                                localhost_deploy=True
                                time.sleep(2)
                        except:
                            self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
                            deploy_status=False
                            localhost_deploy=False
                        self.log.info('AION mlops creates docker container and stored locally... ')
                        time.sleep(2)
                    except Exception as e:
                        localhost_deploy=False
                        self.log.info('AION mlops failed to creates docker container in local machine.\n'+str(e))
                else:
                    self.log.info('Deploy option not selected, Please check. ')
                    localhost_deploy=False
                    deploy_status=False
            else:
                pass
            # Summarize the outcome for the caller.
            localhost_container_status="Notdeployed"
            mlflow2sm_deploy_status="Notdeployed"
            if localhost_deploy:
                localhost_container_status="success"
                mlflow2sm_deploy_status="Notdeployed"
                self.log.info("AION creates local docker container successfully.Please check in docker repository.")
            if (deploy_status):
                # Finally checking whether mlops model is deployed to sagemaker or not.
                app_name = str(self.sm_app_name)
                deploy_s = self.check_sm_deploy_status(app_name)
                if (deploy_s == "InService"):
                    self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\n'+str(app_name))
                    mlflow2sm_deploy_status="success"
                    localhost_container_status="Notdeployed"
                else:
                    self.log.info('AION mlops model not able to deploy at aws sagemaker.\n')
                    mlflow2sm_deploy_status="failed"
                    localhost_container_status="Notdeployed"
            return mlflow2sm_deploy_status,localhost_container_status
        except Exception as inst:
            exc = {"status":"FAIL","message":str(inst).strip('"')}
            out_exc = json.dumps(exc)  # NOTE(review): error payload neither returned nor raised
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
aion_pipeline.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import argparse
import logging
logging.getLogger('tensorflow').disabled = True
import math
import shutil
import re
from datetime import datetime as dt
import warnings
from config_manager.pipeline_config import AionConfigManager
import pandas as pd
import numpy as np
import sklearn
import string
from records import pushrecords
import logging
from pathlib import Path
from pytz import timezone
from config_manager.config_gen import code_configure
import joblib
from sklearn.model_selection import train_test_split
from config_manager.check_config import config_validate
from utils.file_ops import save_csv_compressed,save_csv,save_chromadb
# Name of the per-training-run log file created under the deploy location.
LOG_FILE_NAME = 'model_training_logs.log'
# When running inside the AION application ('AION' module already loaded),
# take the debug flag from the app configuration, falling back to False if
# that import fails; standalone (script) runs default to debug enabled.
if 'AION' in sys.modules:
    try:
        from appbe.app_config import DEBUG_ENABLED
    except:
        DEBUG_ENABLED = False
else:
    DEBUG_ENABLED = True
def getversion():
    """Return the AION version string.

    The version is taken from the name of the first ``*.var`` file found in
    the ``config`` folder next to this package (``../config`` relative to
    this file): a file named ``<version>.var`` yields ``<version>``.

    Returns:
        str: the detected version, or ``'NA'`` when the config folder is
        missing or contains no ``*.var`` file.
    """
    config_folder = Path(__file__).resolve().parent.parent / 'config'
    # Guard against a missing folder: the original os.listdir() call raised
    # FileNotFoundError at import time (AION_VERSION is computed on load).
    if config_folder.is_dir():
        for entry in config_folder.iterdir():
            if entry.suffix == '.var':
                # '<version>.var' -> '<version>' (stem strips only the last suffix)
                return entry.stem
    return 'NA'
AION_VERSION = getversion()
def pushRecordForTraining():
    """Push the current AION version to the usage-records store.

    Returns:
        tuple: ``(status, msg)`` as reported by ``pushrecords.enterRecord``.
        On any exception the error is printed and ``(False, str(error))``
        is returned instead of propagating.
    """
    try:
        status, msg = pushrecords.enterRecord(AION_VERSION)
    except Exception as err:
        # Best-effort telemetry: never let record-keeping abort a training run.
        print("Exception", err)
        status, msg = False, str(err)
    return status, msg
def mlflowSetPath(path, experimentname):
    """Point MLflow at a local ``mlruns`` store and activate an experiment.

    The tracking URI is the ``mlruns`` folder located two directory levels
    above *path*; *experimentname* is set (and created if needed) as the
    current MLflow experiment.
    """
    import mlflow
    tracking_uri = "file:" + str(Path(path).parent.parent) + "/mlruns"
    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(str(experimentname))
def set_log_handler( basic, mode='w'):
    """Attach a fresh file handler to the 'eion' logger and return the logger.

    The log file is written to
    ``<deployLocation>/<modelName>/<modelVersion>/log/model_training_logs.log``
    (directories created as needed). Previously attached file handlers are
    removed first so exactly one file handler remains active.

    Args:
        basic: config mapping providing 'deployLocation', 'modelName' and
            'modelVersion'.
        mode: file open mode for the handler ('w' truncates, 'a' appends).
    """
    log_dir = Path(basic.get('deployLocation')) / basic['modelName'] / basic['modelVersion'] / 'log'
    log_dir.mkdir(parents=True, exist_ok=True)
    handler = logging.FileHandler(log_dir / LOG_FILE_NAME, mode, 'utf-8')
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger = logging.getLogger('eion')
    logger.propagate = False
    # Drop stale file handlers left over from an earlier run so output
    # does not get duplicated across log files.
    for existing in list(logger.handlers):
        if isinstance(existing, logging.FileHandler):
            logger.removeHandler(existing)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
class server():
def __init__(self):
self.response = None
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.textFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.dfcols=0
self.dfrows=0
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj, codeConfigure, log):
oldStdout = sys.stdout
model_training_details = ''
model_tried=''
learner_type = ''
topics = {}
pred_filename = ''
numericContinuousFeatures=''
discreteFeatures=''
sessonal_freq = ''
additional_regressors = ''
threshold=-1
targetColumn = ''
numericalFeatures =''
nonNumericFeatures=''
categoricalFeatures=''
dataFolderLocation = ''
featureReduction = 'False'
original_data_file = ''
normalizer_pickle_file = ''
pcaModel_pickle_file = ''
bpca_features= []
apca_features = []
lag_order = 1
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
dictDiffCount={}
cleaning_kwargs = {}
grouperbyjson = ''
rowfilterexpression=''
featureEngineeringSelector = 'false'
conversion_method = ''
params={}
loss_matrix='binary_crossentropy'
optimizer='Nadam'
numericToLabel_json='[]'
preprocessing_pipe=''
firstDocFeature = ''
secondDocFeature = ''
padding_length = 30
pipe = None
scalertransformationFile=None
column_merge_flag = False
merge_columns = []
score = 0
profilerObj = None
imageconfig=''
labelMaps={}
featureDataShape=[]
normFeatures = []
preprocess_out_columns = []
preprocess_pipe = None
label_encoder = None
unpreprocessed_columns = []
import pickle
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
inlierLabels=config_obj.getEionInliers()
scoreParam = config_obj.getScoringCreteria()
noofforecasts = config_obj.getNumberofForecasts()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
filter_expression = config_obj.getFilterExpression()
refined_filter_expression = ""
sa_images = []
model_tried = ''
deploy_config = {}
iterName = iterName.replace(" ", "_")
deployFolder = deployLocation
usecaseLocation,deployLocation,dataFolderLocation,imageFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile,reduction_data_file = config_obj.createDeploymentFolders(deployFolder,iterName,iterVersion)
outputLocation=deployLocation
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
# mlflowSetPath shut down the logger, so set again
set_log_handler( config_obj.basic, mode='a')
xtrain=pd.DataFrame()
xtest=pd.DataFrame()
log.info('Status:-|... AION Training Configuration started')
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textsummarizationStatus,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus, objectDetectionStatus,stateTransitionStatus, similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus = config_obj.getModulesDetails()
status, error_id, msg = config_obj.validate_config()
if not status:
if error_id == 'fasttext':
raise ValueError(msg)
VideoProcessing = False
if(problem_type.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
from transformations.dataReader import dataReader
objData = dataReader()
DataIsFolder = False
folderdetails = config_obj.getFolderSettings()
if os.path.isfile(dataLocation):
log.info('Status:-|... AION Loading Data')
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
status,msg = save_csv_compressed(dataFrame,original_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
elif os.path.isdir(dataLocation):
if problem_type.lower() == 'summarization':
from document_summarizer import summarize
keywords, pretrained_type, embedding_sz = summarize.get_params()
dataFrame = summarize.to_dataframe(dataLocation,keywords, deploy_loc, pretrained_type, embedding_sz)
problem_type = 'classification'
targetFeature = 'label'
scoreParam = 'Accuracy'
elif folderdetails['fileType'].lower() == 'document':
dataFrame, error = objData.documentsTodf(dataLocation, folderdetails['labelDataFile'])
if error:
log.info(error)
elif folderdetails['fileType'].lower() == 'object':
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
intermediateLocation = os.path.join(deployLocation,'intermediate')
os.mkdir(intermediateLocation)
AugEnabled,keepAugImages,operations,augConf = config_obj.getEionImageAugmentationConfiguration()
dataFrame, n_class = objData.createTFRecord(dataLocation, intermediateLocation, folderdetails['labelDataFile'], testPercentage,AugEnabled,keepAugImages,operations, "objectdetection",augConf) #Unnati
DataIsFolder = True
else:
datafilelocation = os.path.join(dataLocation,folderdetails['labelDataFile'])
dataFrame = objData.csvTodf(datafilelocation,delimiter,textqualifier)
DataIsFolder = True
if textSimilarityStatus or similarityIdentificationStatus or contextualSearchStatus:
similaritydf = dataFrame
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
if timeseriesStatus or anomalyDetectionStatus:
from utils.validate_inputs import dataGarbageValue
status,msg = dataGarbageValue(dataFrame,datetimeFeature)
if status.lower() == 'error':
raise ValueError(msg)
if not DataIsFolder:
if timeseriesStatus:
if(modelFeatures != 'NA' and datetimeFeature != ''):
if datetimeFeature:
if isinstance(datetimeFeature, list): #to handle if time series having multiple time column
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature
else:
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
elif survival_analysis_status or anomalyDetectionStatus:
if(modelFeatures != 'NA'):
if datetimeFeature != 'NA' and datetimeFeature != '':
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
else:
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfrows=dataFrame.shape[0]
self.dfcols=dataFrame.shape[1]
log.info('\n-------> Rows: '+str(self.dfrows))
log.info('\n-------> Columns: '+str(self.dfcols))
topFeatures=[]
profilerObj = None
normalizer=None
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if timeseriesStatus:
if datetimeFeature != 'NA' and datetimeFeature != '':
preproces_config = config_obj.basic.get('preprocessing',{}).get('timeSeriesForecasting',{})
if preproces_config:
from transformations.preprocess import timeSeries as ts_preprocess
preprocess_obj = ts_preprocess( preproces_config,datetimeFeature, log)
dataFrame = preprocess_obj.run( dataFrame)
log.info('-------> Input dataFrame(5 Rows) after preprocessing: ')
log.info(dataFrame.head(5))
deploy_config['preprocess'] = {}
deploy_config['preprocess']['code'] = preprocess_obj.get_code()
if profiler_status:
log.info('\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
from transformations.dataProfiler import profiler as dataProfiler
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
if DataIsFolder:
if folderdetails['type'].lower() != 'objectdetection':
profilerObj = dataProfiler(dataFrame)
topFeatures,VideoProcessing,tfrecord_directory = profilerObj.folderPreprocessing(dataLocation,folderdetails,deployLocation)
elif textSimilarityStatus:
firstDocFeature = config_obj.getFirstDocumentFeature()
secondDocFeature = config_obj.getSecondDocumentFeature()
profilerObj = dataProfiler(dataFrame,targetFeature, data_path=dataFolderLocation)
dataFrame,pipe,targetColumn,topFeatures = profilerObj.textSimilarityStartProfiler(firstDocFeature,secondDocFeature)
elif recommenderStatus:
profilerObj = dataProfiler(dataFrame)
dataFrame = profilerObj.recommenderStartProfiler(modelFeatures)
else:
if deeplearner_status or learner_status:
if (problem_type.lower() != 'clustering') and (problem_type.lower() != 'topicmodelling'):
if targetFeature != '':
try:
biasingDetail = config_obj.getDebiasingDetail()
if len(biasingDetail) > 0:
if biasingDetail['FeatureName'] != 'None':
protected_feature = biasingDetail['FeatureName']
privileged_className = biasingDetail['ClassName']
target_feature = biasingDetail['TargetFeature']
algorithm = biasingDetail['Algorithm']
from debiasing.DebiasingManager import DebiasingManager
mgrObj = DebiasingManager()
log.info('Status:-|... Debiasing transformation started')
transf_dataFrame = mgrObj.Bias_Mitigate(dataFrame, protected_feature, privileged_className, target_feature, algorithm)
log.info('Status:-|... Debiasing transformation completed')
dataFrame = transf_dataFrame
except Exception as e:
print(e)
pass
# ---------------------------------------------- ----------------------------------------------
targetData = dataFrame[targetFeature]
featureData = dataFrame[dataFrame.columns.difference([targetFeature])]
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
xtrain,ytrain,xtest,ytest = self.split_into_train_test_data(featureData,targetData,testPercentage,log,problem_type.lower())
xtrain.reset_index(drop=True,inplace=True)
ytrain.reset_index(drop=True,inplace=True)
xtest.reset_index(drop=True,inplace=True)
ytest.reset_index(drop=True,inplace=True)
dataFrame = xtrain
dataFrame[targetFeature] = ytrain
encode_target_problems = ['classification','anomalyDetection', 'timeSeriesAnomalyDetection'] #task 11997
if problem_type == 'survivalAnalysis' and dataFrame[targetFeature].nunique() > 1:
encode_target_problems.append('survivalAnalysis')
if timeseriesStatus: #task 12627 calling data profiler without target feature specified separately (i.e) profiling is done for model features along with target features
profilerObj = dataProfiler(dataFrame, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation)
else:
profilerObj = dataProfiler(dataFrame, target=targetFeature, encode_target= problem_type in encode_target_problems, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation) #task 12627
dataFrame, preprocess_pipe, label_encoder = profilerObj.transform()
preprocess_out_columns = dataFrame.columns.tolist()
if not timeseriesStatus: #task 12627 preprocess_out_columns goes as output_columns in target folder script/input_profiler.py, It should contain the target feature also as it is what is used for forecasting
if targetFeature in preprocess_out_columns:
preprocess_out_columns.remove(targetFeature)
for x in unpreprocessed_columns:
preprocess_out_columns.remove(x)
if label_encoder:
joblib.dump(label_encoder, Path(deployLocation)/'model'/'label_encoder.pkl')
labelMaps = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
codeConfigure.update_config('train_features',list(profilerObj.train_features_type.keys()))
codeConfigure.update_config('text_features',profilerObj.text_feature)
self.textFeatures = profilerObj.text_feature
deploy_config['profiler'] = {}
deploy_config['profiler']['input_features'] = list(profilerObj.train_features_type.keys())
deploy_config['profiler']['output_features'] = preprocess_out_columns
deploy_config['profiler']['input_features_type'] = profilerObj.train_features_type
deploy_config['profiler']['word2num_features'] = profilerObj.wordToNumericFeatures
deploy_config['profiler']['unpreprocessed_columns'] = unpreprocessed_columns
deploy_config['profiler']['force_numeric_conv'] = profilerObj.force_numeric_conv
if self.textFeatures:
deploy_config['profiler']['conversion_method'] = config_obj.get_conversion_method()
if anomalyDetectionStatus and datetimeFeature != 'NA' and datetimeFeature != '':
if unpreprocessed_columns:
dataFrame.set_index( unpreprocessed_columns[0], inplace=True)
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
if not xtest.empty:
if targetFeature != '':
non_null_index = ytest.notna()
ytest = ytest[non_null_index]
xtest = xtest[non_null_index]
if profilerObj.force_numeric_conv:
xtest[ profilerObj.force_numeric_conv] = xtest[profilerObj.force_numeric_conv].apply(pd.to_numeric,errors='coerce')
xtest.astype(profilerObj.train_features_type)
if unpreprocessed_columns:
xtest_unprocessed = xtest[unpreprocessed_columns]
xtest = preprocess_pipe.transform(xtest)
if not isinstance(xtest, np.ndarray):
xtest = xtest.toarray()
xtest = pd.DataFrame(xtest, columns=preprocess_out_columns)
if unpreprocessed_columns:
xtest[unpreprocessed_columns] = xtest_unprocessed
if survival_analysis_status:
xtest.astype({x:'float' for x in unpreprocessed_columns})
xtrain.astype({x:'float' for x in unpreprocessed_columns})
#task 11997 removed setting datetime column as index of dataframe code as it is already done before
if label_encoder:
ytest = label_encoder.transform(ytest)
if preprocess_pipe:
if self.textFeatures:
from text.textProfiler import reset_pretrained_model
reset_pretrained_model(preprocess_pipe) # pickle is not possible for fasttext model ( binary)
joblib.dump(preprocess_pipe, Path(deployLocation)/'model'/'preprocess_pipe.pkl')
self.features=topFeatures
if targetColumn in topFeatures:
topFeatures.remove(targetColumn)
self.topFeatures=topFeatures
if normalizer != None:
normalizer_file_path = os.path.join(deployLocation,'model','normalizer_pipe.sav')
normalizer_pickle_file = 'normalizer_pipe.sav'
pickle.dump(normalizer, open(normalizer_file_path,'wb'))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\n')
else:
datacolumns=list(dataFrame.columns)
if targetFeature in datacolumns:
datacolumns.remove(targetFeature)
if not timeseriesStatus and not anomalyDetectionStatus and not inputDriftStatus and not outputDriftStatus and not imageClassificationStatus and not associationRuleStatus and not objectDetectionStatus and not stateTransitionStatus and not textsummarizationStatus:
self.textFeatures,self.vectorizerFeatures,pipe,column_merge_flag,merge_columns = profilerObj.checkForTextClassification(dataFrame)
self.topFeatures =datacolumns
if(pipe is not None):
preprocessing_pipe = 'pppipe'+iterName+'_'+iterVersion+'.sav'
ppfilename = os.path.join(deployLocation,'model','pppipe'+iterName+'_'+iterVersion+'.sav')
pickle.dump(pipe, open(ppfilename, 'wb'))
status, msg = save_csv_compressed(dataFrame,profiled_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
if selector_status:
log.info("\n================== Feature Selector has started ==================")
log.info("Status:-|... AION feature engineering started")
fs_mlstart = time.time()
selectorJson = config_obj.getEionSelectorConfiguration()
if self.textFeatures:
config_obj.updateFeatureSelection(selectorJson, codeConfigure, self.textFeatures)
log.info("-------> For vectorizer 'feature selection' is disabled and all the features will be used for training")
from feature_engineering.featureSelector import featureSelector
selectorObj = featureSelector()
dataFrame,targetColumn,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,self.similarGroups,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pcaModel,bpca_features,apca_features,featureEngineeringSelector = selectorObj.startSelector(dataFrame, selectorJson,self.textFeatures,targetFeature,problem_type)
if(str(pcaModel) != 'None'):
featureReduction = 'True'
status, msg = save_csv(dataFrame,reduction_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
pcaFileName = os.path.join(deployLocation,'model','pca'+iterName+'_'+iterVersion+'.sav')
pcaModel_pickle_file = 'pca'+iterName+'_'+iterVersion+'.sav'
pickle.dump(pcaModel, open(pcaFileName, 'wb'))
if not xtest.empty:
xtest = pd.DataFrame(pcaModel.transform(xtest),columns= apca_features)
if targetColumn in self.topFeatures:
self.topFeatures.remove(targetColumn)
fs_mlexecutionTime=time.time() - fs_mlstart
log.info('-------> COMPUTING: Total Feature Selection Execution Time '+str(fs_mlexecutionTime))
log.info('================== Feature Selection completed ==================\n')
log.info("Status:-|... AION feature engineering completed")
if deeplearner_status or learner_status:
log.info('Status:-|... AION training started')
ldp_mlstart = time.time()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problem_type.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
else:
modelType='clustering'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if(modelType == 'clustering') or (modelType == 'topicmodelling'):
xtrain = featureData
ytrain = pd.DataFrame()
xtest = featureData
ytest = pd.DataFrame()
elif (targetColumn!=''):
xtrain = dataFrame[features]
ytrain = dataFrame[targetColumn]
else:
pass
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
if learner_status:
base_model_score=0
log.info('\n================== ML Started ==================')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
anomalyDetectionStatus = False
anomalyMethod =config_obj.getEionanomalyModels()
if modelType.lower() == "anomalydetection" or modelType.lower() == "timeseriesanomalydetection": #task 11997
anomalyDetectionStatus = True
if anomalyDetectionStatus == True :
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
if datetimeFeature in datacolumns:
datacolumns.remove(datetimeFeature)
self.features = datacolumns
from learner.anomalyDetector import anomalyDetector
anomalyDetectorObj=anomalyDetector()
model_type ="anomaly_detection"
saved_model = model_type+'_'+iterName+'_'+iterVersion+'.sav'
if problem_type.lower() == "timeseriesanomalydetection": #task 11997
anomalyconfig = config_obj.getAIONTSAnomalyDetectionConfiguration()
modelType = "TimeSeriesAnomalyDetection"
else:
anomalyconfig = config_obj.getAIONAnomalyDetectionConfiguration()
testPercentage = config_obj.getAIONTestTrainPercentage()
##Multivariate feature based anomaly detection status from gui (true/false)
mv_featurebased_selection = config_obj.getMVFeaturebasedAD()
mv_featurebased_ad_status=str(mv_featurebased_selection['uniVariate'])
model,estimator,matrix,trainmatrix,score,labelMaps=anomalyDetectorObj.startanomalydetector(dataFrame,targetColumn,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status) #Unnati
score = 'NA'
if(self.matrix != '{'):
self.matrix += ','
self.matrix += matrix
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
scoreParam = 'NA'
scoredetails = f'{{"Model":"{model}","Score":"{score}"}}'
if model_tried != '':
model_tried += ','
model_tried += scoredetails
model = anomalyMethod
else:
log.info('-------> Target Problem Type:'+ self.targetType)
log.info('-------> Target Model Type:'+ modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
codeConfigure.update_config('scoring_criteria',scoreParam)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=mlobj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,'MB',codeConfigure,featureEngineeringSelector,config_obj.getModelEvaluationConfig(),imageFolderLocation)
#Getting model,data for ensemble calculation
e_model=loaded_model
base_model_score=score
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('================== ML Completed ==================\n')
if deeplearner_status:
learner_type = 'DL'
log.info('Status:- |... AION DL training started')
from dlearning.deeplearning import deeplearning
dlobj = deeplearning()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
log.info('\n================== DL Started ==================')
dlstart = time.time()
deeplearnerJson = config_obj.getEionDeepLearnerConfiguration()
targetColumn = targetFeature
method = deeplearnerJson['optimizationMethod']
optimizationHyperParameter = deeplearnerJson['optimizationHyperParameter']
cvSplit = optimizationHyperParameter['trainTestCVSplit']
roundLimit=optimizationHyperParameter['roundLimit']
if 'randomMethod' in optimizationHyperParameter:
randomMethod = optimizationHyperParameter['randomMethod']
else:
randomMethod = 'Quantum'
modelType = problem_type.lower()
modelParams = deeplearnerJson['modelParams']
modelParamsFile=deeplearnerJson['modelparamsfile']
if roundLimit =="":
roundLimit=None
else:
roundLimit=int(roundLimit)
if len(self.modelSelTopFeatures) !=0:
dl_features=self.modelSelTopFeatures
best_feature_model = 'ModelBased'
elif len(self.topFeatures) != 0:
dl_features=self.topFeatures
if featureEngineeringSelector.lower() == 'true':
best_feature_model = 'DimensionalityReduction'
else:
best_feature_model = 'StatisticalBased'
elif len(self.allFeatures) != 0:
dl_features=self.allFeatures
best_feature_model = 'AllFeatures'
else:
datacolumns=list(dataFrame.columns)
datacolumns.remove(targetColumn)
dl_features =datacolumns
best_feature_model = 'AllFeatures'
log.info('-------> Features Used For Modeling: '+(str(dl_features))[:500])
if cvSplit == "":
cvSplit =None
else:
cvSplit =int(cvSplit)
xtrain = xtrain[dl_features]
xtest = xtest[dl_features]
df_test = xtest.copy()
df_test['actual'] = ytest
modelParams,modelList = config_obj.getEionDeepLearnerModelParams(modelType)
if modelType.lower() == 'classification':
scoreParam = dlobj.setScoreParams(scoreParam,modelType)
featureDataShape = xtrain.shape
model_type = 'Classification'
log.info('\n------ Training DL: Classification ----')
elif modelType.lower() == 'regression':
model_type = "Regression"
if scoreParam == 'None':
scoreParam = None
log.info('\n------ Training DL: Regression ----')
featureDataShape = xtrain.shape
model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix = dlobj.startLearning(model_type,modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,labelMaps,df_test,deployLocation,iterName,iterVersion,best_feature_model)
if model_tried != '':
model_tried += ','
model_tried += model_tried_dl
bestDL = True
if learner_status:
if score_dl <= score:
bestDL = False
log.info("\n----------- Machine Learning is Good ---")
log.info("-------> Model: "+str(model) +" Score: "+str(score))
log.info("---------------------------------------\n")
else:
os.remove(filename)
os.remove(predicted_data_file)
log.info("\n------------ Deep Learning is Good---")
log.info("-------> Model: "+str(model_dl)+" Score: "+str(score_dl))
log.info("---------------------------------------\n")
if bestDL:
model = model_dl
score = score_dl
best_model = best_model_dl
params = params_dl
filename = filename_dl
status, msg = save_csv(df_test,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
saved_model = saved_model_dl
self.matrix = '{'+performancematrix
self.trainmatrix = '{'+trainingperformancematrix
self.features = dl_features
else:
learner_type = 'ML'
shutil.rmtree(filename_dl)
dlexecutionTime=time.time() - dlstart
log.info('-------> DL Execution Time '+str(dlexecutionTime))
log.info('Status:- |... AION DL training completed')
log.info('================== Deep Completed ==================\n')
if deeplearner_status or learner_status:
log.info('Status:-|... AION training completed')
if stateTransitionStatus:
log.info('Status:-|... AION State Transition start')
learner_type = modelType = model_type = 'StateTransition'
model = 'MarkovClustering'
scoreParam = 'NA'
score = 0
from state_transition.pattern import pattern
patternobj = pattern(modelFeatures,targetFeature)
model_tried,probabilityfile,clusteringfile = patternobj.training(dataFrame,outputLocation)
deploy_status = False
visualizationstatus = False
log.info('Status:-|... AION State Transition completed')
if associationRuleStatus:
log.info('\n================== Association Rule Started ==================')
log.info('Status:-|... AION Association Rule start')
learner_type = 'AR'
modelType = 'Association Rule'
model = 'apriori'
scoreParam = 'NA'
score = 'NA'
model_type = modelType
associationRuleJson = config_obj.getEionAssociationRuleConfiguration()
modelparams,modelList = config_obj.getEionAssociationRuleModelParams()
invoiceNoFeature,itemFeature = config_obj.getAssociationRuleFeatures()
if model in modelparams:
modelparam = modelparams[model]
log.info('\n-------- Assciation Rule Start -----')
from association_rules.associationrules import associationrules
associationrulesobj = associationrules(dataFrame,associationRuleJson,modelparam,invoiceNoFeature,itemFeature)
model_tried = associationrulesobj.apply_associationRules(outputLocation)
log.info('-------- Association Rule End -----\n')
log.info('<--------Association Rule Completed----->')
log.info('Status:-|... AION Association Rule completed')
deploy_status = False
if textSimilarityStatus:
log.info('================ Text Similarity Started ====================')
log.info('Status:-|... AION Text Similarity started')
learner_type = 'Text Similarity'
model_type = 'Text Similarity'
scoreParam = 'Accuracy'
modelType = model_type
firstDocFeature = config_obj.getFirstDocumentFeature()
secondDocFeature = config_obj.getSecondDocumentFeature()
textSimilarityCongig = config_obj.getEionTextSimilarityConfig()
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
from recommender.text_similarity import eion_similarity_siamese
objTextSimilarity = eion_similarity_siamese()
model,score,matrix,trainmatrix,modeltried,saved_model,filename,padding_length,threshold = objTextSimilarity.siamese_model(dataFrame,firstDocFeature,secondDocFeature,targetFeature,textSimilarityCongig,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file)
if(self.matrix != '{'):
self.matrix += ','
self.matrix += matrix
if model_tried != '':
model_tried += ','
model_tried += modeltried
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
log.info('Status:-|... AION Text Similarity completed')
log.info('================ Text Similarity Started End====================')
if timeseriesStatus:
log.info('================ Time Series Forecasting Started ====================') #task 11997
log.info('Status:-|... AION TimeSeries Forecasting started') #task 11997
modelType = 'TimeSeriesForecasting' #task 11997
model_type = 'TimeSeriesForecasting' #task 11997
learner_type = 'TS'
modelName='ARIMA'
numericContinuousFeatures = targetFeature.split(",")
profilerJson = config_obj.getEionTimeSeriesConfiguration()
modelParams,modelList = config_obj.getEionTimeSeriesModelParams()
modelName = modelList
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
from time_series.timeseries import timeseries
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'rmse'
objTS = timeseries(profilerJson,modelParams,modelList,dataFrame,targetFeature,datetimeFeature,modelName,testPercentage,iterName,iterVersion,deployLocation,scoreParam)
modelName,model,scoreParam,score,best_model,sfeatures,errormatrix,model_tried,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scalertransformationFile = objTS.timeseries_learning(trained_data_file,predicted_data_file,deployLocation)
xtrain = dataFrame
self.matrix += errormatrix
log.info("Best model to deploy: \n"+str(model))
## Below part is for var,arima,fbprophet
try:
with open(filename, 'rb') as f:
loaded_model = pickle.load(f)
f.close()
except:
loaded_model=best_model
pass
df_l=len(dataFrame)
pred_threshold=0.1
max_pred_by_user= round((df_l)*pred_threshold)
#prediction for 24 steps or next 24 hours
if noofforecasts == -1:
noofforecasts = max_pred_by_user
no_of_prediction=noofforecasts
if (no_of_prediction > max_pred_by_user):
log.info("-------> Forecast beyond the threshold.So, Reset to Maximum:" +str(max_pred_by_user))
no_of_prediction=max_pred_by_user
noofforecasts = no_of_prediction
log.info("-------> Number of Forecast Records: "+str(no_of_prediction))
log.info("\n------ Forecast Prediction Start -------------")
if(model.lower() == 'var'):
sfeatures.remove(datetimeFeature)
self.features = sfeatures
originalPredictions=objTS.var_prediction(no_of_prediction)
log.info("-------> Predictions")
log.info(originalPredictions)
predictions=originalPredictions
forecast_output = predictions.to_json(orient='records')
else:
if (model.lower() == 'fbprophet'):
self.features = sfeatures
if not pred_freq:
sessonal_freq = 'H'
else:
sessonal_freq=pred_freq
ts_prophet_future = best_model.make_future_dataframe(periods=no_of_prediction,freq=sessonal_freq,include_history = False)
#If additional regressor given by user.
if (additional_regressors):
log.info("------->Prophet additional regressor given by user: "+str(additional_regressors))
ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]
ts_prophet_future.reset_index(drop=True)
ts_prophet_future=ts_prophet_future.dropna()
else:
pass
train_forecast = best_model.predict(ts_prophet_future)
train_forecast = train_forecast.round(2)
prophet_forecast_tail=train_forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
prophet_forecast_tail['ds'] = prophet_forecast_tail['ds'].dt.strftime('%Y-%m-%d %H:%i:%s')
log.info("------->Prophet Predictions")
log.info(prophet_forecast_tail)
forecast_output = prophet_forecast_tail.to_json(orient='records')
elif (model.lower() == 'arima'):
predictions = loaded_model.predict(n_periods=no_of_prediction)
predictions = predictions.round(2)
self.features = sfeatures
col = targetFeature.split(",")
pred = pd.DataFrame(predictions,columns=col)
predictionsdf = pred
log.info("-------> Predictions")
log.info(predictionsdf)
forecast_output = predictionsdf.to_json(orient='records')
elif (model.lower() == 'encoder_decoder_lstm_mvi_uvo'):
log.info(datetimeFeature)
log.info(sfeatures)
self.features = sfeatures
if len(sfeatures) == 1:
xt = xtrain[self.features].values
else:
xt = xtrain[self.features].values
with open(scalertransformationFile, 'rb') as f:
loaded_scaler_model = pickle.load(f)
f.close()
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
pred_data = xt
y_future = []
featuerlen = len(sfeatures)
targetColIndx = (xtrain.columns.get_loc(targetFeature))
#in case of lstm multivariate input and univariate out prediction only one sequence can be predicted
#consider the last xtrain window as input sequence
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order, featuerlen))
pred = loaded_model.predict(pdata)
pred_1d = pred.ravel()
#pred_1d = pred_1d.reshape(len(pred_1d),1)
pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, featuerlen)
pdata_2d[:,targetColIndx] = pred_1d
pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d)
predout = pred_2d_inv[:, targetColIndx]
predout = predout.reshape(len(pred_1d),1)
#y_future.append(predout)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(predout)),columns=col)
for i in range(0, len(predout)):
pred.iloc[i] = predout[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
elif (model.lower() == 'mlp' or model.lower() == 'lstm'):
sfeatures.remove(datetimeFeature)
self.features = sfeatures
if len(sfeatures) == 1:
xt = xtrain[self.features].values
else:
xt = xtrain[self.features].values
with open(scalertransformationFile, 'rb') as f:
loaded_scaler_model = pickle.load(f)
f.close()
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
pred_data = xt
y_future = []
for i in range(no_of_prediction):
pdata = pred_data[-lag_order:]
if model.lower() == 'mlp':
pdata = pdata.reshape((1,lag_order))
else:
pdata = pdata.reshape((1,lag_order, len(sfeatures)))
if (len(sfeatures) > 1):
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout)
pred_data=np.append(pred_data,pred,axis=0)
else:
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout.flatten()[-1])
pred_data = np.append(pred_data,pred)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(y_future)),columns=col)
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
else:
pass
log.info('Status:-|... AION TimeSeries Forecasting completed') #task 11997
log.info("------ Forecast Prediction End -------------\n")
log.info('================ Time Series Forecasting Completed ================\n') #task 11997
if recommenderStatus:
log.info('\n================ Recommender Started ================ ')
log.info('Status:-|... AION Recommender started')
learner_type = 'RecommenderSystem'
model_type = 'RecommenderSystem'
modelType = model_type
model = model_type
targetColumn=''
datacolumns=list(dataFrame.columns)
self.features=datacolumns
svd_params = config_obj.getEionRecommenderConfiguration()
from recommender.item_rating import recommendersystem
recommendersystemObj = recommendersystem(modelFeatures,svd_params)
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
saved_model,rmatrix,score,trainingperformancematrix,model_tried = recommendersystemObj.recommender_model(dataFrame,outputLocation)
scoreParam = 'NA' #Task 11190
log.info('Status:-|... AION Recommender completed')
log.info('================ Recommender Completed ================\n')
if textsummarizationStatus:
log.info('\n================ text Summarization Started ================ ')
log.info('Status:-|... AION text Summarization started')
modelType = 'textsummarization'
model_type = 'textsummarization'
learner_type = 'Text Summarization'
modelName='TextSummarization'
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from scipy import spatial
model = model_type
dataLocationTS,deployLocationTS,KeyWordsTS,pathForKeywordFileTS = config_obj.getEionTextSummarizationConfig()
#print("dataLocationTS",dataLocationTS)
#print("deployLocationTS",deployLocationTS)
#print("KeyWordsTS",KeyWordsTS)
#print("pathForKeywordFileTS",pathForKeywordFileTS)
#PreTrained Model Download starts-------------------------
from appbe.dataPath import DATA_DIR
preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
models = {'glove':{50:'glove.6B.50d.w2vformat.txt'}}
supported_models = [x for y in models.values() for x in y.values()]
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
Path(modelsPath).mkdir(parents=True, exist_ok=True)
p = Path(modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
selected_model="glove.6B.50d.w2vformat.txt"
if selected_model not in modelsDownloaded:
print("Model not in folder, downloading")
import urllib.request
location = Path(modelsPath)
local_file_path = location/f"glove.6B.50d.w2vformat.txt"
urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.50d.w2vformat.txt', local_file_path)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")
tokenizer.save_pretrained(preTrainedModellocation)
model.save_pretrained(preTrainedModellocation)
#PreTrained Model Download ends-----------------------
deployLocationData=deployLocation+"\\data\\"
modelLocation=Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'/'glove.6B.50d.w2vformat.txt'
KeyWordsTS=KeyWordsTS.replace(",", " ")
noOfKeyword = len(KeyWordsTS.split())
keywords = KeyWordsTS.split()
embeddings = {}
word = ''
with open(modelLocation, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
readData=pd.read_csv(pathForKeywordFileTS,encoding='utf-8',encoding_errors= 'replace')
for i in range(noOfKeyword):
terms=(sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6]
readData = readData.append({'Keyword': keywords[i]}, ignore_index=True)
for j in range(len(terms)):
readData = readData.append({'Keyword': terms[j]}, ignore_index=True)
deployLocationDataKwDbFile=deployLocationData+"keywordDataBase.csv"
readData.to_csv(deployLocationDataKwDbFile,encoding='utf-8',index=False)
datalocation_path=dataLocationTS
path=Path(datalocation_path)
fileList=os.listdir(path)
textExtraction = pd.DataFrame()
textExtraction['Sentences']=""
rowIndex=0
for i in range(len(fileList)):
fileName=str(datalocation_path)+"\\"+str(fileList[i])
if fileName.endswith(".pdf"):
print("\n files ",fileList[i])
from pypdf import PdfReader
reader = PdfReader(fileName)
number_of_pages = len(reader.pages)
text=""
textOutputForFile=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
sentence=sentence.replace("\n", " ")
if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) :
continue
textExtraction.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
if fileName.endswith(".txt"):
print("\n txt files",fileList[i])
data=[]
with open(fileName, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
sentences=str1.split(".")
count=0
for sentence in sentences:
count += 1
textExtraction.at[rowIndex+i,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
df=textExtraction
#print("textExtraction",textExtraction)
deployLocationDataPreProcessData=deployLocationData+"preprocesseddata.csv"
save_csv_compressed(deployLocationDataPreProcessData, df, encoding='utf-8')
df['Label']=0
kw=pd.read_csv(deployLocationDataKwDbFile,encoding='utf-8',encoding_errors= 'replace')
Keyword_list = kw['Keyword'].tolist()
for i in df.index:
for x in Keyword_list:
if (str(df["Sentences"][i])).find(x) != -1:
df['Label'][i]=1
break
deployLocationDataPostProcessData=deployLocationData+"postprocesseddata.csv"
#df.to_csv(deployLocationDataPostProcessData,encoding='utf-8')
save_csv_compressed(deployLocationDataPostProcessData, df, encoding='utf-8')
labelledData=df
train_df=labelledData
labelencoder = LabelEncoder()
train_df['Sentences'] = labelencoder.fit_transform(train_df['Sentences'])
X = train_df.drop('Label',axis=1)
y = train_df['Label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
Classifier = RandomForestClassifier(n_estimators = 10, random_state = 42)
modelTs=Classifier.fit(X, y)
import pickle
deployLocationTS=deployLocation+"\\model\\"+iterName+'_'+iterVersion+'.sav'
deployLocationTS2=deployLocation+"\\model\\"+"classificationModel.sav"
pickle.dump(modelTs, open(deployLocationTS, 'wb'))
pickle.dump(modelTs, open(deployLocationTS2, 'wb'))
print("\n trainModel Ends")
saved_model = 'textsummarization_'+iterName+'_'+iterVersion
log.info('Status:-|... AION text summarization completed')
model = learner_type
log.info('================ text summarization Completed ================\n')
if survival_analysis_status:
sa_method = config_obj.getEionanomalyModels()
labeldict = {}
log.info('\n================ SurvivalAnalysis Started ================ ')
log.info('Status:-|... AION SurvivalAnalysis started')
log.info('\n================ SurvivalAnalysis DataFrame ================ ')
log.info(dataFrame)
from survival import survival_analysis
from learner.machinelearning import machinelearning
sa_obj = survival_analysis.SurvivalAnalysis(dataFrame, preprocess_pipe, sa_method, targetFeature, datetimeFeature, filter_expression, profilerObj.train_features_type)
if sa_obj != None:
predict_json = sa_obj.learn()
if sa_method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']:
predicted = sa_obj.models[0].predict(dataFrame[datetimeFeature])
status, msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
self.features = [datetimeFeature]
elif sa_method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']:
predicted = sa_obj.models[0].predict_cumulative_hazard(dataFrame)
datacolumns = list(dataFrame.columns)
targetColumn = targetFeature
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
self.features = datacolumns
score = sa_obj.score
scoreParam = 'Concordance_Index'
status,msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
model = sa_method
modelType = "SurvivalAnalysis"
model_type = "SurvivalAnalysis"
modelName = sa_method
i = 1
for mdl in sa_obj.models:
saved_model = "%s_%s_%s_%d.sav"%(model_type,sa_method,iterVersion,i)
pickle.dump(mdl, open(os.path.join(deployLocation,'model',saved_model), 'wb')),
i+=1
p = 1
for plot in sa_obj.plots:
img_name = "%s_%d.png"%(sa_method,p)
img_location = os.path.join(imageFolderLocation,img_name)
plot.savefig(img_location,bbox_inches='tight')
sa_images.append(img_location)
p+=1
log.info('Status:-|... AION SurvivalAnalysis completed')
log.info('\n================ SurvivalAnalysis Completed ================ ')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('\n================== Visualization Recommendation Started ==================')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfrows,self.dfcols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('================== Visualization Recommendation Started ==================\n')
if similarityIdentificationStatus or contextualSearchStatus:
datacolumns=list(dataFrame.columns)
features = modelFeatures.split(",")
if indexFeature != '' and indexFeature != 'NA':
iFeature = indexFeature.split(",")
for ifea in iFeature:
if ifea not in features:
features.append(ifea)
for x in features:
dataFrame[x] = similaritydf[x]
#get vectordb(chromadb) status selected
if similarityIdentificationStatus:
learner_type = 'similarityIdentification'
else:
learner_type = 'contextualSearch'
vecDBCosSearchStatus = config_obj.getVectorDBCosSearchStatus(learner_type)
if vecDBCosSearchStatus:
status, msg = save_chromadb(dataFrame, config_obj, trained_data_file, modelFeatures)
if not status:
log.info('Vector DB File Error: '+str(msg))
else:
status, msg = save_csv(dataFrame,trained_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
self.features = datacolumns
model_type = config_obj.getAlgoName(problem_type)
model = model_type #bug 12833
model_tried = '{"Model":"'+model_type+'","FeatureEngineering":"NA","Score":"NA","ModelUncertainty":"NA"}'
modelType = learner_type
saved_model = learner_type
score = 'NA'
if deploy_status:
if str(model) != 'None':
log.info('\n================== Deployment Started ==================')
log.info('Status:-|... AION Creating Prediction Service Start')
deployer_mlstart = time.time()
deployJson = config_obj.getEionDeployerConfiguration()
deploy_name = iterName+'_'+iterVersion
from prediction_package.model_deploy import DeploymentManager
if textsummarizationStatus :
deploy = DeploymentManager()
deploy.deployTSum(deployLocation,preTrainedModellocation)
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Deployer completed')
log.info('================== Deployment Completed ==================')
else:
deploy = DeploymentManager()
deploy.deploy_model(deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deployLocation,self.features,self.profilerAction,dataLocation,labelMaps,column_merge_flag,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer_pickle_file,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,self.method,deployFolder,iterName,iterVersion,self.wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,noofforecasts,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,deploy_config)
codeConfigure.update_config('deploy_path',os.path.join(deployLocation,'publish'))
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Creating Prediction Service completed')
log.info('================== Deployment Completed ==================')
if not outputDriftStatus and not inputDriftStatus:
from transformations.dataProfiler import set_features
self.features = set_features(self.features,profilerObj)
self.matrix += '}'
self.trainmatrix += '}'
print(model_tried)
model_tried = eval('['+model_tried+']')
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
deployPath = deployLocation.replace(os.sep, '/')
if survival_analysis_status:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"survivalProbability":json.loads(predict_json),"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,"imageLocation":str(sa_images),"LogFile":logFileName}}
elif not timeseriesStatus:
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
else:
if config_obj.summarize:
modelType = 'Summarization'
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,'forecasts':json.loads(forecast_output),"LogFile":logFileName}}
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w',encoding='utf-8') as f:
json.dump(output_json, f)
f.close()
output_json = json.dumps(output_json)
log.info('\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfrows)+','+str(self.dfcols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
for i in range(0,len(self.similarGroups)):
log.info('------->Similar Groups '+str(i+1)+' '+str(self.similarGroups[i]))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\n')
log.info('Status:-|... AION Model Training Successfully Done')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
log.error(inst, exc_info = True)
output_json = {"status":"FAIL","message":str(inst).strip('"'),"LogFile":logFileName}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\nTotal execution time(sec) :'+str(executionTime))
log.info('\n------------- Output JSON ------------')
log.info('aion_learner_status:'+str(output_json))
log.info('------------- Output JSON ------------\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def split_into_train_test_data(self,featureData,targetData,testPercentage,log,modelType='classification'): #Unnati
log.info('\n-------------- Test Train Split ----------------')
if testPercentage == 0 or testPercentage == 100: #Unnati
xtrain=featureData
ytrain=targetData
xtest=pd.DataFrame()
ytest=pd.DataFrame()
else:
testSize= testPercentage/100 #Unnati
if modelType == 'regression':
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
else:
try:
log.info('-------> Split Type: Stratify Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,random_state=42)
except Exception as ValueError:
count_unique = targetData.value_counts()
feature_with_single_count = count_unique[ count_unique == 1].index.tolist()
error = f"The least populated class in {feature_with_single_count} has only 1 member, which is too few. The minimum number of groups for any class cannot be less than 2"
raise Exception(error) from ValueError
except:
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') #Unnati
log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->')
log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->')
log.info('-------------- Test Train Split End ----------------\n')
return(xtrain,ytrain,xtest,ytest)
def aion_train_model(arg):
    """Entry point: load/validate the training config JSON and run training.

    Args:
        arg: Path to the training configuration JSON file.

    Returns:
        JSON string describing the outcome (status/message); on success the
        learner status produced by the server execution.
    """
    warnings.filterwarnings('ignore')
    config_path = Path( arg)
    with open( config_path, 'r') as f:
        config = json.load( f)
    log = set_log_handler(config['basic'])
    log.info('************* Version - v'+AION_VERSION+' *************** \n')
    msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
    log.info(msg)
    try:
        config_validate(arg)
        valid, msg = pushRecordForTraining()
        if valid:
            serverObj = server()
            configObj = AionConfigManager()
            codeConfigure = code_configure()
            codeConfigure.create_config(config)
            readConfistatus,msg = configObj.readConfigurationFile(config)
            if(readConfistatus == False):
                raise ValueError( msg)
            output = serverObj.startScriptExecution(configObj, codeConfigure, log)
        else:
            output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
            output = json.dumps(output)
        print( f"\naion_learner_status:{output}\n")
        log.info( f"\naion_learner_status:{output}\n")
    except Exception as inst:
        output = {"status":"FAIL","message":str(inst).strip('"')}
        output = json.dumps(output)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        # os.path.split returns a (head, tail) tuple; take the tail so the log
        # shows the file name instead of the whole tuple.
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        print(f"\naion_learner_status:{output}\n")
        log.info( f"\naion_learner_status:{output}\n")
    return output
# CLI entry point: expects the training configuration JSON path as argv[1].
if __name__ == "__main__":
    aion_train_model( sys.argv[1])
|
aion_uncertainties.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from uncertainties.uq_main import aionUQ
import os
from datetime import datetime
from os.path import expanduser
import platform
import logging
class run_uq:
    """Runner for Uncertainty Quantification (UQ) of a trained estimator.

    Detects whether the supplied model is a supported classifier or regressor
    and delegates the actual UQ computation to aionUQ.
    """
    def __init__(self,modelfeatures,modelFile,csvFile,target):
        self.modelfeatures=modelfeatures  # feature names the model consumes
        self.modelFile=modelFile          # path to the serialized model file
        self.csvFile=csvFile              # path to the CSV evaluation data
        self.target=target                # target column name
    ##UQ classification fn
    def getUQclassification(self,model,ProblemName,Params):
        """Run classification UQ.

        Returns (accuracy, uq_ece, output_jsonobject). Any aionUQ failure is
        printed and re-raised so the caller can report the genuine error; the
        previous version swallowed it and then failed with an
        UnboundLocalError at the return statement.
        """
        df = pd.read_csv(self.csvFile)
        # dtypes.iteritems() was removed in newer pandas; zip(columns, dtypes)
        # is the portable equivalent (fix carried over from py3.8.11 update).
        object_cols = [col for col, col_type in zip(df.columns,df.dtypes) if col_type == 'object']
        # Keep only numeric, NaN-free columns for the UQ run.
        df = df.drop(object_cols, axis=1)
        df = df.dropna(axis=1)
        df = df.reset_index(drop=True)
        modelfeatures = self.modelfeatures
        # NOTE(review): assumes the target column is numeric and NaN-free so
        # it survives the drops above -- confirm against callers.
        y=df[self.target].values
        y = y.flatten()
        X = df.drop(self.target, axis=1)
        try:
            uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,self.target)
            accuracy,uq_ece,output_jsonobject=uqObj.uqMain_BBMClassification()
        except Exception as e:
            print("uq error",e)
            raise  # propagate so uqMain records the real failure reason
        return accuracy,uq_ece,output_jsonobject
    ##UQ regression fn
    def getUQregression(self,model,ProblemName,Params):
        """Run regression UQ; returns the six aionUQ regression metrics."""
        df = pd.read_csv(self.csvFile)
        modelfeatures = self.modelfeatures
        dfp = df[modelfeatures]
        tar = self.target
        target = df[tar]
        uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
        total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
        return total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject
    def uqMain(self,model):
        """Dispatch UQ by estimator class name.

        Returns:
            Tuple (class_status, reg_status, algorithm_status, output) where
            each status is '', 'SUCCESS' or 'FAILED', and output is a JSON
            string (or the aionUQ result object on success).
        """
        reg_status=""
        class_status=""
        algorithm_status=""
        # Default payload: previously a missing model file left `output`
        # unbound and the return statement raised NameError.
        output = json.dumps({'Problem':'None','msg':'Model file not found'})
        try:
            if Path(self.modelFile).is_file():
                ProblemName = model.__class__.__name__
                if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
                    Problemtype = 'Classification'
                elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor']:
                    Problemtype = 'Regression'
                else:
                    Problemtype = "None"
                if Problemtype.lower() == 'classification':
                    try:
                        Params = model.get_params()
                        accuracy,uq_ece,output = self.getUQclassification(model,ProblemName,Params)
                        class_status="SUCCESS"
                    except Exception as e:
                        print(e)
                        class_status="FAILED"
                        output = {'Problem':'None','msg':str(e)}
                        output = json.dumps(output)
                elif Problemtype.lower() == 'regression' :
                    try:
                        Params = model.get_params()
                        total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,output = self.getUQregression(model,ProblemName,Params)
                        reg_status="SUCCESS"
                    except Exception as e:
                        output = {'Problem':'None','msg':str(e)}
                        output = json.dumps(output)
                        reg_status="FAILED"
                else:
                    # Unsupported estimator: report and flag the algorithm status.
                    output={}
                    output['Problem']="None"
                    output['msg']="Uncertainty Quantification not supported for this algorithm."
                    output = json.dumps(output)
                    algorithm_status="FAILED"
        except Exception as e:
            print(e)
            reg_status="FAILED"
            class_status="FAILED"
            algorithm_status="FAILED"
            output = {'Problem':'None','msg':str(e)}
            output = json.dumps(output)
        return class_status,reg_status,algorithm_status,output
def aion_uq(modelFile,dataFile,features,targetfeatures):
    """Run Uncertainty Quantification on a saved model against a dataset.

    Args:
        modelFile: Path to the joblib-serialized model.
        dataFile: Path to the CSV evaluation data.
        features: Model feature names (list, or comma-separated string).
        targetfeatures: Target column name.

    Returns:
        JSON string with the UQ result, or an error payload
        {'Problem': 'None', 'msg': ...} on failure.
    """
    log = None  # guard: logging may fail to initialize before an exception
    try:
        from appbe.dataPath import DEPLOY_LOCATION
        uqLogLocation = os.path.join(DEPLOY_LOCATION,'logs')
        # exist_ok replaces the race-prone makedirs/exists dance.
        os.makedirs(uqLogLocation, exist_ok=True)
        filename_uq = 'uqlog_'+str(int(time.time()))
        filename_uq=filename_uq+'.log'
        filepath = os.path.join(uqLogLocation, filename_uq)
        print(filepath)
        logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
        log = logging.getLogger('aionUQ')
        log.setLevel(logging.INFO)
        log.info('************* Version - v1.7.0 *************** \n')
        if isinstance(features, list):
            modelfeatures = features
        else:
            if ',' in features:
                modelfeatures = [x.strip() for x in features.split(',')]
            else:
                modelfeatures = features.split(',')
        model = joblib.load(modelFile)
        uqobj = run_uq(modelfeatures,modelFile,dataFile,targetfeatures)
        class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
        if (class_status.lower() == 'failed'):
            log.info('uq classifiction failed./n')
        elif (class_status.lower() == 'success'):
            log.info('uq classifiction success./n')
        else:
            log.info('uq classifiction not used../n')
        if (reg_status.lower() == 'failed'):
            log.info('uq regression failed./n')
        elif (reg_status.lower() == 'success'):
            log.info('uq regression success./n')
        else:
            log.info('uq regression not used./n')
        if (algorithm_status.lower() == 'failed'):
            log.info('Problem type issue, UQ only support classification and regression. May be selected algorithm not supported by Uncertainty Quantification currently./n')
    except Exception as e:
        # Previously this logged via `log` even when logging setup itself had
        # failed, raising NameError and masking the real error.
        if log is not None:
            log.info('uq test failed.n'+str(e))
        else:
            print('uq test failed.n'+str(e))
        output = {'Problem':'None','msg':str(e)}
        output = json.dumps(output)
    return(output)
#Sagemaker main fn call
if __name__=='__main__':
    # Sagemaker / CLI entry point:
    #   python <script> <savFile> <csvFile> <features> <target>
    log = None  # logger exists only after setup; checked in the except path
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('savFile')
        parser.add_argument('csvFile')
        parser.add_argument('features')
        parser.add_argument('target')
        args = parser.parse_args()
        home = expanduser("~")
        if platform.system() == 'Windows':
            uqLogLocation = os.path.join(home,'AppData','Local','HCLT','AION','uqLogs')
        else:
            uqLogLocation = os.path.join(home,'HCLT','AION','uqLogs')
        # Idempotent directory creation (replaces try/except around makedirs)
        os.makedirs(uqLogLocation, exist_ok=True)
        filename_uq = 'uqlog_'+str(int(time.time()))+'.log'
        filepath = os.path.join(uqLogLocation, filename_uq)
        logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
        log = logging.getLogger('aionUQ')
        log.setLevel(logging.DEBUG)
        if ',' in args.features:
            args.features = [x.strip() for x in args.features.split(',')]
        else:
            args.features = args.features.split(',')
        modelFile = args.savFile
        modelfeatures = args.features
        csvFile = args.csvFile
        target=args.target
        model = joblib.load(args.savFile)
        ##Main uq function call
        uqobj = run_uq(modelfeatures,modelFile,csvFile,target)
        class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
        if (class_status.lower() == 'failed'):
            log.info('uq classification failed.\n')
        elif (class_status.lower() == 'success'):
            log.info('uq classification success.\n')
        else:
            log.info('uq classification not used.\n')
        if (reg_status.lower() == 'failed'):
            log.info('uq regression failed.\n')
        elif (reg_status.lower() == 'success'):
            log.info('uq regression success.\n')
        else:
            log.info('uq regression not used.\n')
        if (algorithm_status.lower() == 'failed'):
            msg = 'Uncertainty Quantification not supported for this algorithm'
            log.info('Algorithm not supported by Uncertainty Quantification.\n')
            output = {'Problem':'None','msg':str(msg)}
            output = json.dumps(output)
    except Exception as e:
        # Bug fix: original used `log` here even when the exception fired
        # before the logger existed, masking the real error with a NameError.
        if log is not None:
            log.info('uq test failed.\n'+str(e))
        output = {'Problem':'None','msg':str(e)}
        output = json.dumps(output)
    print(output)
aion_telemetry.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import requests
import json
import os
from datetime import datetime
import socket
import getmac
def telemetry_data(operation,Usecase,data):
    """Post a telemetry record for an AION operation to the remote endpoint.

    On network failure the serialized record is appended to a local
    ``telemetry.txt`` buffer next to this module for later retry; on success
    any previously buffered records are flushed via check_telemetry_file().

    Parameters
    ----------
    operation : str
        Name of the operation being recorded.
    Usecase : str
        Use case identifier.
    data : str
        Free-form remarks attached to the record.
    """
    now = datetime.now()
    ID = datetime.timestamp(now)
    record_date = now.strftime("%y-%m-%d %H:%M:%S")
    try:
        user = os.getlogin()
    except OSError:
        # os.getlogin() raises OSError when there is no controlling terminal
        user = 'NA'
    computername = socket.getfqdn()
    macaddress = getmac.get_mac_address()
    item = {}
    item['ID'] = str(int(ID))
    item['record_date'] = record_date
    item['UseCase'] = Usecase
    item['user'] = str(user)
    item['operation'] = operation
    item['remarks'] = data
    item['hostname'] = computername
    item['macaddress'] = macaddress
    url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
    record = {}
    record['TableName'] = 'AION_OPERATION'
    record['Item'] = item
    record = json.dumps(record)
    try:
        # NOTE(review): API key is hard-coded; consider moving to configuration.
        requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
        # Endpoint reachable again: flush any previously buffered records.
        check_telemetry_file()
    except Exception as inst:
        # Best effort: buffer the record locally for a later retry.
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
        # Bug fix: use a context manager so the handle is closed even on error.
        with open(filename, "a+") as f:
            f.write(record+'\n')
def check_telemetry_file():
    """Retry posting locally buffered telemetry records.

    Reads ``telemetry.txt`` next to this module, re-posts each non-empty
    record, and rewrites the file with only the records that still failed.

    Returns
    -------
    bool
        Always True (kept for backward compatibility with callers).
    """
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
    if not os.path.isfile(file_path):
        return True
    with open(file_path, 'r') as f:
        file_content = f.read()
    matched_lines = file_content.split('\n')
    write_lines = []
    url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
    for record in matched_lines:
        # Bug fix: split('\n') leaves a trailing empty entry which the
        # original code POSTed as an empty record; skip blanks.
        if not record.strip():
            continue
        try:
            requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
        except Exception:
            write_lines.append(record)
    # Bug fix: rewrite in 'w' mode; the original opened in append mode and
    # relied on seek(0)/truncate(), which has no effect on append writes.
    with open(file_path, "w") as f:
        for record in write_lines:
            f.write(record+'\n')
    return True
aion_publish.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import glob
import json
def publish(data):
    """Install a deployed model's wheel package and (re)start its service.

    Parameters
    ----------
    data : str
        Either a path to a ``.json`` file or a JSON string containing
        ``modelName``, ``modelVersion``, ``deployLocation`` and
        ``serviceFolder``.

    Returns
    -------
    str
        JSON status string: ``{"status": "SUCCESS"}`` or an error payload.
    """
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    model = jsonData['modelName']
    # Bug fix: modelVersion may arrive as an int; str() it before concatenation.
    version = str(jsonData['modelVersion'])
    deployFolder = jsonData['deployLocation']
    model = model.replace(" ", "_")
    deployedPath = os.path.join(deployFolder,model+'_'+version)
    deployedPath = os.path.join(deployedPath,'WHEELfile')
    whlfilename='na'
    if os.path.isdir(deployedPath):
        # Pick the (last) wheel file found in the WHEELfile folder.
        for file in os.listdir(deployedPath):
            if file.endswith(".whl"):
                whlfilename = os.path.join(deployedPath,file)
    if whlfilename != 'na':
        # Reinstall the model package from the freshly built wheel.
        subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model])
        subprocess.check_call([sys.executable, "-m", "pip", "install", whlfilename])
        status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
        if status == 'Running':
            service_stop(json.dumps(jsonData))
        service_start(json.dumps(jsonData))
        output_json = {'status':"SUCCESS"}
        output_json = json.dumps(output_json)
    else:
        output_json = {'status':'Error','Msg':'Installation Package not Found'}
        output_json = json.dumps(output_json)
    return(output_json)
def check_service_running(model,serviceFolder):
    """Check whether the model's service script exists and is running.

    Parameters
    ----------
    model : str
        Model name (spaces are normalized to underscores).
    serviceFolder : str
        Folder expected to contain ``<model>_service.py``.

    Returns
    -------
    tuple
        (status, pid, ip, port) where status is one of 'File Not Exist',
        'File Exist' or 'Running'; pid/ip/port are '' unless running.
    """
    model = model.replace(" ", "_")
    filename = model+'_service.py'
    modelservicefile = os.path.join(serviceFolder,filename)
    status = 'File Not Exist'
    ip = ''
    port = ''
    pid = ''
    if os.path.exists(modelservicefile):
        status = 'File Exist'
        import psutil
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections'])
                # Bug fix: cmdline can be None or have fewer than 2 entries;
                # the original indexed [1] blindly and could crash the scan.
                cmdline = pinfo['cmdline'] or []
                if 'python' in pinfo['name'] and len(cmdline) > 1 and filename in cmdline[1]:
                    status = 'Running'
                    pid = pinfo['pid']
                    for x in pinfo['connections'] or []:
                        ip = x.laddr.ip
                        port = x.laddr.port
            except psutil.NoSuchProcess:
                # Process vanished between iteration and inspection; skip it.
                continue
    return(status,pid,ip,port)
def service_stop(data):
    """Terminate the model's running service process, if any.

    Parameters
    ----------
    data : str
        Path to a ``.json`` file or a JSON string with ``modelName`` and
        ``serviceFolder`` keys.

    Returns
    -------
    str
        JSON status string (always SUCCESS; stopping is best-effort).
    """
    # Bug fix: `time` is used below but was never imported at module level,
    # so the original raised NameError when a running service was found.
    import time
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
    if status == 'Running':
        import psutil
        p = psutil.Process(int(pid))
        p.terminate()
        time.sleep(2)  # give the process a moment to exit
    output_json = {'status':'SUCCESS'}
    output_json = json.dumps(output_json)
    return(output_json)
def service_start(data):
    """Create (if necessary) and launch the per-model service script.

    Copies ``model_service.py`` into the service folder as
    ``<model>_service.py`` with model-specific imports prepended, then starts
    it in a new console if it is not already running.

    NOTE: process control here is Windows-only (``tasklist`` / ``start cmd``).

    Parameters
    ----------
    data : str
        Path to a ``.json`` file or a JSON string with ``modelName``,
        ``modelVersion``, ``ip``, ``port``, ``deployLocation`` and
        ``serviceFolder`` keys.

    Returns
    -------
    str
        JSON status string with the resulting service state in ``Msg``.
    """
    # Bug fix: `time` is used below but was never imported at module level.
    import time
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    model = jsonData['modelName']
    version = jsonData['modelVersion']
    ip = jsonData['ip']
    port = jsonData['port']
    deployFolder = jsonData['deployLocation']
    serviceFolder = jsonData['serviceFolder']
    model = model.replace(" ", "_")
    deployLocation = os.path.join(deployFolder,model+'_'+version)
    org_service_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'model_service.py'))
    filename = model+'_service.py'
    modelservicefile = os.path.join(serviceFolder,filename)
    status = 'File Not Exist'
    if os.path.exists(modelservicefile):
        status = 'File Exist'
        r = ([line.split() for line in subprocess.check_output("tasklist").splitlines()])
        for i in range(len(r)):
            # Bug fix: check_output returns bytes, so the original str-in-list
            # test could never match; compare the encoded filename instead.
            if filename.encode() in r[i]:
                status = 'Running'
    if status == 'File Not Exist':
        shutil.copy(org_service_file,modelservicefile)
        # Prepend model-specific imports to the copied service template.
        with open(modelservicefile, 'r+') as file:
            content = file.read()
            file.seek(0, 0)
            line = 'from '+model+' import aion_performance'
            file.write(line+"\n")
            line = 'from '+model+' import aion_drift'
            file.write(line+ "\n")
            line = 'from '+model+' import featureslist'
            file.write(line+ "\n")
            line = 'from '+model+' import aion_prediction'
            file.write(line+ "\n")
            file.write(content)
        status = 'File Exist'
    if status == 'File Exist':
        status,pid,ipold,portold = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
        if status != 'Running':
            command = "python "+modelservicefile+' '+str(port)+' '+str(ip)
            os.system('start cmd /c "'+command+'"')  # Windows-only launcher
            time.sleep(2)
            status = 'Running'
    output_json = {'status':'SUCCESS','Msg':status}
    output_json = json.dumps(output_json)
    return(output_json)
if __name__ == "__main__":
    # Bug fix: the original called aion_publish(), which is not defined in
    # this module; the actual entry point is publish().
    publish(sys.argv[1])
|
aion_text_summarizer.py | import json
import logging
import os
import shutil
import time
import sys
from sys import platform
from distutils.util import strtobool
from config_manager.pipeline_config import AionConfigManager
from summarizer import Summarizer
# Base class for EION configuration Manager which read the needed f params from eion.json, initialize the parameterlist, read the respective params, store in variables and return back to caller function or external modules.
class AionTextManager:
    """Reads input text and produces extractive summaries via the BERT Summarizer."""
    def __init__(self):
        self.log = logging.getLogger('eion')  # shared project logger
        self.data = ''
        self.problemType = ''
        self.basic = []
        self.advance=[]
    def readTextfile(self,dataPath):
        """Return the full contents of the text file at ``dataPath``.

        Bug fix: the original opened the file without ever closing it;
        use a context manager so the handle is always released.
        """
        with open(dataPath, "r") as file:
            return file.read()
    def generateSummary(self,data,algo,stype):
        """Summarize ``data`` with BERT; ``stype`` selects the minimum length.

        'large' -> min_length=300, 'medium' -> 150, 'small' -> 60.
        Returns None for an unknown ``stype`` (preserves original behavior).
        ``algo`` is accepted but currently unused (as in the original).
        """
        bert_model = Summarizer()
        if stype == "large":
            return ''.join(bert_model(data, min_length=300))
        elif stype == "medium":
            return ''.join(bert_model(data, min_length=150))
        elif stype == "small":
            return ''.join(bert_model(data, min_length=60))
def aion_textsummary(arg):
    """Entry point: read the configuration at ``arg``, load its text source
    and return a JSON string with the generated summary."""
    text_manager = AionTextManager()
    config_manager = AionConfigManager()
    read_status, message = config_manager.readConfigurationFile(arg)
    source_path = config_manager.getTextlocation()
    raw_text = text_manager.readTextfile(source_path)
    algo_name, summary_size = config_manager.getTextSummarize()
    summary_text = text_manager.generateSummary(raw_text, algo_name, summary_size)
    return json.dumps({'status': 'Success', 'summary': summary_text})
if __name__ == "__main__":
    # CLI entry: first argument is the path to the configuration file
    aion_textsummary(sys.argv[1])
|
__init__.py | import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
|
aion_service.py | #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
from functools import partial
from http.server import SimpleHTTPRequestHandler, test
import base64
from appbe.dataPath import DEPLOY_LOCATION
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
from functools import partial
import re
import cgi
from datetime import datetime
import json
import sys
from datetime import datetime
# Per-process session state used by the pattern-anomaly endpoint.
# NOTE(review): a single shared dict assumes one active session at a time —
# concurrent sessions would clobber each other; confirm intended usage.
user_records = {}
class LocalModelData(object):
    # Class-level cache of loaded models, keyed by model name.
    models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
    """HTTP handler exposing deployed AION models under /AION/<model>/<operation>.

    POST requests with Basic auth dispatch to per-model scripts (predict,
    explain, monitoring, performance, ...) via subprocess; GET requests
    return usage/help text for the same endpoints.
    """
    def __init__(self, *args, **kwargs):
        """Capture the Basic-auth credentials bound in via functools.partial."""
        username = kwargs.pop("username")
        password = kwargs.pop("password")
        # Precompute the expected value of the Authorization header.
        self._auth = base64.b64encode(f"{username}:{password}".encode()).decode()
        super().__init__(*args)
    def do_HEAD(self):
        """Answer HEAD with a bare 200."""
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
    def do_AUTHHEAD(self):
        """Send a 401 challenge requesting Basic authentication."""
        self.send_response(401)
        self.send_header("WWW-Authenticate", 'Basic realm="Test"')
        self.send_header("Content-type", "text/html")
        self.end_headers()
    def do_POST(self):
        """Authenticate, then route /AION/<model>/<operation> to the model's
        deployed scripts under DEPLOY_LOCATION and return their output."""
        print("PYTHON ######## REQUEST ####### STARTED")
        if None != re.search('/AION/', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                if self.headers.get("Authorization") == None:
                    self.do_AUTHHEAD()
                    resp = "Authentication Failed: Auth Header Not Present"
                    resp=resp.encode()
                    self.wfile.write(resp)
                elif self.headers.get("Authorization") == "Basic " + self._auth:
                    length = int(self.headers.get('content-length'))
                    #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
                    data = self.rfile.read(length)
                    #print(data)
                    #keyList = list(data.keys())
                    #print(keyList[0])
                    # URL shape: /AION/<model>/<operation>
                    model = self.path.split('/')[-2]
                    operation = self.path.split('/')[-1]
                    home = expanduser("~")
                    #data = json.loads(data)
                    dataStr = data
                    model_path = os.path.join(DEPLOY_LOCATION,model)
                    isdir = os.path.isdir(model_path)
                    if isdir:
                        if operation.lower() == 'predict':
                            # Run the deployed prediction script and extract its payload.
                            predict_path = os.path.join(model_path,'aion_predict.py')
                            outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                            outputStr = outputStr.decode('utf-8')
                            outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
                            outputStr = outputStr.strip()
                            resp = outputStr
                        elif operation.lower() == 'spredict':
                            # Secure/streaming predict variant; errors are printed only.
                            try:
                                predict_path = os.path.join(model_path,'aion_spredict.py')
                                print(predict_path)
                                outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                                outputStr = outputStr.decode('utf-8')
                                outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
                                outputStr = outputStr.strip()
                                resp = outputStr
                            except Exception as e:
                                print(e)
                        elif operation.lower() == 'features':
                            predict_path = os.path.join(model_path,'featureslist.py')
                            outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                            outputStr = outputStr.decode('utf-8')
                            outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
                            outputStr = outputStr.strip()
                            resp = outputStr
                        elif operation.lower() == 'explain':
                            predict_path = os.path.join(model_path,'explainable_ai.py')
                            outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr])
                            outputStr = outputStr.decode('utf-8')
                            outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
                            outputStr = outputStr.strip()
                        elif operation.lower() == 'monitoring':
                            # Input drift detection.
                            predict_path = os.path.join(model_path,'aion_ipdrift.py')
                            outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                            outputStr = outputStr.decode('utf-8')
                            outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
                            outputStr = outputStr.strip()
                        elif operation.lower() == 'performance':
                            # Output/performance drift detection.
                            predict_path = os.path.join(model_path,'aion_opdrift.py')
                            outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                            outputStr = outputStr.decode('utf-8')
                            outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
                            outputStr = outputStr.strip()
                        elif operation.lower() == 'pattern_anomaly_predict':
                            # Clickstream pattern-anomaly scoring based on state
                            # transition probabilities and cluster membership.
                            data = json.loads(data)
                            anomaly = False
                            remarks = ''
                            clusterid = -1
                            configfilename = os.path.join(model_path,'datadetails.json')
                            filename = os.path.join(model_path,'clickstream.json')
                            clusterfilename = os.path.join(model_path,'stateClustering.csv')
                            probfilename = os.path.join(model_path,'stateTransitionProbability.csv')
                            dfclus = pd.read_csv(clusterfilename)
                            dfprod = pd.read_csv(probfilename)
                            f = open(configfilename, "r")
                            configSettings = f.read()
                            f.close()
                            configSettingsJson = json.loads(configSettings)
                            activity = configSettingsJson['activity']
                            sessionid = configSettingsJson['sessionid']
                            f = open(filename, "r")
                            configSettings = f.read()
                            f.close()
                            configSettingsJson = json.loads(configSettings)
                            # Thresholds driving the anomaly rules below.
                            groupswitching = configSettingsJson['groupswitching']
                            page_threshold = configSettingsJson['transitionprobability']
                            chain_count = configSettingsJson['transitionsequence']
                            chain_probability = configSettingsJson['sequencethreshold']
                            currentactivity = data[activity]
                            if bool(user_records):
                                # Existing session state: compare against previous activity.
                                sessionid = data[sessionid]
                                if sessionid != user_records['SessionID']:
                                    # New session: reset tracking state.
                                    user_records['SessionID'] = sessionid
                                    prevactivity = ''
                                    user_records['probarry'] = []
                                    user_records['prevclusterid'] = -1
                                    user_records['NoOfClusterHopping'] = 0
                                    user_records['pageclicks'] = 1
                                else:
                                    prevactivity = user_records['Activity']
                                user_records['Activity'] = currentactivity
                                pageswitch = True
                                if prevactivity == currentactivity or prevactivity == '':
                                    probability = 0
                                    pageswitch = False
                                    remarks = ''
                                else:
                                    user_records['pageclicks'] += 1
                                    # Look up P(current | previous) in the transition table.
                                    df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
                                    if df1.empty:
                                        remarks = 'Anomaly Detected - User in unusual state'
                                        anomaly = True
                                        clusterid = -1
                                        probability = 0
                                        user_records['probarry'].append(probability)
                                        n=int(chain_count)
                                        num_list = user_records['probarry'][-n:]
                                        avg = sum(num_list)/len(num_list)
                                        for index, row in dfclus.iterrows():
                                            clusterlist = row["clusterlist"]
                                            if currentactivity in clusterlist:
                                                clusterid = row["clusterid"]
                                    else:
                                        probability = df1['Probability'].iloc[0]
                                        user_records['probarry'].append(probability)
                                        n=int(chain_count)
                                        num_list = user_records['probarry'][-n:]
                                        # Rolling mean of the last n transition probabilities.
                                        davg = sum(num_list)/len(num_list)
                                        for index, row in dfclus.iterrows():
                                            clusterlist = row["clusterlist"]
                                            if currentactivity in clusterlist:
                                                clusterid = row["clusterid"]
                                remarks = ''
                                if user_records['prevclusterid'] != -1:
                                    if probability == 0 and user_records['prevclusterid'] != clusterid:
                                        user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
                                        if user_records['pageclicks'] == 1:
                                            remarks = 'Anomaly Detected - Frequent Cluster Hopping'
                                            anomaly = True
                                        else:
                                            remarks = 'Cluster Hopping Detected'
                                        user_records['pageclicks'] = 0
                                        if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
                                            remarks = 'Anomaly Detected - Multiple Cluster Hopping'
                                            anomaly = True
                                    elif probability == 0:
                                        remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                        anomaly = True
                                    elif probability <= float(page_threshold):
                                        remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                                        anomaly = True
                                else:
                                    if pageswitch == True:
                                        if probability == 0:
                                            remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                            anomaly = True
                                        elif probability <= float(page_threshold):
                                            remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                                            anomaly = True
                                        else:
                                            remarks = ''
                                # NOTE(review): davg is only set on the non-empty transition
                                # path above — confirm it cannot be referenced unbound here.
                                if davg < float(chain_probability):
                                    if anomaly == False:
                                        remarks = 'Anomaly Detected - In-frequent Pattern Detected'
                                        anomaly = True
                            else:
                                # First request: initialize the session tracking state.
                                user_records['SessionID'] = data[sessionid]
                                user_records['Activity'] = data[activity]
                                user_records['probability'] = 0
                                user_records['probarry'] = []
                                user_records['chainprobability'] = 0
                                user_records['prevclusterid'] = -1
                                user_records['NoOfClusterHopping'] = 0
                                user_records['pageclicks'] = 1
                            # Resolve the cluster of the current activity for next time.
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                            user_records['prevclusterid'] = clusterid
                            outputStr = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
                        elif operation.lower() == 'pattern_anomaly_settings':
                            # Persist updated anomaly thresholds to clickstream.json.
                            data = json.loads(data)
                            groupswitching = data['groupswitching']
                            transitionprobability = data['transitionprobability']
                            transitionsequence = data['transitionsequence']
                            sequencethreshold = data['sequencethreshold']
                            filename = os.path.join(model_path,'clickstream.json')
                            data = {}
                            data['groupswitching'] = groupswitching
                            data['transitionprobability'] = transitionprobability
                            data['transitionsequence'] = transitionsequence
                            data['sequencethreshold'] = sequencethreshold
                            updatedConfig = json.dumps(data)
                            with open(filename, "w") as fpWrite:
                                fpWrite.write(updatedConfig)
                                fpWrite.close()
                            outputStr = '{"Status":"SUCCESS"}'
                        else:
                            outputStr = "{'Status':'Error','Msg':'Operation not supported'}"
                    else:
                        outputStr = "{'Status':'Error','Msg':'Model Not Present'}"
                    resp = outputStr
                    resp=resp+"\n"
                    resp=resp.encode()
                    self.send_response(200)
                    self.send_header('Content-Type', 'application/json')
                    self.end_headers()
                    self.wfile.write(resp)
                else:
                    # Wrong credentials: challenge again and echo the header.
                    self.do_AUTHHEAD()
                    self.wfile.write(self.headers.get("Authorization").encode())
                    resp = "Authentication Failed"
                    resp=resp.encode()
                    self.wfile.write(resp)
        else:
            print("python ==> else1")
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        print("PYTHON ######## REQUEST ####### ENDED")
        return
    def getModelFeatures(self,modelSignature):
        """Run the model's featureslist.py and return (ok, features-or-error)."""
        datajson = {'Body':'Gives the list of features'}
        home = expanduser("~")
        if platform.system() == 'Windows':
            predict_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'featureslist.py')
        else:
            predict_path = os.path.join(home,'HCLT','AION','target',modelSignature,'featureslist.py')
        if(os.path.isfile(predict_path)):
            outputStr = subprocess.check_output([sys.executable,predict_path])
            outputStr = outputStr.decode('utf-8')
            outputStr = re.search(r'features:(.*)',str(outputStr), re.IGNORECASE).group(1)
            outputStr = outputStr.strip()
            displaymsg = outputStr
            #displaymsg = json.dumps(displaymsg)
            return(True,displaymsg)
        else:
            displaymsg = "{'status':'ERROR','msg':'Unable to fetch featuers'}"
            return(False,displaymsg)
    def getFeatures(self,modelSignature):
        """Build the help text for the /features endpoint."""
        datajson = {'Body':'Gives the list of features'}
        urltext = '/AION/UseCase_Version/features'
        if modelSignature != '':
            status,displaymsg = self.getModelFeatures(modelSignature)
            if status:
                urltext = '/AION/'+modelSignature+'/features'
            else:
                displaymsg = json.dumps(datajson)
        else:
            displaymsg = json.dumps(datajson)
        msg="""
        URL:{url}
        RequestType: POST
        Content-Type=application/json
        Output: {displaymsg}.
        """.format(url=urltext,displaymsg=displaymsg)
        return(msg)
    def features_help(self,modelSignature):
        """Return a JSON description of the model's expected input fields,
        derived from its display.json (numeric/category per feature)."""
        home = expanduser("~")
        if platform.system() == 'Windows':
            display_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'display.json')
        else:
            display_path = os.path.join(home,'HCLT','AION','target',modelSignature,'display.json')
        #display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
        datajson = {'Body':'Data Should be in JSON Format'}
        if(os.path.isfile(display_path)):
            with open(display_path) as file:
                config = json.load(file)
                file.close()
            datajson={}
            for feature in config['numericalFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Numeric Value'
            for feature in config['nonNumericFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Category Value'
            for feature in config['textFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Category Value'
        displaymsg = json.dumps(datajson)
        return(displaymsg)
    def predict_help(self,modelSignature):
        """Build the help text for the /predict endpoint."""
        if modelSignature != '':
            displaymsg = self.features_help(modelSignature)
            urltext = '/AION/'+modelSignature+'/predict'
        else:
            datajson = {'Body':'Data Should be in JSON Format'}
            displaymsg = json.dumps(datajson)
            urltext = '/AION/UseCase_Version/predict'
        msg="""
        URL:{url}
        RequestType: POST
        Content-Type=application/json
        Body: {displaymsg}
        Output: prediction,probability(if Applicable),remarks corresponding to each row.
        """.format(url=urltext,displaymsg=displaymsg)
        return(msg)
    def performance_help(self,modelSignature):
        """Build the help text for the /performance endpoint."""
        if modelSignature != '':
            urltext = '/AION/'+modelSignature+'/performance'
        else:
            urltext = '/AION/UseCase_Version/performance'
        datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
        displaymsg = json.dumps(datajson)
        msg="""
        URL:{url}
        RequestType: POST
        Content-Type=application/json
        Body: {displaymsg}
        Output: HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
        return(msg)
    def monitoring_help(self,modelSignature):
        """Build the help text for the /monitoring endpoint."""
        if modelSignature != '':
            urltext = '/AION/'+modelSignature+'/monitoring'
        else:
            urltext = '/AION/UseCase_Version/monitoring'
        datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
        displaymsg = json.dumps(datajson)
        msg="""
        URL:{url}
        RequestType: POST
        Content-Type=application/json
        Body: {displaymsg}
        Output: Affected Columns. HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
        return(msg)
    def explain_help(self,modelSignature):
        """Build the help text for the /explain endpoint."""
        if modelSignature != '':
            displaymsg = self.features_help(modelSignature)
            urltext = '/AION/'+modelSignature+'/explain'
        else:
            datajson = {'Body':'Data Should be in JSON Format'}
            displaymsg = json.dumps(datajson)
            urltext = '/AION/UseCase_Version/explain'
        msg="""
        URL:{url}
        RequestType: POST
        Content-Type=application/json
        Body: {displaymsg}
        Output: anchor (Local Explanation),prediction,forceplot,multidecisionplot.""".format(url=urltext,displaymsg=displaymsg)
        return(msg)
    def help_text(self,modelSignature):
        """Aggregate every endpoint's help text into a single message."""
        predict_help = self.predict_help(modelSignature)
        explain_help = self.explain_help(modelSignature)
        features_help = self.getFeatures(modelSignature)
        monitoring_help = self.monitoring_help(modelSignature)
        performance_help = self.performance_help(modelSignature)
        msg="""
        Following URL:
        Prediction
        {predict_help}
        Local Explaination
        {explain_help}
        Features
        {features_help}
        Monitoring
        {monitoring_help}
        Performance
        {performance_help}
        """.format(predict_help=predict_help,explain_help=explain_help,features_help=features_help,monitoring_help=monitoring_help,performance_help=performance_help)
        return msg
    def do_GET(self):
        """Serve plain-text help for /AION/<model>/<topic>; 403 otherwise."""
        print("PYTHON ######## REQUEST ####### STARTED")
        if None != re.search('/AION/', self.path):
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            helplist = self.path.split('/')[-1]
            print(helplist)
            if helplist.lower() == 'help':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                msg = self.help_text(model)
            elif helplist.lower() == 'predict':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                msg = self.predict_help(model)
            elif helplist.lower() == 'explain':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                msg = self.explain_help(model)
            elif helplist.lower() == 'monitoring':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                msg = self.monitoring_help(model)
            elif helplist.lower() == 'performance':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                msg = self.performance_help(model)
            elif helplist.lower() == 'features':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =''
                status,msg = self.getModelFeatures(model)
            else:
                # Unknown topic: treat the last path segment as the model name.
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model =helplist
                msg = self.help_text(model)
            self.wfile.write(msg.encode())
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request on its own thread."""
    # Allow quick rebinds of the port after restart.
    allow_reuse_address = True
    def shutdown(self):
        """Close the listening socket, then perform the normal shutdown."""
        self.socket.close()
        HTTPServer.shutdown(self)
class SimpleHttpServer():
    """Thin wrapper running ThreadedHTTPServer on a background daemon thread."""
    def __init__(self, ip, port,username,password):
        # Bind the Basic-auth credentials into the handler class via partial.
        handler_class = partial(HTTPRequestHandler,username=username,password=password,)
        self.server = ThreadedHTTPServer((ip,port), handler_class)
    def start(self):
        """Start serving on a daemon thread; returns immediately."""
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
    def waitForThread(self):
        """Block until the serving thread exits."""
        self.server_thread.join()
    def stop(self):
        """Shut the server down and wait for the serving thread to finish."""
        self.server.shutdown()
        self.waitForThread()
def start_server(ip,port,username,password):
    """Launch the AION model service on (ip, port) with HTTP Basic auth.

    Blocks the calling thread for the lifetime of the server.
    """
    http_server = SimpleHttpServer(ip, int(port), username, password)
    print('HTTP Server Running...........')
    http_server.start()
    http_server.waitForThread()
|
aion_online_pipeline.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import logging
logging.getLogger('tensorflow').disabled = True
import shutil
import warnings
from config_manager.online_pipeline_config import OTAionConfigManager
from records import pushrecords
import logging
import mlflow
from pathlib import Path
from pytz import timezone
def pushRecordForOnlineTraining():
    """Register this online-training run in the usage records.

    Returns
    -------
    tuple
        (status, msg); on failure status is False and msg holds the error text.
    """
    try:
        from appbe.pages import getversion  # NOTE(review): imported but unused here — confirm whether needed for side effects
        # NOTE(review): AION_VERSION is not defined in the visible portion of
        # this module — presumably a module-level constant; confirm.
        status,msg = pushrecords.enterRecord(AION_VERSION)
    except Exception as e:
        print("Exception", e)
        status = False
        msg = str(e)
    return status,msg
def mlflowSetPath(path,experimentname):
    """Point mlflow at a local file store two levels above ``path`` and
    select (or create) the named experiment."""
    import mlflow
    tracking_uri = "file:" + str(Path(path).parent.parent) + "/mlruns"
    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(str(experimentname))
class server():
def __init__(self):
self.response = None
self.dfNumCols=0
self.dfNumRows=0
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj):
rowfilterexpression = ''
grouperbyjson = ''
model_tried=''
learner_type = ''
topics = {}
numericContinuousFeatures=''
discreteFeatures=''
threshold=-1
targetColumn = ''
categoricalFeatures=''
dataFolderLocation = ''
original_data_file = ''
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
featureReduction = 'False'
reduction_data_file=''
params={}
score = 0
labelMaps={}
featureDataShape=[]
self.riverModels = []
self.riverAlgoNames = ['Online Logistic Regression', 'Online Softmax Regression', 'Online Decision Tree Classifier', 'Online KNN Classifier', 'Online Linear Regression', 'Online Bayesian Linear Regression', 'Online Decision Tree Regressor','Online KNN Regressor']
#ConfigSettings
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
scoreParam = config_obj.getScoringCreteria()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
iterName = iterName.replace(" ", "_")
deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile = config_obj.createDeploymentFolders(deployLocation,iterName,iterVersion)
#Mlflow
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
#Logger
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('************* Version - v2.2.5 *************** \n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
log.info(msg)
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
#ConfigSetting
problemType,targetFeature,profilerStatus,selectorStatus,learnerStatus,visualizationstatus,deployStatus = config_obj.getModulesDetails()
selectorStatus = False
if(problemType.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
#DataReading
from transformations.dataReader import dataReader
objData = dataReader()
if os.path.isfile(dataLocation):
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
dataFrame.rename(columns=lambda x:x.strip(), inplace=True)
#FilterDataframe
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
#GroupDataframe
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
#KeepOnlyModelFtrs
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfNumRows=dataFrame.shape[0]
self.dfNumCols=dataFrame.shape[1]
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if profilerStatus:
log.info('\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
from incremental.incProfiler import incProfiler
incProfilerObj = incProfiler()
dataFrame,targetColumn,self.mFeatures,self.numericalFeatures,self.nonNumericFeatures,labelMaps,self.configDict,self.textFeatures,self.emptyFeatures,self.wordToNumericFeatures = incProfilerObj.startIncProfiler(dataFrame,profilerJson,targetFeature,deployLocation,problemType)
self.features = self.configDict['allFtrs']
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\n')
dataFrame.to_csv(profiled_data_file,index=False)
selectorStatus = False
if learnerStatus:
log.info('Status:-|... AION Learner data preparation started')
ldp_mlstart = time.time()
testPercentage = config_obj.getAIONTestTrainPercentage()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problemType.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if targetColumn != '':
targetData = dataFrame[targetColumn]
xtrain,ytrain,xtest,ytest = mlobj.split_into_train_test_data(featureData,targetData,testPercentage,modelType)
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
log.info('Status:-|... AION Learner data preparation completed')
if learnerStatus:
log.info('\n================== ML Started ==================')
log.info('Status:-|... AION training started')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
log.info('-------> Target Model Type:'+ modelType)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
from incremental.incMachineLearning import incMachineLearning
incMlObj = incMachineLearning(mlobj)
self.configDict['riverModel'] = False
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=incMlObj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,self.features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps)
if model in self.riverAlgoNames:
self.configDict['riverModel'] = True
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('Status:-|... AION training completed')
log.info('================== ML Completed ==================\n')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('Status:-|... AION Visualizer started')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfNumRows,self.dfNumCols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('Status:-|... AION Visualizer completed')
try:
os.remove(os.path.join(deployLocation,'aion_xai.py'))
except:
pass
if deployStatus:
if str(model) != 'None':
log.info('\n================== Deployment Started ==================')
log.info('Status:-|... AION Deployer started')
deployPath = deployLocation
deployer_mlstart = time.time()
src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','useCaseFiles')
shutil.copy2(os.path.join(src,'incBatchLearning.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchLearning.py'),os.path.join(deployPath,'aion_inclearning.py'))
shutil.copy2(os.path.join(src,'incBatchPrediction.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchPrediction.py'),os.path.join(deployPath,'aion_predict.py'))
self.configDict['modelName'] = str(model)
self.configDict['modelParams'] = params
self.configDict['problemType'] = problemType.lower()
self.configDict['score'] = score
self.configDict['metricList'] = []
self.configDict['metricList'].append(score)
self.configDict['trainRowsList'] = []
self.configDict['trainRowsList'].append(featureDataShape[0])
self.configDict['scoreParam'] = scoreParam
self.configDict['partialFit'] = 0
with open(os.path.join(deployLocation,'production', 'Config.json'), 'w', encoding='utf8') as f:
json.dump(self.configDict, f, ensure_ascii=False)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Batch Deployment completed')
log.info('================== Deployment Completed ==================')
# self.features = profilerObj.set_features(self.features,self.textFeatures,self.vectorizerFeatures)
self.matrix += '}'
self.trainmatrix += '}'
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
model_tried = eval('['+model_tried+']')
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
print(output_json)
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w') as f:
json.dump(output_json, f)
output_json = json.dumps(output_json)
log.info('\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfNumRows)+','+str(self.dfNumCols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\n')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
output_json = {"status":"FAIL","message":str(inst).strip('"')}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\nTotal execution time(sec) :'+str(executionTime))
log.info('\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output_json))
log.info('------------- Output JSON ------------\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def aion_ot_train_model(arg):
    """Run AION online training for the configuration JSON located at *arg*.

    Verifies the online-training license, reads the configuration file, and
    hands execution to the online-training server. Always prints the learner
    status (wrapped in blank lines) and returns it as a JSON string; failures
    are reported with status FAIL / LicenseVerificationFailed rather than
    raised to the caller.
    """
    warnings.filterwarnings('ignore')

    def _report(result):
        # Every exit path prints the status the same way before returning it.
        print("\n")
        print("aion_learner_status:", result)
        print("\n")
        return result

    try:
        licensed, msg = pushRecordForOnlineTraining()
        if not licensed:
            return _report(json.dumps({"status": "LicenseVerificationFailed", "message": str(msg).strip('"')}))
        training_server = server()
        config_manager = OTAionConfigManager()
        read_ok, msg = config_manager.readConfigurationFile(arg)
        if read_ok == False:
            return _report(json.dumps({"status": "FAIL", "message": str(msg).strip('"')}))
        return _report(training_server.startScriptExecution(config_manager))
    except Exception as inst:
        failure = json.dumps({"status": "FAIL", "message": str(inst).strip('"')})
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        return _report(failure)
# CLI entry point: expects the path of the online-training configuration JSON
# as the first command-line argument.
if __name__ == "__main__":
    aion_ot_train_model(sys.argv[1])
|
aion_mlac.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import os
from pathlib import Path
# NOTE(review): changing the process-wide working directory at import time is
# a global side effect felt by every other module in the process; presumably
# the mlac generators below rely on relative paths -- confirm before removing.
os.chdir(Path(__file__).parent)
import json
import shutil
from mlac.timeseries import app as ts_app
from mlac.ml import app as ml_app
import traceback
def create_test_file(config):
    """Write a ``run_pipeline.py`` driver script into ``<deploy_path>/MLaC``.

    The generated script runs each MLaC stage (ModelMonitoring, DataIngestion,
    DataTransformation, FeatureEngineering, every ModelTraining folder,
    ModelRegistry, ModelServing) as a subprocess, parsing the trailing JSON
    status each stage prints and stopping on the first ``"Status": "Failure"``.

    config : dict with at least
        ``dataLocation`` -- default input-data path baked into the script, and
        ``deploy_path``  -- root folder under which ``MLaC`` is created.
    """
    code_file = 'aionCode.py'
    # Template slots: {filename} = default data path, {search} = JSON status
    # marker each stage prints, {code_file} = per-stage script name.
    text = """
from pathlib import Path
import subprocess
import sys
import json
import argparse

def run_pipeline(data_path):
    print('Data Location:', data_path)
    cwd = Path(__file__).parent
    monitor_file = str(cwd/'ModelMonitoring'/'{code_file}')
    load_file = str(cwd/'DataIngestion'/'{code_file}')
    transformer_file = str(cwd/'DataTransformation'/'{code_file}')
    selector_file = str(cwd/'FeatureEngineering'/'{code_file}')
    train_folder = cwd
    register_file = str(cwd/'ModelRegistry'/'{code_file}')
    deploy_file = str(cwd/'ModelServing'/'{code_file}')
    print('Running modelMonitoring')
    cmd = [sys.executable, monitor_file, '-i', data_path]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)
    result = json.loads(result[result.find('{search}'):])
    if result['Status'] == 'Failure':
        exit()
    print('Running dataIngestion')
    cmd = [sys.executable, load_file]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)
    result = json.loads(result[result.find('{search}'):])
    if result['Status'] == 'Failure':
        exit()
    print('Running DataTransformation')
    cmd = [sys.executable, transformer_file]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)
    result = json.loads(result[result.find('{search}'):])
    if result['Status'] == 'Failure':
        exit()
    print('Running FeatureEngineering')
    cmd = [sys.executable, selector_file]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)
    result = json.loads(result[result.find('{search}'):])
    if result['Status'] == 'Failure':
        exit()
    train_models = [f for f in train_folder.iterdir() if 'ModelTraining' in f.name]
    for model in train_models:
        print('Running',model.name)
        cmd = [sys.executable, str(model/'{code_file}')]
        train_result = subprocess.check_output(cmd)
        train_result = train_result.decode('utf-8')
        print(train_result)
    print('Running ModelRegistry')
    cmd = [sys.executable, register_file]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)
    result = json.loads(result[result.find('{search}'):])
    if result['Status'] == 'Failure':
        exit()
    print('Running ModelServing')
    cmd = [sys.executable, deploy_file]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')
    print(result)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--inputPath', help='path of the input data')
    args = parser.parse_args()
    if args.inputPath:
        filename = args.inputPath
    else:
        filename = r"{filename}"
    try:
        print(run_pipeline(filename))
    except Exception as e:
        print(e)
""".format(filename=config['dataLocation'], search='{"Status":', code_file=code_file)
    # BUGFIX: the default-path line now uses the {filename} slot; previously
    # the filename= kwarg passed to .format() was never consumed, so the
    # generated script fell back to a meaningless hard-coded literal.
    deploy_path = Path(config["deploy_path"]) / 'MLaC'
    deploy_path.mkdir(parents=True, exist_ok=True)
    py_file = deploy_path / "run_pipeline.py"
    with open(py_file, "w") as f:
        f.write(text)
def is_module_in_req_file(mod, folder):
    """Return True if *mod* appears in ``<folder>/requirements.txt``.

    mod : str -- module name to look for (plain substring match, so 'numpy'
        also matches e.g. 'numpy-stubs').
    folder : str or Path -- folder expected to contain requirements.txt.

    BUGFIX: the original wrapped *folder* in Path() only for the existence
    check but then did ``folder/'requirements.txt'`` with the raw argument,
    raising TypeError whenever a plain str was passed. Normalise once.
    """
    req_file = Path(folder) / 'requirements.txt'
    if not req_file.is_file():
        return False
    with open(req_file, 'r') as f:
        return mod in f.read()
def copy_local_modules(config):
    """Copy locally developed helper modules into the generated MLaC folders.

    Reads ``config.json`` inside the local-modules folder; it maps a module
    file name to the list of MLaC stage folders that need it. A module is
    copied only when the target stage folder exists and its requirements.txt
    mentions the module name.

    config : dict with ``deploy_path`` and optionally
        ``local_modules_location`` (defaults to ``<this file>/local_modules``).
    """
    deploy_path = Path(config["deploy_path"])
    local_modules_location = config.get("local_modules_location", None)
    if local_modules_location:
        # BUGFIX: normalise to Path -- the original kept a configured str
        # location as-is and crashed on the / operator below.
        folder_loc = Path(local_modules_location)
    else:
        folder_loc = Path(__file__).parent / 'local_modules'
        if not folder_loc.exists():
            folder_loc = None
    if folder_loc:
        file = folder_loc / 'config.json'
        if file.exists():
            with open(file, 'r') as f:
                data = json.load(f)
            for key, values in data.items():
                local_module = folder_loc / key
                if local_module.exists():
                    for folder in values:
                        target_folder = deploy_path / 'MLaC' / folder
                        if target_folder.is_dir():
                            if is_module_in_req_file(key, target_folder):
                                shutil.copy(local_module, target_folder)
def validate(config):
    """Return the configuration's 'error' entry, or '' when none is present."""
    return config.get('error', '')
def generate_mlac_code(config):
    """Generate MLaC pipeline code from the config JSON file at path *config*.

    Raises ValueError when the parsed configuration carries an 'error' entry.
    Dispatches on 'problem_type' and returns the JSON status string produced
    by the matching generator; returns None for unsupported problem types
    (preserving the original fall-through behaviour).
    """
    with open(config, 'r') as f:
        config = json.load(f)
    error = validate(config)
    if error:
        raise ValueError(error)
    # CONSISTENCY FIX: compare case-insensitively like the time-series branch
    # already did; previously 'Classification' silently generated nothing.
    problem_type = config['problem_type'].lower()
    if problem_type in ['classification', 'regression']:
        return generate_mlac_ML_code(config)
    elif problem_type == 'timeseriesforecasting':  # task 11997
        return generate_mlac_TS_code(config)
def generate_mlac_ML_code(config):
    """Run every ML code-generation stage and return a JSON status string.

    On success the status carries the MLaC output location; any stage failure
    is caught, printed, and reported as {'Status': 'Failure', ...}.
    """
    try:
        # The generation stages run strictly in pipeline order.
        for stage in (ml_app.run_loader,
                      ml_app.run_transformer,
                      ml_app.run_selector,
                      ml_app.run_trainer,
                      ml_app.run_register,
                      ml_app.run_deploy,
                      ml_app.run_drift_analysis):
            stage(config)
        copy_local_modules(config)
        create_test_file(config)
        status = {'Status': 'SUCCESS',
                  'MLaC_Location': str(Path(config["deploy_path"]) / 'MLaC')}
    except Exception as Inst:
        status = {'Status': 'Failure', 'msg': str(Inst)}
        traceback.print_exc()
    return json.dumps(status)
def generate_mlac_TS_code(config):
    """Run every time-series code-generation stage and return a JSON status string.

    Mirrors generate_mlac_ML_code but uses the time-series generators and does
    not copy local modules.
    """
    try:
        # The generation stages run strictly in pipeline order.
        for stage in (ts_app.run_loader,
                      ts_app.run_transformer,
                      ts_app.run_selector,
                      ts_app.run_trainer,
                      ts_app.run_register,
                      ts_app.run_deploy,
                      ts_app.run_drift_analysis):
            stage(config)
        create_test_file(config)
        status = {'Status': 'SUCCESS',
                  'MLaC_Location': str(Path(config["deploy_path"]) / 'MLaC')}
    except Exception as Inst:
        status = {'Status': 'Failure', 'msg': str(Inst)}
        traceback.print_exc()
    return json.dumps(status)
aion_gluon.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
# Silence TensorFlow's logger before any autogluon/TF import can attach to it.
logging.getLogger('tensorflow').disabled = True
#from autogluon.tabular import TabularDataset, TabularPredictor
#from autogluon.core.utils.utils import setup_outputdir
#from autogluon.core.utils.loaders import load_pkl
#from autogluon.core.utils.savers import save_pkl
# NOTE(review): the autogluon imports above are commented out, yet the code
# below still references TabularDataset, TabularPredictor, setup_outputdir,
# load_pkl and save_pkl -- running this module as-is raises NameError.
# Presumably they are meant to be re-enabled when autogluon is installed;
# confirm before shipping.
import datetime, time, timeit
from datetime import datetime as dt
import os.path
import json
import io
import shutil
import sys
#from Gluon_MultilabelPredictor import MultilabelPredictor
class MultilabelPredictor():
    """ Tabular Predictor for predicting multiple columns in table.
    Creates multiple TabularPredictor objects which you can also use individually.
    You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
    Parameters
    ----------
    labels : List[str]
        The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
    path : str
        Path to directory where models and intermediate outputs should be saved.
        If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
        Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
        Otherwise files from first `fit()` will be overwritten by second `fit()`.
        Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
    problem_types : List[str]
        The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
    eval_metrics : List[str]
        The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
    consider_labels_correlation : bool
        Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
        If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
        Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
    kwargs :
        Arguments passed into the initialization of each TabularPredictor.
    """
    # File name (under self.path) where the whole multilabel predictor is pickled.
    multi_predictor_file = 'multilabel_predictor.pkl'

    def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs):
        if len(labels) < 2:
            raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).")
        # setup_outputdir returns a normalised path ending in a separator --
        # TODO confirm against the autogluon version in use.
        self.path = setup_outputdir(path, warn_if_exist=False)
        self.labels = labels
        self.consider_labels_correlation = consider_labels_correlation
        self.predictors = {}  # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
        if eval_metrics is None:
            self.eval_metrics = {}
        else:
            self.eval_metrics = {labels[i]: eval_metrics[i] for i in range(len(labels))}
        problem_type = None
        eval_metric = None
        for i in range(len(labels)):
            label = labels[i]
            path_i = self.path + "Predictor_" + label
            if problem_types is not None:
                problem_type = problem_types[i]
            if eval_metrics is not None:
                # BUGFIX: self.eval_metrics is keyed by label NAME; the
                # original indexed it with the positional integer i, raising
                # KeyError whenever eval_metrics was actually supplied.
                eval_metric = self.eval_metrics[label]
            self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs)

    def fit(self, train_data, tuning_data=None, **kwargs):
        """ Fits a separate TabularPredictor to predict each of the labels.
        Parameters
        ----------
        train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            See documentation for `TabularPredictor.fit()`.
        kwargs :
            Arguments passed into the `fit()` call for each TabularPredictor.
        """
        if isinstance(train_data, str):
            train_data = TabularDataset(train_data)
        if tuning_data is not None and isinstance(tuning_data, str):
            tuning_data = TabularDataset(tuning_data)
        # Keep pristine copies: each iteration drops a different label subset.
        train_data_og = train_data.copy()
        if tuning_data is not None:
            tuning_data_og = tuning_data.copy()
        save_metrics = len(self.eval_metrics) == 0
        for i in range(len(self.labels)):
            label = self.labels[i]
            predictor = self.get_predictor(label)
            if not self.consider_labels_correlation:
                # Independent mode: hide every other label column.
                labels_to_drop = [l for l in self.labels if l != label]
            else:
                # Auto-regressive mode: earlier labels stay visible as features.
                labels_to_drop = [self.labels[j] for j in range(i + 1, len(self.labels))]
            train_data = train_data_og.drop(labels_to_drop, axis=1)
            if tuning_data is not None:
                tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
            print(f"Fitting TabularPredictor for label: {label} ...")
            predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
            # Store the path, not the object, to keep this object lightweight.
            self.predictors[label] = predictor.path
            if save_metrics:
                self.eval_metrics[label] = predictor.eval_metric
        self.save()

    # NOTE(review): this method is shadowed by the instance attribute of the
    # same name assigned in __init__, so it is unreachable on instances
    # (inst.eval_metrics yields the dict, not this method). Kept unchanged for
    # backward compatibility.
    def eval_metrics(self):
        return(self.eval_metrics)

    def predict(self, data, **kwargs):
        """ Returns DataFrame with label columns containing predictions for each label.
        Parameters
        ----------
        data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
        kwargs :
            Arguments passed into the predict() call for each TabularPredictor.
        """
        return self._predict(data, as_proba=False, **kwargs)

    def predict_proba(self, data, **kwargs):
        """ Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
        Parameters
        ----------
        data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
        kwargs :
            Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
        """
        return self._predict(data, as_proba=True, **kwargs)

    def evaluate(self, data, **kwargs):
        """ Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
        Parameters
        ----------
        data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to evalate predictions of all labels for, must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
        kwargs :
            Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
        """
        data = self._get_data(data)
        eval_dict = {}
        for label in self.labels:
            print(f"Evaluating TabularPredictor for label: {label} ...")
            predictor = self.get_predictor(label)
            eval_dict[label] = predictor.evaluate(data, **kwargs)
            if self.consider_labels_correlation:
                # Auto-regressive mode: feed this label's predictions to the
                # predictors of the remaining labels.
                data[label] = predictor.predict(data, **kwargs)
        return eval_dict

    def save(self):
        """ Save MultilabelPredictor to disk. """
        # Replace live predictor objects with their paths so the pickle stays small.
        for label in self.labels:
            if not isinstance(self.predictors[label], str):
                self.predictors[label] = self.predictors[label].path
        save_pkl.save(path=self.path + self.multi_predictor_file, object=self)
        print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')")

    @classmethod
    def load(cls, path):
        """ Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """
        path = os.path.expanduser(path)
        if path[-1] != os.path.sep:
            path = path + os.path.sep
        return load_pkl.load(path=path + cls.multi_predictor_file)

    def get_predictor(self, label):
        """ Returns TabularPredictor which is used to predict this label. """
        predictor = self.predictors[label]
        if isinstance(predictor, str):
            # Predictors are stored as paths after fit()/save(): lazy-load.
            return TabularPredictor.load(path=predictor)
        return predictor

    def _get_data(self, data):
        # Accept a path or a frame; always hand back a frame the caller may mutate.
        if isinstance(data, str):
            return TabularDataset(data)
        return data.copy()

    def _predict(self, data, as_proba=False, **kwargs):
        # Shared driver for predict()/predict_proba(); labels are predicted in
        # order so earlier predictions can feed later predictors.
        data = self._get_data(data)
        if as_proba:
            predproba_dict = {}
        for label in self.labels:
            print(f"Predicting with TabularPredictor for label: {label} ...")
            predictor = self.get_predictor(label)
            if as_proba:
                predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs)
            data[label] = predictor.predict(data, **kwargs)
        if not as_proba:
            return data[self.labels]
        else:
            return predproba_dict
def aion_train_gluon(arg):
    """Train AutoGluon model(s) as described by the AION config JSON at *arg*.

    Depending on config['basic']['analysisType'] this runs either
    multiLabelPrediction (one TabularPredictor per target column via
    MultilabelPredictor) or multiModalLearning (an autogluon TextPredictor).
    Creates the deployment folder tree (log/, etc/), writes the prediction
    config and prediction script into it, and returns the learner status as a
    JSON string (also printed as "aion_learner_status:").
    """
    configFile = arg
    with open(configFile, 'rb') as cfile:
        data = json.load(cfile)
    rootElement = data['basic']
    modelname = rootElement['modelName']
    version = rootElement['modelVersion']
    dataLocation = rootElement['dataLocation']
    deployFolder = rootElement['deployLocation']
    analysisType = rootElement['analysisType']
    # NOTE(review): read but never used below; kept so a missing key still
    # fails early like the original.
    testPercentage = data['advance']['testPercentage']
    deployLocation = os.path.join(deployFolder, modelname + '_' + version)
    try:
        os.makedirs(deployLocation)
    except OSError:
        # Leftovers from a previous run: recreate the folder from scratch.
        shutil.rmtree(deployLocation)
        os.makedirs(deployLocation)
    logLocation = os.path.join(deployLocation, 'log')
    try:
        os.makedirs(logLocation)
    except OSError:
        pass
    etcLocation = os.path.join(deployLocation, 'etc')
    try:
        os.makedirs(etcLocation)
    except OSError:
        pass
    # Dedicated file logger for this training run.
    logFileName = os.path.join(deployLocation, 'log', 'model_training_logs.log')
    filehandler = logging.FileHandler(logFileName, 'w', 'utf-8')
    formatter = logging.Formatter('%(message)s')
    filehandler.setFormatter(formatter)
    log = logging.getLogger('eion')
    log.propagate = False
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            log.removeHandler(hdlr)
    log.addHandler(filehandler)
    log.setLevel(logging.INFO)
    log.info('************* Version - v1.2.0 *************** \n')
    msg = '-------> Execution Start Time: ' + dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    log.info(msg)
    dataLabels = rootElement['targetFeature'].split(',')
    # Create and Write the config file used in Prediction
    # ----------------------------------------------------------------------------#
    tdata = TabularDataset(dataLocation)
    # Random (unseeded) 80/20 train/evaluation split.
    train_data = tdata.sample(frac = 0.8)
    test_data = tdata.drop(train_data.index)
    if rootElement['trainingFeatures'] != '':
        trainingFeatures = rootElement['trainingFeatures'].split(',')
    else:
        trainingFeatures = list(train_data.columns)
    features = trainingFeatures
    # Target columns must be part of the modelling frame.
    for x in dataLabels:
        if x not in features:
            features.append(x)
    # Drop index and datetime columns from the modelling frame.
    indexFeature = rootElement['indexFeature']
    if indexFeature != '':
        indexFeature = indexFeature.split(',')
        for x in indexFeature:
            if x in features:
                features.remove(x)
    dateTimeFeature = rootElement['dateTimeFeature']
    if dateTimeFeature != '':
        dateTimeFeature = dateTimeFeature.split(',')
        for x in dateTimeFeature:
            if x in features:
                features.remove(x)
    train_data = train_data[features]
    test_data = test_data[features]
    configJsonFile = {"targetFeature": dataLabels, "features": ",".join([feature for feature in features])}
    configJsonFilePath = os.path.join(deployLocation, 'etc', 'predictionConfig.json')
    if len(dataLabels) == 1 and analysisType['multiLabelPrediction'] == "False":
        # Single-target non-multilabel runs pass the raw string downstream.
        dataLabels = rootElement['targetFeature']
    with io.open(configJsonFilePath, 'w', encoding='utf8') as outfile:
        str_ = json.dumps(configJsonFile, ensure_ascii=False)
        outfile.write(str_)
    # ----------------------------------------------------------------------------#
    # BUGFIX: `output` used to be unbound when neither analysis type was
    # enabled, which crashed with NameError at json.dumps(output) below.
    output = {'status': 'FAIL', 'message': 'No supported analysisType enabled: expected multiLabelPrediction or multiModalLearning to be "True"'}
    if analysisType['multiLabelPrediction'] == "True":
        # Copy and Write the Predictiion script file into deployment location
        # ----------------------------------------------------------------------------#
        srcFile = os.path.join(os.path.dirname(__file__), 'gluon', 'AION_Gluon_MultiLabelPrediction.py')
        dstFile = os.path.join(deployLocation, 'aion_predict.py')
        shutil.copy(srcFile, dstFile)
        # ----------------------------------------------------------------------------#
        labels = dataLabels  # which columns to predict based on the others
        save_path = os.path.join(deployLocation, 'ModelPath')  # specifies folder to store trained models
        time_limit = 5  # how many seconds to train the TabularPredictor for each label
        log.info('Status:-|... AION Gluon Start')
        try:
            if len(labels) < 2:
                log.info('Status:-|... AION Evaluation Error: Target should be multiple column')
                # ----------------------------------------------------------------------------#
                output = {'status': 'FAIL', 'message': 'Number of target variable should be 2 or more than 2'}
            else:
                multi_predictor = MultilabelPredictor(labels=labels, path=save_path)
                multi_predictor.fit(train_data, time_limit=time_limit)
                log.info('Status:-|... AION Gluon Stop')
                log.info('Status:-|... AION Evaluation Start')
                trainevaluations = multi_predictor.evaluate(train_data)
                testevaluations = multi_predictor.evaluate(test_data)
                best_model = {}
                for label in labels:
                    predictor_class = multi_predictor.get_predictor(label)
                    # (Original called get_model_best() twice; the first,
                    # discarded call was removed.)
                    best_model[label] = predictor_class.get_model_best()
                log.info('Status:-|... AION Evaluation Stop')
                # ----------------------------------------------------------------------------#
                # NOTE(review): 'BestModel' appears twice in this literal; the
                # later per-label dict wins -- kept to preserve output shape.
                output = {'status':'SUCCESS','data':{'ModelType':'MultiLabelPrediction','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'ACCURACY','deployLocation':deployLocation,'matrix':trainevaluations,'testmatrix':testevaluations,'BestModel':best_model, 'LogFile':logFileName}}
        except Exception as inst:
            log.info('Status:-|... AION Gluon Error')
            output = {"status": "FAIL", "message": str(inst).strip('"')}
    if analysisType['multiModalLearning'] == "True":
        from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
        from autogluon.text import TextPredictor
        # check the system and then set the equivalent flag
        # ----------------------------------------------------------------------------#
        os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "0"
        if get_gpu_count() == 0:
            os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "1"
        # ----------------------------------------------------------------------------#
        # Copy and Write the Predictiion script file into deployment location
        # ----------------------------------------------------------------------------#
        srcFile = os.path.join(os.path.dirname(__file__), 'gluon', 'AION_Gluon_MultiModalPrediction.py')
        dstFile = os.path.join(deployLocation, 'aion_predict.py')
        shutil.copy(srcFile, dstFile)
        time_limit = None  # set to larger value in your applications
        save_path = os.path.join(deployLocation, 'text_prediction')
        predictor = TextPredictor(label=dataLabels, path=save_path)
        predictor.fit(train_data, time_limit=time_limit)
        log.info('Status:-|... AION Gluon Stop')
        log.info('Status:-|... AION Evaluation Start')
        trainevaluations = predictor.evaluate(train_data)
        log.info('Status:-|... AION Evaluation Stop')
        # ----------------------------------------------------------------------------#
        output = {'status':'SUCCESS','data':{'ModelType':'MultiModelLearning','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'SCORE','deployLocation':deployLocation,'matrix':trainevaluations,'LogFile':logFileName}}
    output = json.dumps(output)
    print("\n")
    print("aion_learner_status:", output)
    print("\n")
    log.info('\n------------- Output JSON ------------')
    log.info('-------> Output :' + str(output))
    log.info('------------- Output JSON ------------\n')
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            hdlr.close()
            log.removeHandler(hdlr)
    return(output)
if __name__ == "__main__":
    # CLI entry: argv[1] is the path to the Gluon training configuration JSON.
    aion_train_gluon(sys.argv[1]) |
aion_sagemaker.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
from pandas import json_normalize
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from sagemaker.aionMlopsService import aionMlopsService
import logging
import os.path
from os.path import expanduser
import platform,sys
from pathlib import Path
from sklearn.model_selection import train_test_split
def getAWSConfiguration(mlops_params, log):
    """Read the AWS Sagemaker connection settings from the mlops config.

    Missing/empty values are normalised to "" and an error line is logged.
    Returns (awsId, awsAccesskeyid, awsSecretaccesskey, awsSessiontoken,
    awsRegion, IAMSagemakerRoleArn).
    """
    aws = mlops_params['awsSagemaker']
    def _required(key, label):
        # `not value` already covers both None and "", so the original
        # `(not v) or (v is None)` double-check collapses to a single test.
        value = aws[key]
        if not value:
            log.info(label + ' error. ')
            return ""
        return value
    awsId = _required('awsID', 'awsId')
    awsAccesskeyid = _required('accesskeyID', 'awsAccesskeyid')
    awsSecretaccesskey = _required('secretAccesskey', 'awsSecretaccesskey')
    awsSessiontoken = _required('sessionToken', 'awsSessiontoken')
    awsRegion = _required('region', 'awsRegion')
    IAMSagemakerRoleArn = _required('IAMSagemakerRoleArn', 'IAMSagemakerRoleArn')
    return awsId, awsAccesskeyid, awsSecretaccesskey, awsSessiontoken, awsRegion, IAMSagemakerRoleArn
def getMlflowParams(mlops_params, log):
    """Extract mlflow/sagemaker deployment settings from the mlops config,
    applying defaults for missing or empty values.

    Returns (modelInput, data, mlflowtosagemakerDeploy,
    mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName,
    mlflowtosagemakerdeployModeluri, cloudInfrastructure, endpointName,
    experimentName, mlflowContainerName).
    """
    modelInput = mlops_params['modelInput']
    data = mlops_params['data']
    mlflowtosagemakerDeploy = mlops_params['sagemakerDeploy']
    if not mlflowtosagemakerDeploy:
        mlflowtosagemakerDeploy = "True"
    mlflowtosagemakerPushOnly = mlops_params['deployExistingModel']['status']
    if not mlflowtosagemakerPushOnly:
        mlflowtosagemakerPushOnly = "False"
    mlflowtosagemakerPushImageName = mlops_params['deployExistingModel']['dockerImageName']
    if not mlflowtosagemakerPushImageName:
        mlflowtosagemakerPushImageName = "mlops_image"
    mlflowtosagemakerdeployModeluri = mlops_params['deployExistingModel']['deployModeluri']
    if not mlflowtosagemakerdeployModeluri:
        mlflowtosagemakerdeployModeluri = "None"
        log.info('mlflowtosagemakerdeployModeluri error. ')
    cloudInfrastructure = mlops_params['modelOutput']['cloudInfrastructure']
    if not cloudInfrastructure:
        cloudInfrastructure = "Sagemaker"
    endpointName = mlops_params['endpointName']
    if not endpointName:
        # BUG FIX: the default was assigned to an unused local
        # (sagemakerAppName) while endpointName stayed empty/None, so the
        # experiment and container names became the string "None".
        endpointName = "aion-demo-app"
        log.info('endpointName not given, setting default one. ')
    experimentName = str(endpointName)
    mlflowContainerName = str(endpointName)
    return modelInput, data, mlflowtosagemakerDeploy, mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName, mlflowtosagemakerdeployModeluri, cloudInfrastructure, endpointName, experimentName, mlflowContainerName
def getPredictionParams(mlops_params, log):
    """Return (predictStatus, targetFeature) from the prediction section.

    predictStatus defaults to "False" when missing/empty; when prediction is
    enabled, missing model/data inputs are reported through the logger.
    """
    predictStatus = mlops_params['prediction']['status']
    if not predictStatus:
        predictStatus = "False"
    model_source = mlops_params['modelInput']
    data_source = mlops_params['data']
    if predictStatus.lower() == "true":
        if not model_source:
            log.info('prediction model input error.Please check given model file or its path for prediction ')
        if not data_source:
            log.info('prediction data input error.Please check given data file or its path for prediction ')
    return mlops_params['prediction']['status'] if mlops_params['prediction']['status'] else "False", mlops_params['prediction']['target'] if False else mlops_params['prediction']['target'] if predictStatus else None
def sagemakerPrediction(mlopsobj, data, log):
    """Run a prediction against the deployed Sagemaker endpoint.

    mlopsobj: aionMlopsService-like object exposing predict_sm_app_endpoint(df).
    data: record(s) to predict on; flattened to a DataFrame via json_normalize.
    log: logger for status reporting.
    Returns (outputjson, predictionStatus) where outputjson is always a JSON
    string ({"status": ..., ...}) and predictionStatus is True on success.
    """
    df = json_normalize(data)
    predictionStatus = False
    try:
        endpointPrediction = mlopsobj.predict_sm_app_endpoint(df)
        if endpointPrediction is None:
            log.info('Sagemaker endpoint application prediction Issue.')
            outputjson = json.dumps({"status": "Error", "msg": "Sagemaker endpoint application prediction Issue"})
        else:
            log.info("sagemaker end point Prediction: \n" + str(endpointPrediction))
            df['prediction'] = endpointPrediction
            records = df.to_json(orient='records')
            outputjson = json.dumps({"status": "SUCCESS", "data": json.loads(records)})
            predictionStatus = True
    except Exception as e:
        # BUG FIX: previously the error JSON was built and then immediately
        # overwritten with None, so the caller never saw the failure reason.
        outputjson = json.dumps({"status": "Error", "msg": str(e)})
        predictionStatus = False
    return outputjson, predictionStatus
## Main aion sagemaker fn call
def sagemaker_exec(mlops_params, log):
    """Deploy a model to AWS Sagemaker (or a local docker container) and/or
    run a prediction against an already deployed Sagemaker endpoint.

    mlops_params: parsed mlops configuration dict.
    log: logger used for status reporting.
    Returns a JSON string describing the deploy/prediction outcome.
    """
    modelInput, data, mlflowtosagemakerDeploy, mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName, mlflowtosagemakerdeployModeluri, cloudInfrastructure, endpointName, experimentName, mlflowContainerName = getMlflowParams(mlops_params, log)
    mlflowModelname = None
    awsId, awsAccesskeyid, awsSecretaccesskey, awsSessiontoken, awsRegion, IAMSagemakerRoleArn = getAWSConfiguration(mlops_params, log)
    predictStatus, targetFeature = getPredictionParams(mlops_params, log)
    sagemakerAppName = str(endpointName)
    # aws ecr model app_name should contain only [A-Za-z0-9-]; recheck here.
    import re
    if sagemakerAppName:
        pattern = re.compile("[A-Za-z0-9-]+")
        if pattern.fullmatch(sagemakerAppName) is None:
            log.info('wrong sagemaker Application Name, Nmae should contains only [A-Za-z0-9-] .')
            # BUG FIX: the fallback was stored in an unused local (app_name)
            # while the invalid name was still used downstream.
            sagemakerAppName = 'aion-demo-app'
    else:
        sagemakerAppName = 'aion-demo-app'
    # Following 3 aws parameter values are hard coded because currently unused.
    # If AION starts using these options, wire them up from the GUI.
    sagemakerDeployOption = "create"
    deleteAwsecrRepository = "False"
    ecrRepositoryName = "aion_test_repo"
    log.info('mlops parameter check done.')
    deploystatus = 'SUCCESS'
    msg = ''  # BUG FIX: msg could be unbound in the except handler below
    try:
        log.info('cloudInfrastructure: ' + str(cloudInfrastructure))
        if cloudInfrastructure.lower() == "sagemaker":
            if predictStatus.lower() == "true":
                # Prediction against an already deployed sagemaker endpoint.
                model = None
                mlopsobj = aionMlopsService(model, mlflowtosagemakerDeploy, mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName, mlflowtosagemakerdeployModeluri, experimentName, mlflowModelname, awsAccesskeyid, awsSecretaccesskey, awsSessiontoken, mlflowContainerName, awsRegion, awsId, IAMSagemakerRoleArn, sagemakerAppName, sagemakerDeployOption, deleteAwsecrRepository, ecrRepositoryName)
                outputjson, predictionStatus = sagemakerPrediction(mlopsobj, data, log)
                print("predictions: " + str(outputjson))
                return (outputjson)
            else:
                if Path(modelInput).is_file():
                    model = joblib.load(modelInput)
                    ProblemName = model.__class__.__name__
                    mlflowModelname = str(ProblemName)
                    log.info('aion mlops Model name: ' + str(mlflowModelname))
                    mlopsobj = aionMlopsService(model, mlflowtosagemakerDeploy, mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName, mlflowtosagemakerdeployModeluri, experimentName, mlflowModelname, awsAccesskeyid, awsSecretaccesskey, awsSessiontoken, mlflowContainerName, awsRegion, awsId, IAMSagemakerRoleArn, sagemakerAppName, sagemakerDeployOption, deleteAwsecrRepository, ecrRepositoryName)
                    mlflow2sm_status, localhost_container_status = mlopsobj.mlflow2sagemaker_deploy()
                    log.info('mlflow2sm_status: ' + str(mlflow2sm_status))
                    log.info('localhost_container_status: ' + str(localhost_container_status))
                    # Checking deploy status
                    if mlflowtosagemakerPushOnly.lower() == "true":
                        if mlflow2sm_status.lower() == "success":
                            deploystatus = 'SUCCESS'
                            msg = 'Endpoint succesfully deployed in sagemaker'
                            log.info('Endpoint succesfully deployed in sagemaker (Push eisting model container).\n ')
                        elif mlflow2sm_status.lower() == "failed":
                            deploystatus = 'ERROR'
                            msg = 'Endpoint failed to deploy in sagemaker'
                            log.info('Endpoint failed to deploy in sagemaker. (Push eisting model container).\n ')
                    elif mlflowtosagemakerDeploy.lower() == "true":
                        if mlflow2sm_status.lower() == "success":
                            deploystatus = 'SUCCESS'
                            msg = 'Endpoint succesfully deployed in sagemaker'
                            log.info('Endpoint succesfully deployed in sagemaker')
                        elif mlflow2sm_status.lower() == "failed":
                            deploystatus = 'ERROR'
                            msg = 'Endpoint failed to deploy in sagemaker'
                            log.info('Endpoint failed to deploy in sagemaker.\n ')
                        elif mlflow2sm_status.lower() == "notdeployed":
                            # BUG FIX: was compared against "Notdeployed", which a
                            # lowercased string can never equal.
                            deploystatus = 'ERROR'
                            msg = 'Sagemaker compatible container created'
                            log.info('sagemaker endpoint not deployed, check aws connection and credentials. \n')
                    elif mlflowtosagemakerDeploy.lower() == "false":
                        if localhost_container_status.lower() == "success":
                            deploystatus = 'SUCCESS'
                            msg = 'Localhost mlops docker created successfully'
                            log.info('Localhost mlops docker created successfully. \n')
                        elif localhost_container_status.lower() == "failed":
                            deploystatus = 'ERROR'
                            msg = 'Localhost mlops docker created failed'
                            log.info('Localhost mlops docker creation failed. \n')
                        elif localhost_container_status.lower() == "notdeployed":
                            # BUG FIX: same impossible "Notdeployed" comparison.
                            deploystatus = 'ERROR'
                            msg = 'Localhost mlops docker not deployed'
                            log.info('Localhost mlops docker not deployed, check local docker status. \n')
                else:
                    deploystatus = 'ERROR'
                    msg = 'Model Path not Found'
                    print('Error: Model Path not Found')
        outputjson = json.dumps({"status": str(deploystatus), "data": str(msg)})
        print("predictions: " + str(outputjson))
        return (outputjson)
    except Exception as inst:
        # BUG FIX: previously reported the pre-exception deploystatus/msg
        # (possibly 'SUCCESS'), hiding the actual failure.
        deploystatus = 'ERROR'
        msg = str(inst)
        outputjson = json.dumps({"status": str(deploystatus), "data": str(msg)})
        print("predictions: " + str(outputjson))
        return (outputjson)
def aion_sagemaker(config):
    """Entry point for AION sagemaker deployment/prediction.

    config: mlops parameter dict (already parsed from JSON).
    Sets up a per-run log file under the application log location, then
    delegates to sagemaker_exec. Returns a JSON status string.
    """
    try:
        mlops_params = config
        print(mlops_params)
        from appbe.dataPath import LOG_LOCATION
        sagemakerLogLocation = LOG_LOCATION
        try:
            # exist_ok covers the "already created" race the original
            # handled with a manual os.path.exists() check.
            os.makedirs(sagemakerLogLocation, exist_ok=True)
        except OSError:
            raise OSError('sagemakerLogLocation error.')
        filename_mlops = 'mlopslog_' + str(int(time.time())) + '.log'
        filepath = os.path.join(sagemakerLogLocation, filename_mlops)
        logging.basicConfig(filename=filepath, format='%(message)s', filemode='w')
        log = logging.getLogger('aionMLOps')
        log.setLevel(logging.DEBUG)
        output = sagemaker_exec(mlops_params, log)
        return output
    except Exception as inst:
        print(inst)
        deploystatus = 'ERROR'
        output = json.dumps({"status": str(deploystatus), "data": str(inst)})
        print("predictions: " + str(output))
        return (output)
# Script entry point: argv[1] carries the mlops configuration as a JSON string.
if __name__=='__main__':
    json_config = str(sys.argv[1])
    output = aion_sagemaker(json.loads(json_config))
|
mergeLogs.py | import json
from pathlib import Path
import shutil
class mergeLogs():
    """Merge the artefacts of several parallel AION training runs (one
    deploy folder per run, suffixed _0, _1, ...) back into a single base
    folder, keeping the run with the best score as the canonical result."""
    def __init__(self, folders, dataLocation=None):
        # folders: deploy folders of the individual runs
        # dataLocation: optional override for dataLocation in the merged config
        self.folders = [Path(x) for x in folders]
        self.dataLocation = dataLocation
        self.baseFolder = ""   # final merged folder (instance suffix stripped)
        self.outputData = {}   # parsed output JSON of the best-scoring run
    def readOutputStr(self, data):
        # NOTE(review): computes the position of the output marker but
        # discards it and returns None — appears unused/incomplete; confirm
        # before removing.
        text = "-------> Output :"
        output = data.find(text)
    def keywithmaxval(self, d):
        """ a) create a list of the dict's keys and values;
        b) return the key with the max value"""
        v=list(d.values())
        k=list(d.keys())
        return k[v.index(max(v))]
    def getBestScore(self, data):
        # Parse the "-------> Output :<json>" line from the log lines,
        # cache the parsed JSON and return the run's BestScore.
        text = "-------> Output :"
        output = [x[len(text):-1] for x in data if text in x]  # [:-1] drops the newline
        self.outputData = json.loads(output[0])
        return self.outputData['data']['BestScore']
    def getModelParams(self, data):
        """Extract the per-feature-selection-method model training sections
        from the log lines.

        Returns a dict mapping 'correlationBased' / 'modelBased' to the raw
        log text of the corresponding ClassifierModel section."""
        capture = False
        startText = "---------- ClassifierModel has started ----------"
        endText = "---------- ClassifierModel End ---------- "
        modelBasedText = "Models Based Selected Features Start"
        CorrelationBased = "Top/CorrelationBased Features Start"
        removableText = "Status:- |... Search Optimization Method applied: random\n"
        modelsParam = []
        modelcorrelation = None   # which section we are currently inside
        output = {}
        for x in data:
            if endText in x:
                # end of a section: flush the captured lines
                capture = False
                output[modelcorrelation] = ''.join(modelsParam)
                modelcorrelation = None
                modelsParam = []
            elif capture:
                if x != removableText:
                    modelsParam.append(x)
            elif startText in x:
                capture = True
            elif modelBasedText in x:
                modelcorrelation = 'modelBased'
            elif CorrelationBased in x:
                modelcorrelation = 'correlationBased'
        return output
    def mergeConfigFiles(self, bestScoreFolder):
        """Normalise the best run's configs: strip the instance suffix from
        the version fields and append the other runs' EvaluatedModels into
        its output.json."""
        # path is already updated
        with open(bestScoreFolder/'etc'/'code_config.json', 'r') as f:
            config = json.load(f)
        if self.dataLocation:
            config['dataLocation'] = self.dataLocation
        if 'modelVersion' in config.keys():
            # drop the trailing "_<instance index>" from the version
            config['modelVersion'] = '_'.join(config['modelVersion'].split('_')[:-1])
        with open(bestScoreFolder/'etc'/'code_config.json', 'w') as f:
            json.dump(config, f, indent=4)
        with open(bestScoreFolder/'etc'/'display.json', 'r') as f:
            config = json.load(f)
        if 'version' in config.keys():
            config['version'] = '_'.join(config['version'].split('_')[:-1])
        with open(bestScoreFolder/'etc'/'display.json', 'w') as f:
            json.dump(config, f, indent=4)
        if len(self.folders) > 1:
            # merge every other run's evaluated models into the best run's output
            with open(bestScoreFolder/'etc'/'output.json', 'r') as f:
                config = json.load(f)
            evaluated_models = config['data']['EvaluatedModels']
            for folder in self.folders:
                if folder != bestScoreFolder:
                    with open(folder/'etc'/'output.json', 'r') as f:
                        sub_config = json.load(f)
                    for evaluated_model in sub_config['data']['EvaluatedModels']:
                        evaluated_models.append(evaluated_model)
            with open(bestScoreFolder/'etc'/'output.json', 'w') as f:
                config['data']['EvaluatedModels'] = evaluated_models
                json.dump(config, f, indent=4)
    def mergeLogFiles(self, bestScoreFolder, data):
        """Splice the other runs' ClassifierModel log sections into the best
        run's training log, at the end of the matching feature-selection
        section, then rewrite the folder path to the base folder.

        data: {run folder: {'correlationBased'/'modelBased': log text}}
        as produced by getModelParams."""
        startText = "---------- ClassifierModel has started ----------\n"
        endText = "---------- ClassifierModel End ---------- \n"
        modelBasedText = "Models Based Selected Features Start"
        CorrelationBased = "Top/CorrelationBased Features Start"
        with open(bestScoreFolder/'log'/'model_training_logs.log', 'r') as f:
            text = f.read()
        CorrelationBasedIndex = text.find(CorrelationBased)
        modelBasedTextIndex = text.find(modelBasedText)
        firstendIndex = text.find(endText)
        # numOfMethods: how many feature-selection sections exist in the log
        numOfMethods = 0
        if CorrelationBasedIndex > 0:
            numOfMethods += 1
        if modelBasedTextIndex > 0:
            numOfMethods += 1
        if numOfMethods == 2:
            secondendIndex = text[firstendIndex+ len(endText):].find(endText) +firstendIndex+len(endText)
        # assuming correlation is always first
        for k,v in data.items():
            if k != bestScoreFolder:
                if 'correlationBased' in v.keys():
                    # insert before the first section's end marker and shift
                    # the insertion points by the inserted length
                    text = text[:firstendIndex] + v['correlationBased'] + text[firstendIndex:]
                    firstendIndex += len(v['correlationBased'])
                    if numOfMethods == 2:
                        secondendIndex += len(v['correlationBased'])
                if 'modelBased' in v.keys():
                    if numOfMethods == 2:
                        text = text[:secondendIndex] + v['modelBased'] + text[secondendIndex:]
                        secondendIndex += len(v['modelBased'])
                    else:
                        text = text[:firstendIndex] + v['modelBased'] + text[firstendIndex:]
                        firstendIndex += len(v['modelBased'])
        with open(bestScoreFolder/'log'/'model_training_logs.log', 'w') as f:
            # make paths in the merged log point at the final base folder
            text = text.replace(str(bestScoreFolder), str(self.baseFolder))
            f.write(text)
    def mergeFolder(self):
        """Pick the best-scoring run, merge the other runs' logs/configs into
        it, rename it to the base folder and delete the per-run folders.

        Returns the parsed output JSON of the best run."""
        bestScoreInFile = {}
        modelsTrainOutput = {}
        # base folder name = first run folder with its "_<index>" suffix removed
        self.baseFolder = self.folders[0].parent/"_".join(self.folders[0].name.split('_')[:-1])
        if len(self.folders) == 1:
            # single run: just rename it to the base folder
            if self.baseFolder.exists():
                shutil.rmtree(self.baseFolder)
            self.folders[0].rename(self.baseFolder)
        else:
            for folder in self.folders:
                with open(folder/'log'/'model_training_logs.log', 'r') as f:
                    data = f.readlines()
                bestScoreInFile[folder] = self.getBestScore(data)
                modelsTrainOutput[folder] = self.getModelParams(data)
            bestScoreFolder = self.keywithmaxval(bestScoreInFile)
            self.mergeLogFiles(bestScoreFolder, modelsTrainOutput )
            self.mergeConfigFiles(bestScoreFolder)
            if self.baseFolder.exists():
                shutil.rmtree(self.baseFolder)
            bestScoreFolder.rename(self.baseFolder)
            #remove extra folders
            for folder in self.folders:
                if folder.exists():
                    shutil.rmtree(folder)
        return self.outputData
|
aion_aws_training.py | import json
import sys,os
from pathlib import Path, PurePosixPath
from fabric import Connection
import tarfile
import copy
from hyperscalers.cloudServer import awsGPUTraining
import time
import shutil
import logging
import multiprocessing
from hyperscalers.mergeLogs import mergeLogs
class AION(awsGPUTraining):
    """Drive one AION training run on a remote AWS GPU instance over SSH:
    upload config/data, execute training remotely, download the model
    archive and rewrite remote paths in the downloaded artefacts."""
    def __init__(self, config):
        config['AMAZON_EC2']['InstanceIds'] = [] #removing the support for Instance Id
        super().__init__(config)
        # local<->remote path bookkeeping, populated by updateConfigGetRemoteLoc
        self.remoteUpload = {}
    def copyDataOnServer(self, index):
        """Upload the config file and the training data to the instance.

        NOTE(review): the parameter is named `index` but callers pass the
        parsed config dict, and the argument is never used here — confirm.
        """
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDeployLocation']))
            client.put(self.remoteUpload['configFile'], self.remoteUpload['remoteConfigLoc'])
            if not Path(self.remoteUpload['dataLoc']).exists():
                raise ValueError(" data location {} does not exist".format(self.remoteUpload['dataLoc']))
            if Path(self.remoteUpload['dataLoc']).is_file():
                client.put(self.remoteUpload['dataLoc'], self.remoteUpload['remoteDataLoc'])
            else:
                # directory data source: upload every file it contains
                client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDataLoc']))
                p = Path(self.remoteUpload['dataLoc']).glob('**/*')
                files = [x for x in p if x.is_file()]
                for file in files:
                    client.put(file, self.remoteUpload['remoteDataLoc'])
            if self.remoteUpload.get('imgCsvLoc', None):
                client.put(self.remoteUpload['imgCsvLoc'], self.remoteUpload['remoteDataLoc'])
        except Exception as e:
            raise ValueError("Error in copying data to cloud server. " + str(e))
    def executeCode(self):
        """Run the AION training entry script remotely against the uploaded
        remote config (warn=True so a non-zero exit does not raise)."""
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            # interpreter and AION entry-script paths baked into the AMI
            cmd = '{} {} {}'.format("/home/ubuntu/aws/venv/aion-env/bin/python3.8", "/home/ubuntu/aws/venv/aion-env/lib/python3.8/site-packages/AION/aion.py", self.remoteUpload['remoteConfigLoc'])
            output = client.run( cmd, warn=True)
        except Exception as e:
            raise ValueError("Error in running code on cloud server. " + str(e))
    def downloadAndExtractModel(self):
        """Tar the remote deploy folder, download and extract it locally,
        then remove the archive and folder from the instance."""
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            remote = PurePosixPath(self.remoteUpload['remoteDeployLocation'])
            fileName = self.remoteUpload['deployName']
            local = Path(self.remoteUpload['localDeployLocation'])
            tarFileName = fileName+".tar.gz"
            cmd = 'cd {};tar -czvf {} -C {}/ {}'.format(remote, tarFileName, remote, fileName)
            client.run( cmd)
            extractFile = str(local/tarFileName)
            client.get( str(remote/tarFileName), extractFile)
            with tarfile.open(extractFile, "r:gz") as tar:
                tar.extractall(local)
            Path(extractFile).unlink()   # local archive no longer needed after extraction
            client.run( 'rm -r {}'.format(remote/fileName))
            client.run( 'rm {}'.format(remote/tarFileName))
        except Exception as e:
            raise ValueError("Error in downloading file from server. " + str(e))
    def deleteDataOnServer(self):
        """Remove the uploaded data, config and remote deploy folder from the
        instance."""
        client = Connection(
            host=self.serverIP,
            user=self.sshConfig["userName"],
            connect_kwargs={
                "key_filename": self.sshConfig["keyFilePath"],
            },
        )
        dataPaths = [self.remoteUpload['remoteDataLoc'], self.remoteUpload['remoteDeployLocation'], self.remoteUpload['remoteConfigLoc']]
        for loc in dataPaths:
            # NOTE(review): Path(loc).is_file() tests the LOCAL filesystem for
            # a remote path, so this branch choice is dubious — confirm.
            if Path(loc).is_file():
                client.run( 'rm {}'.format(loc))
            else:
                client.run( 'rm -r {}'.format(loc))
    # only for csv files
    def updateConfigGetRemoteLoc(self, config, index=0):
        """Rewrite the local paths in `config` to their remote equivalents,
        write the rewritten config to remote_<index>.json next to this file,
        and record the local<->remote mapping in self.remoteUpload."""
        remote_location = '/home/ubuntu/aws/usecase'
        remoteInputLoc = PurePosixPath(remote_location)/"input"
        remoteOutputLoc = PurePosixPath(remote_location)/"target"
        if Path(config['basic']['dataLocation']).is_dir():
            # image-folder style input: the label csv may live outside the data dir
            if Path(config['basic']['folderSettings']['labelDataFile']).parent !=Path(config['basic']['dataLocation']):
                self.remoteUpload['imgCsvLoc'] = config['basic']['folderSettings']['labelDataFile']
                config['basic']['folderSettings']['labelDataFile'] = Path(config['basic']['folderSettings']['labelDataFile']).name
        csvFile = Path(config['basic']['dataLocation']).name
        localFile = config['basic']['dataLocation']
        localDeployLoc = config['basic']['deployLocation']
        config['basic']['dataLocation'] = str(remoteInputLoc/csvFile)
        config['basic']['deployLocation'] = str(remoteOutputLoc)
        jsonFile = Path(__file__).parent/'remote_{}.json'.format(index)
        with open(jsonFile,"w") as f:
            json.dump(config, f)
        self.remoteUpload['remoteDataLoc'] = config['basic']['dataLocation']
        self.remoteUpload['remoteConfigLoc'] = str(remoteInputLoc)+ "/temp.json"
        self.remoteUpload['remoteDeployLocation'] = config['basic']['deployLocation']
        self.remoteUpload['dataLoc'] = localFile
        self.remoteUpload['configFile'] = str(jsonFile)
        self.remoteUpload['localDeployLocation'] = localDeployLoc
        self.remoteUpload['deployName'] = "{}_{}".format(config['basic']['modelName'],config['basic']['modelVersion'])
    def updateDeployPath(self):
        """Replace remote deploy paths with the local ones inside the
        downloaded log/output/display files (in-place, with .bak backups)."""
        import fileinput
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"model_training_logs.log"
        self.remoteUpload['localDeployLocation'] = self.remoteUpload['localDeployLocation'].replace('\\','/')
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"output.json"
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"display.json"
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
def updateUserServerConfig(aws_config):
    """Point the ssh key path at the bundled AION_GPU.pem and return the
    (mutated) config dict."""
    pem_path = Path(__file__).parent / "AION_GPU.pem"
    aws_config['ssh']['keyFilePath'] = str(pem_path)
    return aws_config
def getKeyByValue(dictionary, refValue):
    """Return the first key whose value equals refValue, or None if absent."""
    return next((key for key, value in dictionary.items() if value == refValue), None)
def getKeysByValue(dictionary, refValue):
    """Return every key whose value equals refValue (empty list if none)."""
    return [key for key, value in dictionary.items() if value == refValue]
class openInstancesStatus():
    """Track EC2 instances started by AION via per-instance marker files
    (<instanceId>.ec2instance, JSON holding the launch config) so that
    instances orphaned by a crashed run can be stopped on the next run."""
    def __init__(self):
        pass
    def addInstance(self, instanceId, args=None):
        # args: the aws config used to launch the instance; persisted so the
        # instance can be reconnected and stopped later.
        fileName = instanceId + '.ec2instance'
        data = {}
        data[instanceId] = args
        with open(fileName, "w") as f:
            json.dump( data, f, indent=4) #TODO do we need to encrypt
    def removeInstance(self, instanceId):
        # Delete the marker file once the instance has been stopped.
        fileName = instanceId + '.ec2instance'
        if Path(fileName).exists():
            Path(fileName).unlink()
    def clearPreviousInstancesState(self):
        """Stop any instance left running by a previous run (markers found in
        the current working directory) and remove its marker file."""
        # check and stop the previous instance
        openInstances = Path().glob("*.ec2instance")
        for file in openInstances:
            with open(file, 'r') as f:
                data = json.load(f)
            prevConfig = list(data.values())[0]
            key = Path(file).stem   # marker file stem == instance id
            if prevConfig['AMAZON_EC2']['amiId']:
                # reconnect by instance id rather than relaunching from the AMI
                prevConfig['AMAZON_EC2']['InstanceIds'] = [key]
                prevConfig['AMAZON_EC2']['amiId'] = "" # clear amiId
            instance = awsGPUTraining(prevConfig)
            if len(prevConfig['AMAZON_EC2']['InstanceIds']) > 0:
                try:
                    if instance.is_instance_running(prevConfig['AMAZON_EC2']['InstanceIds'][0]):
                        instance.stop_server_instance()
                except:
                    # best-effort cleanup: ignore AWS errors and still drop the marker
                    pass
            self.removeInstance(key)
class prepareConfig():
    """Split an AION training configuration so the selected algorithms can be
    distributed across multiple remote training instances (one disjoint
    algorithm subset per instance)."""
    def __init__(self, config, noOfInstance, ComputeInfrastructure):
        """config: parsed config dict or path to the config JSON file.
        noOfInstance: requested number of instances (string or int).
        ComputeInfrastructure: "True" when remote training is enabled."""
        if isinstance(config, dict):
            self.config = config
            self.configDir = Path(__file__).parent
        elif isinstance(config, str):
            with open(config, 'r') as f:
                self.config = json.load(f)
            self.configDir = Path(config).parent
        else:
            raise TypeError("{} type object is not supported for config".format(type(config)))
        # problem type = the analysisType entry flagged "True"
        self.problemType = getKeyByValue(self.config['basic']['analysisType'], "True")
        # all algorithms enabled ("True") for that problem type
        self.algorithms = getKeysByValue(self.config['basic']['algorithms'][self.problemType], "True")
        self.numInstances = int(noOfInstance)
        self.computeInfrastructure = ComputeInfrastructure
        self.isMultiInstance = False
        self.validateMultiInstance()
        self.newConfigs = []
    def isRemoteTraining(self):
        # idiom fix: direct boolean expression instead of `True if ... else False`
        return self.computeInfrastructure == "True"
    def validateMultiInstance(self):
        """Decide whether training can be spread over multiple instances and
        clamp the instance count to the number of algorithms."""
        if self.isRemoteTraining():
            if self.problemType in ('classification', 'regression'):
                if self.numInstances > len(self.algorithms):
                    # can't usefully use more instances than algorithms
                    self.numInstances = len(self.algorithms)
                if len(self.algorithms) > 1 and self.numInstances > 1:
                    self.isMultiInstance = True
    def createNewConfigs(self):
        """Write one config file per instance, each enabling a disjoint subset
        of the selected algorithms; returns the list of new file paths."""
        configs = []
        algos = self.algorithms
        if len(algos) <= self.numInstances:
            self.numInstances = len(algos)
        # (dead pre-loop algosPerInstances computation removed — the value was
        # recomputed inside the loop before first use)
        remainingAlgos = len(algos)
        for i in range(self.numInstances):
            newConfig = copy.deepcopy(self.config)
            # disable everything, then re-enable this instance's share
            for k in newConfig['basic']['algorithms'][self.problemType]:
                newConfig['basic']['algorithms'][self.problemType][k] = "False"
            # spread the remaining algorithms evenly over the remaining instances
            algosPerInstances = remainingAlgos // (self.numInstances - i)
            for j in range(algosPerInstances):
                newConfig['basic']['algorithms'][self.problemType][algos[len(algos) - remainingAlgos + j]] = "True"
            newConfig['basic']['modelVersion'] = newConfig['basic']['modelVersion'] + "_{}".format(i)
            newFileName = str(self.configDir/"splittedConfig_{}.json".format(i))
            with open(newFileName, 'w') as jsonFile:
                json.dump(newConfig, jsonFile, indent=4)
            configs.append(newFileName)
            remainingAlgos -= algosPerInstances
        return configs
class Process(multiprocessing.Process):
    """Worker process that trains one config subset on its own EC2 instance."""
    def __init__(self, aws_config, configFile, index, openInstancesLog):
        # index distinguishes the per-instance remote config file names;
        # openInstancesLog records started instances for crash recovery.
        super(Process, self).__init__()
        self.index = index
        self.aws_config = aws_config
        self.configFile = configFile
        self.openInstances = openInstancesLog
    def run(self):
        """Start an instance, push data, train remotely, pull results, then
        stop the instance (also on failure, via the finally block)."""
        log = logging.getLogger('eion')
        serverStart = False   # guards the finally-block cleanup
        try:
            server = AION(self.aws_config)
            with open(self.configFile,"r") as f:
                config = json.load(f)
            server.updateConfigGetRemoteLoc(config, self.index)
            instanceId = server.start_instance()
            log.info('Status:-|... start instance: {}'.format(instanceId))
            serverStart = True
            self.openInstances.addInstance(instanceId, self.aws_config)
            # give the instance time to boot before SSH — presumably 40s is
            # enough for the AMI used; TODO confirm
            time.sleep(40)
            log.info('Status:-|... copying data on instance: {}'.format(instanceId))
            server.copyDataOnServer( config)
            log.info('Status:-|... Training on instance: {}'.format(instanceId))
            server.executeCode()
            log.info('Status:-|... downloading data from instance: {}'.format(instanceId))
            server.downloadAndExtractModel()
            server.deleteDataOnServer()
            log.info('Status:-|... stopping instance: {}'.format(instanceId))
            server.stop_server_instance()
            serverStart = False
            self.openInstances.removeInstance(instanceId)
            server.updateDeployPath()
        except Exception as e:
            # errors are reported via stdout; the parent checks for the
            # expected deploy folder to detect failure
            print(e)
            pass
        finally:
            if serverStart:
                # instance was started but not cleanly stopped above
                log.info('Status:-|... stopping instance: {}'.format(instanceId))
                server.stop_server_instance()
                self.openInstances.removeInstance(instanceId)
def awsTraining(configPath):
    """Run AION training on one or more AWS EC2 GPU instances.

    The training configuration is split across instances (one subset of
    algorithms per instance), each subset is trained remotely in its own
    process, and the per-instance results are merged back into a single
    deploy folder.
    configPath: path of the AION training configuration JSON file.
    """
    # BUG FIX: bind `log` up-front so the except handler can't hit a
    # NameError when a failure occurs before the logger is configured.
    log = logging.getLogger('eion')
    try:
        with open(configPath, "r") as f:
            config = json.load(f)
        # BUG FIX: removed dead statements that referenced undefined names
        # (boto3, AWS_Region, instance) and crashed the function on entry.
        deployFolder = config['basic']['deployLocation']
        iterName = config['basic']['modelName']
        iterVersion = config['basic']['modelVersion']
        dataLocation = config['basic']['dataLocation']
        usecaseLocation = os.path.join(deployFolder, iterName)
        if not Path(usecaseLocation).exists():
            os.makedirs(usecaseLocation)
        deployLocation = os.path.join(usecaseLocation, iterVersion)
        if Path(deployLocation).exists():
            shutil.rmtree(deployLocation)
        os.makedirs(deployLocation)
        logLocation = os.path.join(deployLocation, 'log')
        if not Path(logLocation).exists():
            os.makedirs(logLocation)
        # configure the per-run training log file
        logFileName = os.path.join(logLocation, 'model_training_logs.log')
        filehandler = logging.FileHandler(logFileName, 'w', 'utf-8')
        formatter = logging.Formatter('%(message)s')
        filehandler.setFormatter(formatter)
        log.propagate = False
        for hdlr in log.handlers[:]:  # remove the existing file handlers
            if isinstance(hdlr, logging.FileHandler):
                log.removeHandler(hdlr)
        log.addHandler(filehandler)
        log.setLevel(logging.INFO)
        log.info('Status:-|... Compute Infrastructure:AMAZON EC2')
        # read the server (AWS) configuration shipped with the package
        with open(Path(__file__).parent/"../config/compute.conf", "r") as f:
            aws_config = json.load(f)
        aws_config = updateUserServerConfig(aws_config)
        # BUG FIX: use the configPath argument instead of sys.argv[1] so the
        # function behaves correctly when called programmatically.
        configSplitter = prepareConfig(configPath, aws_config['AMAZON_EC2']['NoOfInstance'], aws_config['ComputeInfrastructure'])
        newConfigs = configSplitter.createNewConfigs()
        print(newConfigs)
        openInstances = openInstancesStatus()
        openInstances.clearPreviousInstancesState()
        folders = []
        processes = [0] * len(newConfigs)
        # one worker process (and EC2 instance) per split config
        for index, cfgFile in enumerate(newConfigs):
            processes[index] = Process(aws_config, cfgFile, index, openInstances)
            processes[index].start()
        for index in range(len(newConfigs)):
            processes[index].join()
            folders.append(deployLocation + '_{}'.format(index))
        if Path(deployLocation + '_0').exists():
            # at least one run produced output: merge everything back
            filehandler.close()
            log.removeHandler(filehandler)
            merge = mergeLogs(folders)
            merge.mergeFolder()
        else:
            output = {"status": "FAIL", "message": "Please check cloud server configuration."}
            output = json.dumps(output)
            log.info('server code execution failed !....')
            log.info('\n------------- Output JSON ------------')
            log.info('-------> Output :' + str(output))
            log.info('------------- Output JSON ------------\n')
            print("\n")
            print("aion_learner_status:", output)
            print("\n")
    except Exception as inst:
        output = {"status": "FAIL", "message": str(inst).strip('"')}
        output = json.dumps(output)
        log.info('server code execution failed !....' + str(inst))
        log.info('\n------------- Output JSON ------------')
        log.info('-------> Output :' + str(output))
        log.info('------------- Output JSON ------------\n')
        print("\n")
        print("aion_learner_status:", output)
        print("\n")
|
aws_instance.py | import json
import sys,os
from pathlib import Path, PurePosixPath
from fabric import Connection
import tarfile
import copy
from hyperscalers.cloudServer import awsGPUTraining
import time
import shutil
import logging
import multiprocessing
from hyperscalers.mergeLogs import mergeLogs
class AION(awsGPUTraining):
    """Remote AION training driver for an AWS GPU instance.

    Extends awsGPUTraining with: uploading the training config/data to the
    instance, executing AION remotely, and downloading/extracting the trained
    model artifacts.  The local<->remote path mapping is kept in
    ``self.remoteUpload`` and is populated by ``updateConfigGetRemoteLoc()``,
    which must be called before any of the copy/execute/download methods.
    """
    def __init__(self, config):
        config['AMAZON_EC2']['InstanceIds'] = [] #removing the support for Instance Id
        super().__init__(config)
        # local/remote path mapping, filled in by updateConfigGetRemoteLoc()
        self.remoteUpload = {}
    def copyDataOnServer(self, index):
        """Upload the run config and the training data to the remote instance.

        NOTE(review): the ``index`` parameter is unused; Process.run passes the
        parsed config dict as this argument — confirm intent.
        """
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDeployLocation']))
            client.put(self.remoteUpload['configFile'], self.remoteUpload['remoteConfigLoc'])
            if not Path(self.remoteUpload['dataLoc']).exists():
                raise ValueError(" data location {} does not exist".format(self.remoteUpload['dataLoc']))
            if Path(self.remoteUpload['dataLoc']).is_file():
                # single-file dataset: one upload
                client.put(self.remoteUpload['dataLoc'], self.remoteUpload['remoteDataLoc'])
            else:
                # folder dataset: upload every file found recursively
                # (flattened into remoteDataLoc)
                client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDataLoc']))
                p = Path(self.remoteUpload['dataLoc']).glob('**/*')
                files = [x for x in p if x.is_file()]
                for file in files:
                    client.put(file, self.remoteUpload['remoteDataLoc'])
                # label csv living outside the data folder
                # (see updateConfigGetRemoteLoc)
                if self.remoteUpload.get('imgCsvLoc', None):
                    client.put(self.remoteUpload['imgCsvLoc'], self.remoteUpload['remoteDataLoc'])
        except Exception as e:
            raise ValueError("Error in copying data to cloud server. " + str(e))
    def executeCode(self):
        """Run AION training remotely with the uploaded config (blocking).

        The interpreter and package paths are hard-coded to the layout of the
        prepared AMI.  ``warn=True`` keeps a non-zero exit status from raising.
        """
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            cmd = '{} {} {}'.format("/home/ubuntu/aws/venv/aion-env/bin/python3.8", "/home/ubuntu/aws/venv/aion-env/lib/python3.8/site-packages/AION/aion.py", self.remoteUpload['remoteConfigLoc'])
            output = client.run( cmd, warn=True)
        except Exception as e:
            raise ValueError("Error in running code on cloud server. " + str(e))
    def downloadAndExtractModel(self):
        """Tar the remote deployment folder, download and extract it locally,
        then remove the remote folder and archive."""
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            remote = PurePosixPath(self.remoteUpload['remoteDeployLocation'])
            fileName = self.remoteUpload['deployName']
            local = Path(self.remoteUpload['localDeployLocation'])
            tarFileName = fileName+".tar.gz"
            cmd = 'cd {};tar -czvf {} -C {}/ {}'.format(remote, tarFileName, remote, fileName)
            client.run( cmd)
            extractFile = str(local/tarFileName)
            client.get( str(remote/tarFileName), extractFile)
            with tarfile.open(extractFile, "r:gz") as tar:
                # NOTE(review): no member-path sanitisation before extractall
                tar.extractall(local)
            Path(extractFile).unlink()
            client.run( 'rm -r {}'.format(remote/fileName))
            client.run( 'rm {}'.format(remote/tarFileName))
        except Exception as e:
            raise ValueError("Error in downloading file from server. " + str(e))
    def deleteDataOnServer(self):
        """Remove uploaded data/config/deploy artifacts from the instance.

        NOTE(review): Path(loc).is_file() inspects the *local* filesystem for a
        *remote* path, so the rm vs 'rm -r' choice is effectively always
        'rm -r' — confirm this is intended.
        """
        client = Connection(
            host=self.serverIP,
            user=self.sshConfig["userName"],
            connect_kwargs={
                "key_filename": self.sshConfig["keyFilePath"],
            },
        )
        dataPaths = [self.remoteUpload['remoteDataLoc'], self.remoteUpload['remoteDeployLocation'], self.remoteUpload['remoteConfigLoc']]
        for loc in dataPaths:
            if Path(loc).is_file():
                client.run( 'rm {}'.format(loc))
            else:
                client.run( 'rm -r {}'.format(loc))
    # only for csv files
    def updateConfigGetRemoteLoc(self, config, index=0):
        """Rewrite ``config`` data/deploy paths to their remote locations and
        record the local<->remote mapping in self.remoteUpload.

        The rewritten config is written to 'remote_<index>.json' next to this
        module; it is uploaded as the remote 'temp.json' by copyDataOnServer.
        """
        remote_location = '/home/ubuntu/aws/usecase'
        remoteInputLoc = PurePosixPath(remote_location)/"input"
        remoteOutputLoc = PurePosixPath(remote_location)/"target"
        if Path(config['basic']['dataLocation']).is_dir():
            # image-folder use case: the label csv may live outside the data
            # folder; upload it separately and keep only its basename in config
            if Path(config['basic']['folderSettings']['labelDataFile']).parent !=Path(config['basic']['dataLocation']):
                self.remoteUpload['imgCsvLoc'] = config['basic']['folderSettings']['labelDataFile']
                config['basic']['folderSettings']['labelDataFile'] = Path(config['basic']['folderSettings']['labelDataFile']).name
        csvFile = Path(config['basic']['dataLocation']).name
        localFile = config['basic']['dataLocation']
        localDeployLoc = config['basic']['deployLocation']
        config['basic']['dataLocation'] = str(remoteInputLoc/csvFile)
        config['basic']['deployLocation'] = str(remoteOutputLoc)
        jsonFile = Path(__file__).parent/'remote_{}.json'.format(index)
        with open(jsonFile,"w") as f:
            json.dump(config, f)
        self.remoteUpload['remoteDataLoc'] = config['basic']['dataLocation']
        self.remoteUpload['remoteConfigLoc'] = str(remoteInputLoc)+ "/temp.json"
        self.remoteUpload['remoteDeployLocation'] = config['basic']['deployLocation']
        self.remoteUpload['dataLoc'] = localFile
        self.remoteUpload['configFile'] = str(jsonFile)
        self.remoteUpload['localDeployLocation'] = localDeployLoc
        self.remoteUpload['deployName'] = "{}_{}".format(config['basic']['modelName'],config['basic']['modelVersion'])
    def updateDeployPath(self):
        """Rewrite remote deploy paths to local ones inside the downloaded
        training log and output/display JSON files (in place, .bak backups).

        The remote path '<deploy>/<name>_<version>' is mapped to the local
        path '<deploy>/<name>' (version suffix stripped from deployName).
        """
        import fileinput
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"model_training_logs.log"
        self.remoteUpload['localDeployLocation'] = self.remoteUpload['localDeployLocation'].replace('\\','/')
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"output.json"
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
        logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/"display.json"
        if Path(logFile).exists():
            with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
                for line in file:
                    remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
                    localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
                    print(line.replace(remoteLoc, localLoc), end='')
def updateUserServerConfig(aws_config):
    """Point the ssh key path at the bundled AION_GPU.pem and return the config.

    Mutates ``aws_config`` in place and also returns it for convenience.
    """
    pem_file = Path(__file__).parent / "AION_GPU.pem"
    aws_config['ssh']['keyFilePath'] = str(pem_file)
    return aws_config
def getKeyByValue(dictionary, refValue):
    """Return the first key whose value equals ``refValue``, or None."""
    return next((key for key, value in dictionary.items() if value == refValue), None)
def getKeysByValue(dictionary, refValue):
    """Return every key whose value equals ``refValue`` (may be empty)."""
    return [key for key, value in dictionary.items() if value == refValue]
class openInstancesStatus():
    """Tracks running EC2 instances via '<instanceId>.ec2instance' marker files
    in the current working directory, so instances orphaned by a crashed run
    can be stopped on the next run."""
    def __init__(self):
        pass
    def addInstance(self, instanceId, args=None):
        """Record an instance as running; ``args`` is the AWS config used to
        start it (needed later to stop it)."""
        fileName = instanceId + '.ec2instance'
        data = {}
        data[instanceId] = args
        with open(fileName, "w") as f:
            json.dump( data, f, indent=4) #TODO do we need to encrypt
    def removeInstance(self, instanceId):
        """Drop the marker file for an instance that has been stopped."""
        fileName = instanceId + '.ec2instance'
        if Path(fileName).exists():
            Path(fileName).unlink()
    def clearPreviousInstancesState(self):
        # check and stop the previous instance
        openInstances = Path().glob("*.ec2instance")
        for file in openInstances:
            with open(file, 'r') as f:
                data = json.load(f)
            # marker holds {instanceId: aws_config}; file stem is the id
            prevConfig = list(data.values())[0]
            key = Path(file).stem
            if prevConfig['AMAZON_EC2']['amiId']:
                # AMI-created instance: target the recorded id directly
                prevConfig['AMAZON_EC2']['InstanceIds'] = [key]
                prevConfig['AMAZON_EC2']['amiId'] = "" # clear amiId
            instance = awsGPUTraining(prevConfig)
            if len(prevConfig['AMAZON_EC2']['InstanceIds']) > 0:
                try:
                    # NOTE(review): is_instance_running is not defined in the
                    # visible awsGPUTraining class — confirm it exists upstream
                    if instance.is_instance_running(prevConfig['AMAZON_EC2']['InstanceIds'][0]):
                        instance.stop_server_instance()
                except:
                    # best-effort cleanup: never block a new run on stale state
                    pass
            self.removeInstance(key)
class prepareConfig():
    """Splits one AION training configuration into per-EC2-instance configs.

    The enabled algorithms of the selected problem type are distributed as
    evenly as possible over ``noOfInstance`` instances; each split is written
    to 'splittedConfig_<i>.json' beside the source config.
    """
    def __init__(self, config, noOfInstance, ComputeInfrastructure):
        """``config`` may be a dict or a path to a JSON file.

        Raises TypeError for any other config type.
        """
        if isinstance(config, dict):
            self.config = config
            self.configDir = Path(__file__).parent
        elif isinstance(config, str):
            with open(config, 'r') as f:
                self.config = json.load(f)
            self.configDir = Path(config).parent
        else:
            raise TypeError("{} type object is not supported for config".format(type(config)))
        # problem type is the analysisType entry flagged "True"; algorithms are
        # all entries flagged "True" under that problem type
        self.problemType = getKeyByValue(self.config['basic']['analysisType'] ,"True")
        self.algorithms = getKeysByValue(self.config['basic']['algorithms'][self.problemType] ,"True")
        self.numInstances = int(noOfInstance)
        self.computeInfrastructure = ComputeInfrastructure
        self.isMultiInstance = False
        self.validateMultiInstance()
        self.newConfigs = []
    def isRemoteTraining(self):
        """True when remote (cloud) training is enabled."""
        return self.computeInfrastructure == "True"
    def validateMultiInstance(self):
        """Clamp the instance count to the algorithm count and decide whether
        multi-instance training applies (classification/regression only)."""
        if self.isRemoteTraining():
            if self.problemType == 'classification' or self.problemType == 'regression':
                if self.numInstances > len(self.algorithms):
                    self.numInstances = len(self.algorithms)
                if len(self.algorithms) > 1 and self.numInstances > 1:
                    self.isMultiInstance = True
    def createNewConfigs(self):
        """Write one split config per instance and return the list of paths.

        Each split disables all algorithms, then re-enables its share; the
        modelVersion gets a '_<i>' suffix so instances do not collide.
        """
        configs = []
        algos = self.algorithms
        if len(algos) <= self.numInstances:
            self.numInstances = len(algos)
        remainingAlgos = len(algos)
        for i in range(self.numInstances):
            newConfig = copy.deepcopy(self.config)
            # disable everything, then re-enable this instance's share
            for k in newConfig['basic']['algorithms'][self.problemType]:
                newConfig['basic']['algorithms'][self.problemType][k] = "False"
            # distribute the remaining algorithms evenly over the remaining
            # instances (fixed: this was previously also computed, dead, before
            # the loop)
            algosPerInstances = remainingAlgos // (self.numInstances - i)
            for j in range(algosPerInstances):
                newConfig['basic']['algorithms'][self.problemType][algos[len(algos) - remainingAlgos + j]] = "True"
            newConfig['basic']['modelVersion'] = newConfig['basic']['modelVersion'] + "_{}".format(i)
            newFileName = str(self.configDir/"splittedConfig_{}.json".format(i))
            with open(newFileName, 'w') as jsonFile:
                json.dump(newConfig, jsonFile, indent=4)
            configs.append(newFileName)
            remainingAlgos -= algosPerInstances
        return configs
class Process(multiprocessing.Process):
    """Worker process: trains one split configuration on its own EC2 instance.

    Lifecycle in run(): start instance -> upload config/data -> train ->
    download artifacts -> stop instance.  The instance id is tracked through
    openInstancesStatus so orphaned instances can be stopped by a later run.
    """
    def __init__(self, aws_config, configFile, index, openInstancesLog):
        super(Process, self).__init__()
        self.index = index               # split index (names remote_<i>.json)
        self.aws_config = aws_config     # AWS/ssh configuration dict
        self.configFile = configFile     # path to the split AION config file
        self.openInstances = openInstancesLog  # openInstancesStatus tracker
    def run(self):
        # NOTE(review): handlers configured on this logger in the parent may
        # not be inherited in the child on spawn-based platforms — confirm.
        log = logging.getLogger('eion')
        serverStart = False
        try:
            server = AION(self.aws_config)
            with open(self.configFile,"r") as f:
                config = json.load(f)
            server.updateConfigGetRemoteLoc(config, self.index)
            # NOTE(review): verify start_instance actually returns the
            # instance id; some versions only record it on self.
            instanceId = server.start_instance()
            log.info('Status:-|... start instance: {}'.format(instanceId))
            serverStart = True
            self.openInstances.addInstance(instanceId, self.aws_config)
            # give the instance time to finish booting sshd before uploading
            time.sleep(40)
            log.info('Status:-|... copying data on instance: {}'.format(instanceId))
            server.copyDataOnServer( config)
            log.info('Status:-|... Training on instance: {}'.format(instanceId))
            server.executeCode()
            log.info('Status:-|... downloading data from instance: {}'.format(instanceId))
            server.downloadAndExtractModel()
            server.deleteDataOnServer()
            log.info('Status:-|... stopping instance: {}'.format(instanceId))
            server.stop_server_instance()
            serverStart = False
            self.openInstances.removeInstance(instanceId)
            server.updateDeployPath()
        except Exception as e:
            print(e)
            pass
        finally:
            # never leave a billed instance running after a failure
            if serverStart:
                log.info('Status:-|... stopping instance: {}'.format(instanceId))
                server.stop_server_instance()
                self.openInstances.removeInstance(instanceId)
def training(config_path):
    """Run distributed AION training on AWS EC2.

    Reads the AION config at ``config_path``, splits it into per-instance
    configs, trains each split in its own worker Process/EC2 instance, then
    merges the per-instance logs/folders.  Progress and failures are reported
    through the 'eion' logger and 'aion_learner_status' prints.
    """
    # bind the logger up front so the except handler can always log;
    # previously 'log' was assigned inside the try and a failure before that
    # point raised NameError in the handler, masking the real error
    log = logging.getLogger('eion')
    try:
        # read the aion configuration file for server enabled
        with open(config_path, "r") as f:
            config = json.load(f)
        deployFolder = config['basic']['deployLocation']
        iterName = config['basic']['modelName']
        iterVersion = config['basic']['modelVersion']
        dataLocation = config['basic']['dataLocation']
        deployLocation = os.path.join(deployFolder, iterName + '_' + iterVersion)
        # always start from a clean deployment folder
        if Path(deployLocation).exists():
            shutil.rmtree(deployLocation)
        os.makedirs(deployLocation)
        # route 'eion' logging to a fresh per-run file
        logFileName = os.path.join(deployLocation, 'model_training_logs.log')
        filehandler = logging.FileHandler(logFileName, 'w', 'utf-8')
        formatter = logging.Formatter('%(message)s')
        filehandler.setFormatter(formatter)
        log.propagate = False
        for hdlr in log.handlers[:]:  # remove the existing file handlers
            if isinstance(hdlr, logging.FileHandler):
                log.removeHandler(hdlr)
        log.addHandler(filehandler)
        log.setLevel(logging.INFO)
        log.info('Status:-|... Compute Infrastructure:AMAZON EC2')
        with open(Path(__file__).parent/"../config/compute.conf", "r") as f:
            aws_config = json.load(f)
        aws_config = updateUserServerConfig(aws_config)
        configSplitter = prepareConfig(config_path, aws_config['AMAZON_EC2']['NoOfInstance'], aws_config['ComputeInfrastructure'])
        newConfigs = configSplitter.createNewConfigs()
        print(newConfigs)
        # stop any instances orphaned by a previous crashed run
        openInstances = openInstancesStatus()
        openInstances.clearPreviousInstancesState()
        folders = []
        processes = [0] * len(newConfigs)
        # one worker process per split config (loop var renamed so it no
        # longer shadows the parsed 'config' dict above)
        for index, cfgFile in enumerate(newConfigs):
            processes[index] = Process(aws_config, cfgFile, index, openInstances)
            processes[index].start()
        for index, _cfgFile in enumerate(newConfigs):
            processes[index].join()
            folders.append(deployLocation + '_{}'.format(index))
        if Path(deployLocation + '_0').exists():
            # at least the first instance produced output: merge the
            # per-instance logs and deployment folders
            filehandler.close()
            log.removeHandler(filehandler)
            merge = mergeLogs(folders)
            merge.mergeFolder()
        else:
            output = {"status":"FAIL","message":"Please check cloud server configuration."}
            output = json.dumps(output)
            log.info('server code execution failed !....')
            log.info('\n------------- Output JSON ------------')
            log.info('-------> Output :'+str(output))
            log.info('------------- Output JSON ------------\n')
            print("\n")
            print("aion_learner_status:",output)
            print("\n")
    except Exception as inst:
        output = {"status":"FAIL","message":str(inst).strip('"')}
        output = json.dumps(output)
        log.info('server code execution failed !....'+str(inst))
        log.info('\n------------- Output JSON ------------')
        log.info('-------> Output :'+str(output))
        log.info('------------- Output JSON ------------\n')
        print("\n")
        print("aion_learner_status:",output)
        print("\n")
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
cloudServer.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import boto3
import json
import time
import requests
import datetime
import uuid
import shutil
from websocket import create_connection
from botocore.exceptions import ClientError
import tarfile
from pathlib import Path, PurePosixPath
from stat import S_ISDIR
from fabric import Connection
import time
import logging
class awsGPUTraining():
    """Lifecycle manager for an AWS EC2 GPU training instance.

    Wraps boto3 for instance create/start/stop/terminate and fabric/ssh for
    copying data, running code (directly or via a Jupyter kernel) and fetching
    results.  Configuration comes from config["server"] (AWS credentials plus
    either InstanceIds or an amiId) and config["ssh"] (user name, key file).
    """
    def __init__(self, config):
        """Validate the server/ssh config and create the boto3 EC2 client.

        Raises ValueError when credentials are missing or when neither
        InstanceIds nor amiId is provided.
        """
        # static layout of the prepared training AMI and its jupyter service
        local_config = {"location":{"data":"aion/data/od", "code":"", "pretrainedModel":"aion/pretrainedModels"},
            "jupyter":{"header":{"Authorization":"Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2"}, "portNo":"8888",
            "notebook_path":"aion/code/AWS_GPU_OD_Training.ipynb"}}
        self.serverConfig = config["server"]
        self.sshConfig = config["ssh"]
        self.log = logging.getLogger('eion')
        self.codeLocation = local_config["location"]["code"]
        self.dataLocation = local_config["location"]["data"]
        self.pretrainedModelLocation = local_config["location"]["pretrainedModel"]
        self.jupyterConfig = local_config["jupyter"]
        self.serverIP = ""
        if self.serverConfig["awsAccessKeyId"] == "" or self.serverConfig["awsSecretAccessKey"] == "":
            raise ValueError("Cloud server configuration is not available.")
        if len(self.serverConfig["InstanceIds"]) == 0 and self.serverConfig["amiId"] == "":
            raise ValueError("Please provide either InstanceIds or amiId in server config")
        self.instanceId = []
        self.separate_instance = False
        if self.serverConfig["amiId"] != "":
            # an AMI id means a fresh instance is created (and terminated) per run
            self.separate_instance = True
        else:
            if len(self.serverConfig["InstanceIds"]):
                if isinstance(self.serverConfig["InstanceIds"], list):
                    self.instanceId = self.serverConfig["InstanceIds"]
                elif isinstance(self.serverConfig["InstanceIds"], str):
                    self.instanceId = [self.serverConfig["InstanceIds"]]
        self.ec2_client = boto3.client(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
    def __sftp_exists(self, sftp, path):
        """Return True if ``path`` exists on the remote sftp session."""
        try:
            sftp.stat(path)
            return True
        except:# IOError, e:
            #if e.errno == errno.ENOENT:
            return False
    def __rmtree(self, sftp, remotepath, level=0):
        """Recursively delete a remote directory tree over sftp."""
        for f in sftp.listdir_attr(remotepath):
            rpath = str(PurePosixPath(remotepath)/f.filename)
            if S_ISDIR(f.st_mode):
                self.__rmtree(sftp, rpath, level=(level + 1))
                sftp.rmdir(rpath)
            else:
                rpath = str(PurePosixPath(remotepath)/f.filename)
                sftp.remove(rpath)
    def copy_files_to_server(self, location):
        """Upload the object-detection training inputs (tfrecords + configs)
        from ``location`` to the instance, clearing the remote data dir first.

        Raises ValueError (wrapping the original error) on any failure.
        """
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            client.sudo('rm -rf {}/*'.format(self.dataLocation))
            client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord')
            client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord')
            client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config')
            client.put(location+'/label_map.pbtxt', self.dataLocation+'/label_map.pbtxt')
            client.put(location+'/model.config', self.dataLocation+'/model.config')
            if self.jupyterConfig != "":
                # notebook flow tails this file for progress
                client.run("touch {}".format(self.dataLocation+'/log.txt'))
        except Exception as e:
            raise ValueError("Error in copying data to cloud server. " + str(e))
    def __myexec(self, ssh, cmd, timeout, want_exitcode=False):
        """Run ``cmd`` on a paramiko-style ssh client, draining stdout/stderr
        to avoid read-block hangs; optionally return (output, exit_code)."""
        import select  # fix: select.select is used below but was never imported
        # one channel per command
        stdin, stdout, stderr = ssh.exec_command(cmd)
        # get the shared channel for stdout/stderr/stdin
        channel = stdout.channel
        # we do not need stdin.
        stdin.close()
        # indicate that we're not going to write to that channel anymore
        channel.shutdown_write()
        # read stdout/stderr in order to prevent read block hangs
        stdout_chunks = []
        stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer)))
        # chunked read to prevent stalls
        while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
            # stop if channel was closed prematurely, and there is no data in the buffers.
            got_chunk = False
            readq, _, _ = select.select([stdout.channel], [], [], timeout)
            for c in readq:
                if c.recv_ready():
                    stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))
                    got_chunk = True
                if c.recv_stderr_ready():
                    # make sure to read stderr to prevent stall
                    stderr.channel.recv_stderr(len(c.in_stderr_buffer))
                    got_chunk = True
            '''
            1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file).
            2) if no data arrived in the last loop, check if we already received the exit code
            3) check if input buffers are empty
            4) exit the loop
            '''
            if not got_chunk \
                and stdout.channel.exit_status_ready() \
                and not stderr.channel.recv_stderr_ready() \
                and not stdout.channel.recv_ready():
                # indicate that we're not going to read from this channel anymore
                stdout.channel.shutdown_read()
                # close the channel
                stdout.channel.close()
                break # exit as remote side is finished and our bufferes are empty
        # close all the pseudofiles
        stdout.close()
        stderr.close()
        if want_exitcode:
            # exit code is always ready at this point
            return (''.join(stdout_chunks), stdout.channel.recv_exit_status())
        return ''.join(stdout_chunks)
    def __myexec1(self, ssh, cmd, timeout, want_exitcode=False):
        """Simpler variant of __myexec: stream stderr to stdout with a pty."""
        # one channel per command
        stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)
        for line in iter(stderr.readline, ""):
            print(line, end="")
        stdin.close()
        stdout.close()
        stderr.close()
    def executeCode(self):
        """Run the configured training script remotely over ssh (blocking)."""
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation)
            client.run( cmd)
        except Exception as e:
            raise ValueError("Error in running code on cloud server. " + str(e))
    def start_executing_notebook(self):
        """Execute the configured Jupyter notebook on the instance cell by
        cell through the kernel websocket API.

        Raises ValueError when any cell reports an error.
        """
        try:
            publicIp_Port = self.serverIP + ":" + self.jupyterConfig["portNo"]
            conURL = "ws://" + publicIp_Port
            base = 'http://' + publicIp_Port + ''
            headers = self.jupyterConfig["header"]
            url = base + '/api/kernels'
            flag = True
            while flag: # deadlock need to add timeout
                response = requests.post(url, headers=headers)
                flag = False
            kernel = json.loads(response.text)
            # Load the notebook and get the code of each cell
            url = base + '/api/contents/' + self.jupyterConfig["notebook_path"]
            response = requests.get(url, headers=headers)
            file = json.loads(response.text)
            code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ]
            ws = create_connection(conURL + "/api/kernels/" + kernel["id"] + "/channels",
                header=headers)
            def send_execute_request(code):
                """Build a Jupyter 'execute_request' message for one cell."""
                msg_type = 'execute_request';
                content = {'code': code, 'silent': False}
                hdr = {'msg_id': uuid.uuid1().hex,
                    'username': 'test',
                    'session': uuid.uuid1().hex,
                    'data': datetime.datetime.now().isoformat(),
                    'msg_type': msg_type,
                    'version': '5.0'}
                msg = {'header': hdr, 'parent_header': hdr,
                    'metadata': {},
                    'content': content}
                return msg
            for c in code:
                ws.send(json.dumps(send_execute_request(c)))
            # We ignore all the other messages, we just get the code execution output
            # (this needs to be improved for production to take into account errors, large cell output, images, etc.)
            for i in range(0, len(code)):
                msg_type = '';
                while msg_type != "stream":
                    rsp = json.loads(ws.recv())
                    msg_type = rsp["msg_type"]
                    if msg_type == 'error':
                        raise ValueError("Error on Cloud machine: "+rsp['content']['evalue'])
            ws.close()
            self.log.info('Status:- |...Execution Started`')
        except ClientError as e:
            raise ValueError(e)
    def __wait_for_completion(self, sftp, remoteLogFile, localLogFile):
        """Poll the remote log every 5 minutes until it contains 'Error' or
        'success'; return the error line ('' on success)."""
        waiting = True
        error_msg = ""
        while waiting:
            time.sleep(5 * 60)
            try:
                sftp.get(str(remoteLogFile), str(localLogFile))
                with open(localLogFile, "r") as f:
                    content = f.readlines()
                for x in content:
                    if "Error" in x:
                        waiting = False
                        error_msg = x
                    if "success" in x:
                        waiting = False
            except Exception as e:
                # fix: this previously did `raise (str(e))` inside a bare
                # except, where `e` was undefined (NameError) and raising a
                # str is itself a TypeError; surface the real failure instead
                raise ValueError("Error while reading remote log file. " + str(e))
        return error_msg
    def copy_file_from_server(self, localPath):
        """Download the remote log and the results tarball into ``localPath``;
        return the local tarball path."""
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt'
            localLogFile = Path(localPath)/'remote_log.txt'
            client.get(str(remoteLogFile), str(localLogFile))
            tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")
            client.get(str(tarFile), str(Path(localPath)/tarFile.name))
        except:
            raise
        return str(Path(localPath)/tarFile.name)
    def create_instance(self):
        """Launch a new t2.xlarge instance from the configured AMI and record
        its id in self.instanceId."""
        instances = self.ec2_client.run_instances(
            ImageId=self.serverConfig["amiId"],
            MinCount=1,
            MaxCount=1,
            InstanceType="t2.xlarge",
            KeyName="AION_GPU",
            SecurityGroupIds = ["sg-02c3a6c8dd67edb74"]  # NOTE(review): hard-coded security group
        )
        self.instanceId = [instances['Instances'][0]['InstanceId']]
    def start_instance(self):
        """Start the EC2 instance (creating it first when amiId is set), wait
        until it is running, record its public IP and return its instance id.

        Raises ValueError on start failure.
        """
        if self.separate_instance:
            self.create_instance()
        try:
            # dry-run first to validate permissions/parameters cheaply
            response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=True)
        except Exception as e:
            if 'DryRunOperation' not in str(e):
                raise ValueError("Error in starting the EC2 instance, check server configuration. " + str(e))
        try:
            running_state_code = 16
            response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=False)
            instance_status_code = 0
            while instance_status_code != running_state_code:
                response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
                instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
                if instance_status_code == running_state_code:
                    self.serverIP = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
                    break
        except ClientError as e:
            raise ValueError("Error in starting the EC2 instance. " + str(e))
        # fix: return the id so callers (e.g. Process.run) can log/track it;
        # previously this method implicitly returned None
        return self.instanceId[0] if self.instanceId else None
    def terminate_instance(self):
        """Terminate (not just stop) the managed instance."""
        ec2 = boto3.resource(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
        ec2.instances.filter(InstanceIds=self.instanceId).terminate() # for terminating an ec2 instance
    def stop_server_instance(self):
        """Stop the instance, wait for the 'stopped' state, and terminate it
        when it was created from an AMI for this run.

        Raises ValueError when stopping/terminating fails.
        """
        try:
            self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=True)
        except Exception as e:
            if 'DryRunOperation' not in str(e):
                raise
        stopped_state_code = 80
        # Dry run succeeded, call stop_instances without dryrun
        try:
            response = self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=False)
            response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
            instance_status_code = 0
            while instance_status_code != stopped_state_code:
                response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
                instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
                if instance_status_code == stopped_state_code:
                    break
        except:
            raise ValueError("Error in stopping the EC2 instance {}.Please stop it manually ".format(self.instanceId[0]))
        if self.separate_instance:
            try:
                self.terminate_instance()
            except:
                raise ValueError("Error in terminating the EC2 instance {}.Please terminate it manually ".format(self.instanceId[0]))
|
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure the settings module and hand off to Django."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Django is a hard requirement for this utility; explain likely causes.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
urls.py | """mpgWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, re_path
from appfe.api import inferenceApis
from django.urls import path, re_path
# URL routes for the AION inference/monitoring REST API; every view lives in
# appfe.api.inferenceApis.
urlpatterns = [
    #path('predict', inferenceApis.apipredict,name='PredictAPI'),
    # prediction endpoints (plain and secure)
    path('predict', inferenceApis.apipredict,name='PredictAPI'),
    path('spredict',inferenceApis.apispredict,name='SecurePredictAPI'),
    # drift / performance monitoring
    path('monitoring', inferenceApis.apiinputdrift,name='MonitoringAPI'),
    path('performance', inferenceApis.apioutputdrift,name='Performance'),
    # explainability and feature metadata
    path('xplain', inferenceApis.apixplain,name='Xplain'),
    path('features',inferenceApis.apifeatures,name='Features'),
    # training workflow: upload data, retrain, poll status, publish
    path('uploadfile',inferenceApis.uploadfile,name='uploadfile'),
    path('retrain',inferenceApis.retrain,name='retrain'),
    path('trainstatus',inferenceApis.trainstatus,name='trainstatus'),
    path('publish',inferenceApis.publish,name='publish'),
    # exploratory data analysis
    path('geteda',inferenceApis.geteda,name='geteda'),
    # pattern-anomaly configuration and prediction
    path('pattern_anomaly_settings',inferenceApis.apiPatternAnomalySettings,name='PatternAnomalySettings'),
    path('pattern_anomaly_predict',inferenceApis.apiPatternAnomalyPredict,name='PatternAnomalyPredict')
]
#df=pd.read_csv("C:\Project\Analytics\Deployment\germancredit_9\germancreditdata.csv")
#
#bool_cols = [col for col in df if np.isin(df[col].dropna().unique(), [0, 1]).all()]
#
#bool_cols
|
inferenceApis.py | from django.shortcuts import render
from django.http import HttpResponse
from appbe.dataPath import DEPLOY_LOCATION
from rest_framework import status
from django.db.models import Max, F
import os,sys
import time
import json
import re
import pandas as pd
from rest_framework.permissions import IsAuthenticated
from django.views.decorators.csrf import csrf_exempt
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import subprocess
from pathlib import Path
user_records = {}
@csrf_exempt
def geteda(request):
    """API endpoint: run exploratory data analysis on an uploaded data file.

    POST (application/json) body: {"fileid": <file name under DATA_FILE_PATH>,
    "options": <string, default 'All'>}.  Depending on the options string the
    response 'output' contains: DataOverview, Top10Records, DataDistribution,
    FeatureImportance, CorrelationAnalysis, UnsupervisedClustering/HopkinsValue.

    NOTE(review): non-POST requests fall through and return None (Django would
    error) — confirm whether a help response is intended, as in sibling views.
    """
    if request.method == 'POST':
        if request.content_type == 'application/json':
            output = {}
            try:
                data=request.body.decode('utf-8')
                data = json.loads(data)
                file_id = data['fileid']
                edaOptions = 'All'
                if 'options' in data:
                    edaOptions = data['options']
                dataFile = os.path.join(DATA_FILE_PATH,file_id)
                from appbe.eda import ux_eda
                eda_obj = ux_eda(dataFile)
                if 'dataoverview' in edaOptions.lower() or 'all' in edaOptions.lower():
                    dataDistributionDF = eda_obj.dataDistribution()
                    dataDistributionJson = dataDistributionDF.to_json(orient = 'records')
                    output['DataOverview'] = json.loads(dataDistributionJson)
                if 'top10records' in edaOptions.lower() or 'all' in edaOptions.lower():
                    top10df = eda_obj.getTopRows(10)
                    top10dfJson = top10df.to_json(orient = 'records')
                    output['Top10Records'] = json.loads(top10dfJson)
                if 'datadistribution' in edaOptions.lower() or 'all' in edaOptions.lower():
                    distributionJson = eda_obj.getDistribution()
                    output['DataDistribution'] = distributionJson
                if "featureimportance" in edaOptions.lower() or 'all' in edaOptions.lower():
                    # PCA-based top-10 feature importance
                    pca_map = eda_obj.getPCATop10Features()
                    pca_details = pca_map
                    pca_df=pd.DataFrame()
                    if len(pca_details) > 0:
                        pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(2)
                        pca_json = pca_df.to_json(orient="records")
                        output['FeatureImportance'] = json.loads(pca_json)
                    else:
                        pca_json = 'Error During feature importance processing'
                        output['FeatureImportance'] = pca_json
                if "correlationanalysis" in edaOptions.lower() or 'all' in edaOptions.lower():
                    corr_mat = eda_obj.getCorrelationMatrix()
                    if not corr_mat.empty:
                        corr_mat = corr_mat.to_json(orient="columns")
                        output['CorrelationAnalysis'] = json.loads(corr_mat)
                    else:
                        output['CorrelationAnalysis'] = 'Error during correlation analysis'
                if "unsupervisedclustering" in edaOptions.lower() or 'all' in edaOptions.lower():
                    clusteringDetails,hopkins_val = eda_obj.getClusterDetails()
                    output['UnsupervisedClustering'] = clusteringDetails
                    output['HopkinsValue'] = hopkins_val
            except Exception as e:
                # best-effort: errors are swallowed and whatever sections were
                # computed are still returned with status "Success"
                print(e)
            return HttpResponse(json.dumps({"status":"Success","output":output}),content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
@csrf_exempt
def publish(request):
    """API endpoint: publish a trained model version as a service.

    Query params: usecaseid, version.  POST (application/json) performs the
    publish via appbe.models.publishmodel; any other method returns the
    plain-text help message.
    """
    usecaseid = request.GET["usecaseid"]
    currentVersion = request.GET["version"]
    if request.method == 'POST':
        if request.content_type == 'application/json':
            try:
                from appbe.models import publishmodel
                status,msg,url = publishmodel(request,usecaseid,currentVersion,Existusecases,usecasedetails)
                return HttpResponse(json.dumps({"status":status,"msg":msg,"url":url}),content_type="application/json")
            except Exception as e:
                print(e)
                return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    else:
        # fix: this previously passed the undefined name 'version' (NameError
        # on every non-POST request); the query-param value is currentVersion
        msg = help_text(request,usecaseid,currentVersion)
        return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def trainstatus(request):
    """Report the training status of a training job.

    POST (application/json) body: {"trainingid": <Existusecases pk>}.
    Returns trainingStatus 'Trained' when the stored status is success,
    otherwise re-checks the live status via checkversionrunningstatus.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    currentVersion = request.GET["version"]
    if request.method == 'POST':
        if request.content_type == 'application/json':
            try:
                data=request.body.decode('utf-8')
                data = json.loads(data)
                trainingid = int(data['trainingid'])
                model = Existusecases.objects.get(id=trainingid)
                if model.Status.lower() == 'success':
                    return HttpResponse(json.dumps({"status":"success","trainingStatus":"Trained","usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
                else:
                    # DB status not yet 'success': ask the training subsystem
                    # for the live status of this run.
                    from appbe.training import checkversionrunningstatus
                    status = checkversionrunningstatus(trainingid,usecasedetails,Existusecases)
                    if status.lower() == 'success':
                        return HttpResponse(json.dumps({"status":"success","trainingStatus":"Trained","usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
                    else:
                        return HttpResponse(json.dumps({"status":"success","trainingStatus":status,"usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
            except Exception as e:
                print(e)
                return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    else:
        # BUGFIX: 'version' was undefined here (NameError on every GET);
        # the query parameter is stored in 'currentVersion'.
        msg = help_text(request,usecaseid,currentVersion)
        return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def retrain(request):
    """Retrain an existing use case on its original data merged with a
    previously uploaded file (see uploadfile).

    POST (application/json) body: {"fileid": "<id returned by uploadfile>"}.
    Bumps the use case to a new version, merges old and new datasets,
    writes a fresh training config, and launches training as a detached
    subprocess; returns the new training id/version as JSON.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    currentVersion = request.GET["version"]
    if request.method == 'POST':
        if request.content_type == 'application/json':
            try:
                data=request.body.decode('utf-8')
                data = json.loads(data)
                file_id = data['fileid']
                p = usecasedetails.objects.get(usecaseid=usecaseid)
                # Find the highest existing version; the retrain becomes version+1.
                s1 = Existusecases.objects.filter(ModelName=p).annotate(maxver=Max('ModelName__existusecases__Version'))
                config_list = s1.filter(Version=F('maxver'))
                if config_list.count() > 0:
                    Version = config_list[0].Version
                    Version = Version + 1
                    model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
                    indexVal = 0
                    configfile = str(model[indexVal].ConfigPath)
                    with open(configfile, "r") as f:
                        configSettings = f.read()
                    configSettingsJson = json.loads(configSettings)
                    configSettingsJson['basic']['modelVersion'] = str(Version)
                    dataFile = configSettingsJson['basic']['dataLocation']
                    if os.path.isfile(dataFile):
                        # Merge the original training data with the newly uploaded
                        # file and store the combined set at the uploaded file's path.
                        data = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
                        dataFile = os.path.join(DATA_FILE_PATH,file_id)
                        data2 = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
                        # BUGFIX: DataFrame.append was deprecated and removed in
                        # pandas 2.0; pd.concat is the supported equivalent.
                        data = pd.concat([data, data2], ignore_index=True)
                        data.to_csv(dataFile,index=False)
                    dataFile = os.path.join(DATA_FILE_PATH,file_id)
                    configSettingsJson['basic']['dataLocation'] = str(dataFile)
                    updatedConfigSettings = json.dumps(configSettingsJson)
                    filetimestamp = str(int(time.time()))
                    outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json')
                    config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json')
                    with open(config_json_filename, "w") as fpWrite:
                        fpWrite.write(updatedConfigSettings)
                    ps = Existusecases(DataFilePath=str(dataFile), DeployPath='', Status='Not Trained',ConfigPath=str(config_json_filename), Version=Version, ModelName=p,TrainOuputLocation=str(outputfile))
                    ps.save()
                    # Launch training asynchronously via the top-level aion.py CLI;
                    # the PID is stored so the run can be monitored/stopped later.
                    scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
                    outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','training','-c',config_json_filename])
                    ps.Status = 'Running'
                    ps.trainingPID = outputStr.pid
                    ps.save()
                    return HttpResponse(json.dumps({"status":"success","trainingid":str(ps.id),"version":str(ps.Version),"usecaseid":usecaseid}),content_type="application/json")
                else:
                    return HttpResponse(json.dumps({"status":"error","msg":'Existing trained model not found'}),content_type="application/json")
            except Exception as e:
                print(e)
                return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    else:
        # BUGFIX: 'version' was undefined here (NameError on every GET);
        # the query parameter is stored in 'currentVersion'.
        msg = help_text(request,usecaseid,currentVersion)
        return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def uploadfile(request):
    """Accept a multipart file upload and store it under DATA_FILE_PATH.

    Allowed extensions: csv, tsv, tar, zip, avro, parquet; max size 30 MB.
    Returns JSON {"status":"success","fileid":...} on success, or
    {"status":"error","msg":...} on any failure.
    """
    try:
        if 'file' not in request.FILES:
            msg = 'No file part in the request'
            return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
        else:
            file = request.FILES['file']
            if file.size > 31457280:
                # BUGFIX: the original set this message but fell through and
                # saved the oversized file anyway; reject it explicitly.
                msg = 'Upload limit is 30 MB only'
                return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
            ext = str(file).split('.')[-1]
            if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
                # Name the stored file with a timestamp so ids are unique.
                filetimestamp = str(int(time.time()))
                file_id = 'AION_' + filetimestamp+'.'+ext
                dataFile = os.path.join(DATA_FILE_PATH,file_id)
                with open(dataFile, 'wb+') as destination:
                    for chunk in file.chunks():
                        destination.write(chunk)
                return HttpResponse(json.dumps({"status":"success","fileid":file_id}),content_type="application/json")
            else:
                return HttpResponse(json.dumps({"status":"error","msg":"File extension not supported"}),content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"File upload exception"}),content_type="application/json")
def help_text(request,usecaseid,version):
    """Return a plain-text usage message listing the REST endpoints
    (predict / xplain / monitoring / performance) for the given
    use case id and version, built from the request's host name.
    """
    hosturl =request.get_host()
    url='http://'+hosturl+'/api/'
    # BUGFIX: message text had 'applicattion/json' and a stray apostrophe
    # before 'predict' ({url}'predict), producing a broken example URL.
    msg = """
    Request_Type: Post
    Content_Type: application/json
    For Prediction URL: {url}predict?usecaseid={usecaseid}&version={version}
    For Explanations URL: {url}xplain?usecaseid={usecaseid}&version={version}
    For Input Drift URL: {url}monitoring?usecaseid={usecaseid}&version={version}
    For Output Drift URL: {url}performance?usecaseid={usecaseid}&version={version}
    BODY: Data in json format
    """.format(url=url,usecaseid=usecaseid,version=version)
    return msg
@csrf_exempt
def apispredict(request):
    """Run the deployed model's single-record prediction script
    (aion_spredict.py) and return the JSON emitted after 'predictions:'.

    POST (application/json) body: the record(s) to predict on.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    # Guard clauses: method, content type, then deployed-model existence.
    if request.method != 'POST':
        return HttpResponse(help_text(request, usecaseid, version), content_type="text/plain")
    if request.content_type != 'application/json':
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    try:
        payload = request.body.decode('utf-8')
        script = os.path.join(model_path, 'aion_spredict.py')
        raw = subprocess.check_output([sys.executable, script, payload]).decode('utf-8')
        # The script prints its result after a 'predictions:' marker.
        result = re.search(r'predictions:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
@csrf_exempt
def apipredict(request):
    """Run the deployed model's aion_predict.py on either a JSON body or an
    uploaded data file and return the JSON emitted after 'predictions:'.

    POST: application/json body, or multipart/form-data with a 'file' part
    (csv/tsv/tar/zip/avro/parquet, max 30 MB) that is converted to JSON
    records before being passed to the script.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    #print(request.content_type)
    # Guard clauses: method, content type, then deployed-model existence.
    if request.method != 'POST':
        return HttpResponse(help_text(request, usecaseid, version), content_type="text/plain")
    if request.content_type not in ['application/json','multipart/form-data']:
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    try:
        payload = ''
        msg = 'Bad request'
        if 'file' not in request.FILES:
            payload = request.body.decode('utf-8')
        else:
            uploaded = request.FILES['file']
            if uploaded.size > 31457280:
                msg = 'Upload limit is 30 MB only'
            else:
                ext = str(uploaded).split('.')[-1]
                if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
                    # Convert the uploaded tabular file to JSON records.
                    import io
                    frame = pd.read_csv(io.StringIO(uploaded.read().decode('utf-8')))
                    payload = frame.to_json(orient='records')
                else:
                    msg = 'Extension not supported'
        if payload == '':
            return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
        script = os.path.join(model_path, 'aion_predict.py')
        raw = subprocess.check_output([sys.executable, script, payload]).decode('utf-8')
        # The script prints its result after a 'predictions:' marker.
        result = re.search(r'predictions:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
@csrf_exempt
def apiinputdrift(request):
    """Run the deployed model's input-drift script (aion_ipdrift.py) and
    return the JSON emitted after its 'drift:' marker.

    POST (application/json) body: the data to analyse for input drift.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    # Guard clauses: method, content type, then deployed-model existence.
    if request.method != 'POST':
        return HttpResponse(help_text(request, usecaseid, version), content_type="text/plain")
    if request.content_type != 'application/json':
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    try:
        payload = request.body.decode('utf-8')
        script = os.path.join(model_path, 'aion_ipdrift.py')
        raw = subprocess.check_output([sys.executable, script, payload]).decode('utf-8')
        result = re.search(r'drift:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
@csrf_exempt
def apioutputdrift(request):
    """Run the deployed model's output-drift script (aion_opdrift.py) and
    return the JSON emitted after its 'drift:' marker.

    POST (application/json) body: the data to analyse for output drift.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    # Guard clauses: method, content type, then deployed-model existence.
    if request.method != 'POST':
        return HttpResponse(help_text(request, usecaseid, version), content_type="text/plain")
    if request.content_type != 'application/json':
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    try:
        payload = request.body.decode('utf-8')
        script = os.path.join(model_path, 'aion_opdrift.py')
        raw = subprocess.check_output([sys.executable, script, payload]).decode('utf-8')
        result = re.search(r'drift:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
@csrf_exempt
def apixplain(request):
    """Run the deployed model's explanation script (aion_xai.py, local mode)
    and return the JSON emitted after 'aion_ai_explanation:'.

    Refuses the request when etc/display.json records text features, since
    explanations are not supported for text-feature models.
    POST (application/json) body: the record to explain.
    GET: returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    # Guard clauses: method, content type, then deployed-model existence.
    if request.method != 'POST':
        return HttpResponse(help_text(request, usecaseid, version), content_type="text/plain")
    if request.content_type != 'application/json':
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = (Path(DEPLOY_LOCATION)/usecaseid)/str(version)
    if not model_path.is_dir():
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    # A model is explainable unless display.json lists text features;
    # a missing/unreadable display.json defaults to explainable.
    try:
        with open( (model_path/'etc')/'display.json', 'r') as f:
            disp_data = json.load(f)
        is_explainable = not disp_data.get('textFeatures')
    except:
        is_explainable = True
    try:
        if not is_explainable:
            return HttpResponse(json.dumps({"status":"error","msg":"explain api is not supported when text features are used for training"}),content_type="application/json")
        payload = request.body.decode('utf-8')
        raw = subprocess.check_output([sys.executable, model_path/'aion_xai.py', 'local', payload]) #BugId:13304
        raw = raw.decode('utf-8')
        result = re.search(r'aion_ai_explanation:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
#@api_view(['POST','GET'])
def apifeatures(request):
    """Return the deployed model's feature list by running its
    featureslist.py script and extracting the JSON after 'predictions:'.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    # Guard clauses: content type, then deployed-model existence.
    if request.content_type != 'application/json':
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
    try:
        payload = request.body.decode('utf-8')
        script = os.path.join(model_path, 'featureslist.py')
        raw = subprocess.check_output([sys.executable, script, payload]).decode('utf-8')
        result = re.search(r'predictions:(.*)', str(raw), re.IGNORECASE).group(1).strip()
        return HttpResponse(result, content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
@csrf_exempt
def apiPatternAnomalySettings(request):
    """Persist clickstream anomaly-detection thresholds to clickstream.json
    inside the deployed model folder.

    application/json body must contain: groupswitching,
    transitionprobability, transitionsequence, sequencethreshold.
    Any other content type returns the plain-text API help message.
    """
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    if request.content_type != 'application/json':
        msg = help_text(request, usecaseid, version)
        return HttpResponse(msg, content_type="text/plain")
    model_path = os.path.join(DEPLOY_LOCATION, usecaseid, str(version))
    if not os.path.isdir(model_path):
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
    try:
        payload = json.loads(request.body.decode('utf-8'))
        # Missing keys raise KeyError and fall through to the error response,
        # matching the original behavior.
        settings = {
            'groupswitching': payload['groupswitching'],
            'transitionprobability': payload['transitionprobability'],
            'transitionsequence': payload['transitionsequence'],
            'sequencethreshold': payload['sequencethreshold'],
        }
        filename = os.path.join(model_path, 'clickstream.json')
        print(filename)
        with open(filename, "w") as fpWrite:
            fpWrite.write(json.dumps(settings))
        return HttpResponse(json.dumps({"status":'Success'}),content_type="application/json")
    except Exception as e:
        print(e)
        return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
#@api_view(['POST'])
@csrf_exempt
def apiPatternAnomalyPredict(request):
    """Stateful clickstream anomaly detection for one incoming event.

    Loads the deployed model's artefacts (state clusters, state-transition
    probabilities, column configuration, detection thresholds) and compares
    the event's activity with the previous activity of the same session,
    which is tracked in the module-level ``user_records`` dict defined
    elsewhere in this file.  Flags anomalies for unknown state transitions,
    low-probability transitions, low-probability recent sequences, and
    frequent switching between activity clusters.

    POST body (application/json): one event containing the configured
    activity and session-id fields.
    Returns JSON: {"status":"SUCCESS","data":{"Anomaly","Remarks"}}.

    NOTE(review): ``user_records`` is shared module-level state, so only one
    session is tracked at a time and concurrent requests would race —
    confirm this is acceptable for the deployment model.
    """
    import pandas as pd
    usecaseid = request.GET["usecaseid"]
    version = request.GET["version"]
    if request.content_type == 'application/json':
        model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
        isdir = os.path.isdir(model_path)
        if isdir:
            try:
                data=request.body.decode('utf-8')
                data = json.loads(data)
                anomaly = False
                remarks = ''
                clusterid = -1
                # Artefacts produced at training / configuration time:
                configfilename = os.path.join(model_path,'datadetails.json')  # activity / session column names
                filename = os.path.join(model_path,'clickstream.json')  # thresholds saved by apiPatternAnomalySettings
                clusterfilename = os.path.join(model_path,'stateClustering.csv')  # activity -> cluster mapping
                probfilename = os.path.join(model_path,'stateTransitionProbability.csv')  # P(NextState | State)
                dfclus = pd.read_csv(clusterfilename)
                dfprod = pd.read_csv(probfilename)
                f = open(configfilename, "r")
                configSettings = f.read()
                f.close()
                configSettingsJson = json.loads(configSettings)
                activity = configSettingsJson['activity']
                sessionid = configSettingsJson['sessionid']
                f = open(filename, "r")
                configSettings = f.read()
                f.close()
                configSettingsJson = json.loads(configSettings)
                groupswitching = configSettingsJson['groupswitching']  # max allowed cluster hops per session
                page_threshold = configSettingsJson['transitionprobability']  # min acceptable transition probability
                chain_count = configSettingsJson['transitionsequence']  # window size for the sequence average
                chain_probability = configSettingsJson['sequencethreshold']  # min acceptable average sequence probability
                currentactivity = data[activity]
                if bool(user_records):
                    # Follow-up event: score it against the tracked session.
                    sessionid = data[sessionid]
                    if sessionid != user_records['SessionID']:
                        # Different session id: restart tracking for it.
                        user_records['SessionID'] = sessionid
                        prevactivity = ''
                        user_records['probarry'] = []
                        user_records['prevclusterid'] = -1
                        user_records['NoOfClusterHopping'] = 0
                        user_records['pageclicks'] = 1
                    else:
                        prevactivity = user_records['Activity']
                    user_records['Activity'] = currentactivity
                    pageswitch = True
                    if prevactivity == currentactivity or prevactivity == '':
                        # Same page (or first event of the session): nothing to score.
                        probability = 0
                        pageswitch = False
                        remarks = ''
                    else:
                        user_records['pageclicks'] += 1
                        # Look up the learned probability of this state transition.
                        df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
                        if df1.empty:
                            # Transition never observed in training data.
                            remarks = 'Anomaly Detected - User in unusual state'
                            anomaly = True
                            clusterid = -1
                            probability = 0
                            user_records['probarry'].append(probability)
                            n=int(chain_count)
                            num_list = user_records['probarry'][-n:]
                            avg = sum(num_list)/len(num_list)
                            # Map the current activity to its cluster.
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                        else:
                            probability = df1['Probability'].iloc[0]
                            user_records['probarry'].append(probability)
                            n=int(chain_count)
                            num_list = user_records['probarry'][-n:]
                            # Rolling average probability of the last n transitions.
                            davg = sum(num_list)/len(num_list)
                            # Map the current activity to its cluster.
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                            remarks = ''
                    if user_records['prevclusterid'] != -1:
                        if probability == 0 and user_records['prevclusterid'] != clusterid:
                            # Jumped to another activity cluster via an unknown transition.
                            user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
                            if user_records['pageclicks'] == 1:
                                remarks = 'Anomaly Detected - Frequent Cluster Hopping'
                                anomaly = True
                            else:
                                remarks = 'Cluster Hopping Detected'
                            user_records['pageclicks'] = 0
                            if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
                                remarks = 'Anomaly Detected - Multiple Cluster Hopping'
                                anomaly = True
                        elif probability == 0:
                            remarks = 'Anomaly Detected - Unusual State Transition Detected'
                            anomaly = True
                        elif probability <= float(page_threshold):
                            remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                            anomaly = True
                    else:
                        if pageswitch == True:
                            if probability == 0:
                                remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                anomaly = True
                            elif probability <= float(page_threshold):
                                remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                                anomaly = True
                            else:
                                remarks = ''
                    # NOTE(review): 'davg' is only assigned on the known-transition
                    # path above; the unknown-transition path computes 'avg'
                    # instead, so this line raises NameError there (caught below
                    # and reported as 'Bad Request') — confirm whether 'avg' was
                    # intended here.
                    if davg < float(chain_probability):
                        if anomaly == False:
                            remarks = 'Anomaly Detected - In-frequent Pattern Detected'
                            anomaly = True
                else:
                    # First ever request: initialise the session-tracking state.
                    user_records['SessionID'] = data[sessionid]
                    user_records['Activity'] = data[activity]
                    user_records['probability'] = 0
                    user_records['probarry'] = []
                    user_records['chainprobability'] = 0
                    user_records['prevclusterid'] = -1
                    user_records['NoOfClusterHopping'] = 0
                    user_records['pageclicks'] = 1
                    # Record the cluster of the first activity for later
                    # cluster-hopping comparisons.
                    for index, row in dfclus.iterrows():
                        clusterlist = row["clusterlist"]
                        if currentactivity in clusterlist:
                            clusterid = row["clusterid"]
                    user_records['prevclusterid'] = clusterid
                outputStr = {'status':'SUCCESS','data':{'Anomaly':str(anomaly),'Remarks':str(remarks)}}
                return HttpResponse(json.dumps(outputStr),content_type="application/json")
            except Exception as e:
                print(e)
                return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
    else:
        msg = help_text(request,usecaseid,version)
        return HttpResponse(msg,content_type="text/plain")
urls.py | """mpgWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, re_path
from appfe.modelTraining import views
from appfe.modelTraining import upload_views
from appfe.modelTraining import bc_views
from appfe.modelTraining import mltest_views
from appfe.modelTraining import train_views
from appfe.modelTraining import dg_views
from appfe.modelTraining import settings_views
from appfe.modelTraining import drift_views
from appfe.modelTraining import landing_views
from appfe.modelTraining import mllite_views
from appfe.modelTraining import trustedAI_views
from appfe.modelTraining import llm_views
from appfe.modelTraining import visualizer_views as v
from appfe.modelTraining import prediction_views
from django.urls import path, re_path
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('appfe.api.urls')),
path('', views.index, name="index"),
re_path('^$',views.index,name='Homepage'),
re_path('prediction', prediction_views.Prediction, name="Prediction"),
path('edit/<int:id>', views.edit),
path('update/<int:id>', views.update),
path('opentraining/<int:id>/<int:currentVersion>',views.opentraining),
path('opentraininglogs/<int:id>/<int:currentVersion>',landing_views.opentraininglogs),
path('show',views.show,name="show"),
path('ucdetails/<int:id>',views.ucdetails,name='ucdetails'),
path('delete/<int:id>', views.destroy,name='DeleteUseCase'),
path('deleteversion/<int:id>',views.remove_version,name='RemoveVersion'),
path('deletes3Bucket/<str:name>', settings_views.removes3bucket,name='removes3bucket'),
path('deleteGcsBucket/<str:name>', settings_views.removegcsbucket,name='removegcsbucket'),
path('deleteAzureBucket/<str:name>', settings_views.removeazurebucket,name='removeazurebucket'),
path('publish/<int:id>',views.publish),
path('createpackagedocker/<int:id>/<int:version>',views.createpackagedocker),
path('stoptraining',train_views.stoptraining),
path('downloadPackage/<int:id>/<int:version>',views.downloadpackage),
re_path('startmodelservice',views.startmodelservice,name="startmodelservice"),
re_path('stopmodelservice',views.stopmodelservice,name="stopmodelservice"),
path('retrain/<int:id>/<int:currentVersion>', landing_views.retrain),
re_path('computetoAWS',settings_views.computetoAWS,name='computeInfrastructure'),
re_path('computetoLLaMMA7b',settings_views.computetoLLaMMA7b,name='computeInfrastructure'),
re_path('computetoGCPLLaMA13B',settings_views.computetoGCPLLaMA13B,name='computeInfrastructure'),
re_path('help',views.help,name = "help"),
re_path('mlac_userguide',views.mlac_userguide,name = "mlac_userguide"),
path('launchmodel/<int:id>/<int:version>', landing_views.launchmodel),
path('modxplain/<int:id>/<int:version>', landing_views.modxplain),
path('moddrift/<int:id>/<int:version>',landing_views.moddrift),
re_path('ConvertOnnx', mllite_views.ConvertOnnx, name="ConvertOnnx"),
re_path('runtimeutility', mllite_views.runtimeutility, name="runtimeutility"),
re_path('sagepredict', mllite_views.sageprediction, name="sageprediction"),
re_path('mlstyles', views.mlstyles, name="mlstyles"),
re_path('mltrain', views.mltrain, name="mltrain"),
re_path('usecasefilter', views.usecasefilter, name="usecasefilter"),
re_path('mlpredict', views.mlpredict, name="mlpredict"),
re_path('getdataclasses',views.getdataclasses,name="getdataclasses"),
re_path('usecases', views.AIusecases, name="AIusecases"),
re_path('modelkafka',views.modelkafka,name="ModelKafka"),
re_path('AionProblem', views.AionProblem, name="AionProblem"),
re_path('UQTesting', mltest_views.UQTesting, name="UQTesting"),
re_path('maaccommand',views.maaccommand,name='MAAC'),
re_path('GCSbucketAdd',settings_views.GCSbucketAdd,name="gcsbucket"),
re_path('adds3bucket',settings_views.adds3bucket,name="adds3bucket"),
re_path('azurestorageAdd',settings_views.azurestorageAdd,name="azurestorageAdd"),
re_path('features', views.features, name="features"),
re_path('downloadedareport',upload_views.downloadedareport,name="downloadedareport"),
re_path('downloadxplainreport',views.downloadxplainreport,name="downloadxplainreport"),
re_path('downlpredictreport',views.downlpredictreport,name="DownloadPrediction"),
re_path('LoadBasicConfiguration',views.LoadBasicConfiguration,name='LoadBasicConfiguration'),
re_path('LoadAdvanceConfiguration',views.LoadAdvanceConfiguration,name='LoadAdvanceConfiguration'),
re_path('uploaddatafromscript',upload_views.uploaddatafromscript,name='uploaddatafromscript'),
re_path('features', views.features, name="features"),
re_path('uploadDatafromSatandardDataset',upload_views.uploadDatafromSatandardDataset,name="uploadDatafromSatandardDataset"),
re_path('uploadDatafromunsupervisedmodel',views.uploadDatafromunsupervisedmodel,name="uploadDatafromunsupervisedmodel"),
re_path('mltesting',mltest_views.mltesting,name='mltesting'),
re_path('mllite',views.mllite,name="MLLite"),
re_path('settings',settings_views.settings_page,name="settings"),
re_path('openneural',mllite_views.openneural,name="openneural"),
re_path('Tfliteneural',mllite_views.Tfliteneural,name="Tfliteneural"),
re_path('encryptedpackage',views.encryptedpackage,name='encryptedpackage'),
re_path('ABtesting', mltest_views.ABtest, name="ABtesting"),
re_path('uploadedData', upload_views.uploadedData, name='uploadedData'),
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
re_path('uploadedTextData', llm_views.uploadedTextData, name='uploadedTextData'),
re_path('getTextLabel', llm_views.getTextLabel, name='getTextLabel'),
re_path('downloadTextLabelReport',llm_views.downloadTextLabelReport,name="downloadTopicReport"),
# --------------------------------------------------------
# QnA Generator using LLM related changes
# --------------------------------------------------------
re_path('genearateQA', llm_views.genearateQA, name='genearateQA'),
re_path('downloadQnAReport',llm_views.downloadQnAReport,name="downloadQnAReport"),
# --------------------------------------------------------
re_path('advanceconfig', bc_views.savebasicconfig, name='Advance'),
re_path('edaReport',upload_views.EDAReport,name='edareport'),
re_path('readlogfile',views.readlogfile,name="readlogfile"),
re_path('flcommand',views.flcommand,name="flcommand"),
re_path('openmlflow',views.mlflowtracking,name="MLflow"),
re_path('basicconfig',bc_views.basicconfig,name='basicConfig'),
re_path('Advance',views.Advance,name='Advance'),
re_path('uploaddata', views.uploaddata, name='uploaddata'),
re_path('dataupload', views.Dataupload, name='dataupload'),
re_path('trainmodel', train_views.trainmodel, name='next'),
#Sagemaker
re_path('Sagemaker',mllite_views.Sagemaker,name="Sagemaker"),
re_path('batchlearning',views.batchlearning,name="batchlearning"),
# EDA Reports changes
re_path('gotoreport', views.gotoreport, name='report'),
re_path('llmmodelevaluate',train_views.llmmodelevaluate, name='llmmodelevaluate'),
# EDA Visualization changes
re_path('getgraph',views.getgraph,name="getgraph"),
# Fairness Metrics changes
re_path('getmetrics',views.getmetrics,name="getmetrics"),
re_path('getDeepDiveData',views.getDeepDiveData,name="getDeepDiveData"),
# 12686:Data Distribution related Changes
re_path('getDataDistribution',views.getDataDistribution,name="getDataDistribution"),
re_path('licensekey',views.licensekey,name="licensekey"),
# -------------------------------- Graviton-Integration Changes S T A R T --------------------------------
re_path('getuserdata',views.getuserdata,name="getuserdata"),
re_path('getdataservice',views.getdataservice,name="getdataservice"),
# ------------------------------------------------ E N D -------------------------------------------------
re_path('getdataimbalance',views.getdataimbalance,name="getdataimbalance"),
re_path('trainresult',train_views.trainresult,name='trainresult'),
re_path('LoadDataForSingleInstance',views.LoadDataForSingleInstance,name='LoadDataForSingleInstance'),
re_path('PredictForSingleInstance',prediction_views.PredictForSingleInstance,name='PredictForSingleInstance'),
re_path('stateTransitionSettings',views.stateTransitionSettings,name='stateTransitionSettings'),
re_path('instancepredict',views.instancepredict,name='predict'),
re_path('onnxruntime',views.onnxruntime,name='onnxruntime'),
re_path('home',views.Dataupload,name='manage'),
re_path('show',views.show,name="show"),
re_path('delete',views.show,name="delete"),
re_path('inputdrift', landing_views.inputdrift, name='inputdrift'),
re_path('dotextSummarization',views.dotextSummarization,name='textSummarization'),
re_path('outputdrift', views.outputdrift, name='outputdrift'),
re_path('xplain', v.xplain, name='xplain'),
re_path('sensitivity', trustedAI_views.sensitivityAnalysis, name='sensitivity'),
re_path('fairnesmetrics', trustedAI_views.fairnesmetrics, name='fairnesmetrics'),
re_path('handlefairness', trustedAI_views.handlefairness, name='handlefairness'),
re_path('performance', trustedAI_views.performance_metrics, name='performance'),
re_path('uquncertainty', trustedAI_views.uquncertainty, name='uquncertainty'),
re_path('uqtransparency', trustedAI_views.uqtransparency, name='uqtransparency'),
re_path('RLpath',views.RLpath,name='RLpath'),
path('opendetailedlogs/<int:id>/<int:currentVersion>', views.opendetailedlogs, name='logfile'),
path('downloadlogfile/<int:id>/<int:currentVersion>',views.downloadlogfile),
path('openmodelevaluation/<int:id>',views.openmodelevaluation,name='openmodelevaluation'),
re_path('startPublishServices',settings_views.startPublishServices,name="PublishService"),
re_path('startKafka',settings_views.startKafka,name='startKafka'),
re_path('startService',views.startService,name='startService'),
re_path('startTracking',views.startTracking,name="startTracking"),
re_path('Drift', drift_views.Drift, name='Drift'),
re_path('Distribution', drift_views.Distribution, name='Distribution'),
re_path('newfile', views.newfile, name='newfile'),
re_path('Evaluate', drift_views.Evaluate, name='Evaluate'),
re_path('qlearning',views.qlearning,name='qlearning'),
re_path('listfiles',upload_views.listfiles,name='listfiles'),
#url('actionavalanche',views.actionavalanche,name='actionavalanche'),
re_path('sqlAlchemy',upload_views.sqlAlchemy,name='sqlAlchemy'),
re_path('submitquery',upload_views.submitquery,name='submitquery'),
re_path('validatecsv',upload_views.validatecsv,name='validatecsv'),
path('ObjLabelAdd/<int:id>',views.ObjLabelAdd),
path('objectlabel/<int:id>',views.objectlabel),
path('imagelabel/<int:id>',views.imagelabel),
path('ObjLabelRemove/<int:id>',views.ObjLabelRemove),
re_path('objectlabelling',views.objectlabelling,name='objectlabelling'),
re_path('imagelabelling',views.imagelabelling,name='imagelabelling'),
re_path('ObjLabelDiscard',views.ObjLabelDiscard,name='ObjLabelDiscard'),
re_path('ObjLabelNext',views.ObjLabelNext,name='ObjLabelNext'),
re_path('ObjLabelPrev',views.ObjLabelPrev,name="ObjLabelPrev"),
re_path('saveaionconfig',settings_views.saveaionconfig,name='saveaionconfig'),
re_path('savegravitonconfig',settings_views.savegravitonconfig,name='savegravitonconfig'),
re_path('saveopenaiconfig',settings_views.saveopenaiconfig,name="saveopenaiconfig"),
re_path('getvalidateddata',views.getvalidateddata,name="getvalidateddata"),
re_path('updateawsconfig',settings_views.amazonec2settings,name="amazonec2settings"),
re_path('updategcpconfig',settings_views.gcpcomputesettings,name="gcpcomputesettings"),
re_path('localsetings',views.localsetings,name="localsetings"),
re_path('ImgLabelNext',views.ImgLabelNext,name='ImgLabelNext'),
re_path('objectlabeldone',views.objectlabeldone,name='ObjectLabeling'),
re_path(r'^get_table_list', upload_views.get_table_list, name='get_table_list'),
re_path(r'^getdatasetname', views.getdatasetname, name='getdatasetname'),
re_path(r'^get_tables_fields_list', upload_views.get_tables_fields_list, name='get_tables_fields_list'),
re_path(r'^validate_query', upload_views.validate_query, name='validate_query'),
re_path(r'^trigger_DAG', views.trigger_DAG, name = 'trigger_DAG'),
# The home page
path('dataoperations', views.dataoperations, name='dataoperations'),
path('summarization',views.summarization,name='summarization'),
path('datalabel', views.datalabel, name='datalabel'),
path('upload_and_read_file_data', views.upload_and_read_file_data, name='upload_and_read_file_data'),
path('apply_rule', views.apply_rule, name='apply_rule'),
path('apply_rule_ver2', views.apply_rule_ver2, name='apply_rule_ver2'),
path('download_result_dataset', views.download_result_dataset, name='download_result_dataset'),
path('get_sample_result_of_individual_rule', views.get_sample_result_of_individual_rule,
name='get_sample_result_of_individual_rule'),
path('get_sample_result_of_individual_rule_ver2', views.get_sample_result_of_individual_rule_ver2,
name='get_sample_result_of_individual_rule_ver2'),
path('upload_and_read_test_data', views.upload_and_read_test_data, name='upload_and_read_test_data'),
path('get_label_and_weightage', views.get_label_and_weightage, name='get_label_and_weightage'),
path('datagenrate', dg_views.datagenrate, name='datagenrate'),
path('generateconfig', dg_views.generateconfig, name='generateconfig'),
path('StationarySeasonalityTest', views.StationarySeasonalityTest, name='StationarySeasonalityTest'),
path('modelcompare', views.modelcompare, name='modelcompare'),
path('textsummarization', views.textsummarization, name='textsummarization'),
path('azureOpenAiDavinci', llm_views.azureOpenAiDavinci, name='azureOpenAiDavinci'),
path('azureOpenAiDavinciSumarization', llm_views.azureOpenAiDavinciSumarization, name='azureOpenAiDavinciSumarization'),
# LLM Testing
path('llmtesting', views.llmtesting, name='llmtesting'),
path('llmtestingresult', views.llmtestingresult, name='llmtestingresult'),
re_path('llmtestreport',views.llmtestreport,name="llmtestreport"),
# Code Clone Detection
path('codeclonedetectionresult', views.codeclonedetectionresult, name='codeclonedetectionresult'),
re_path('codeclonereport',views.codeclonereport,name="codeclonereport"),
re_path('evaluateprompt',views.evaluatepromptmetrics,name="evaluatepromptmetrics"),
path('libraries', views.libraries, name='libraries'), #To display libraries
]
#df=pd.read_csv("C:\Project\Analytics\Deployment\germancredit_9\germancreditdata.csv")
#
#bool_cols = [col for col in df if np.isin(df[col].dropna().unique(), [0, 1]).all()]
#
#bool_cols
|
error_handler.py | from django.http import HttpResponse
from django.conf import settings
import traceback
class ErrorHandlerMiddleware:
    # Django middleware that intercepts unhandled view exceptions and
    # replies with a generic 500 page instead of leaking details.
    # NOTE(review): process_exception returns the 500 response even when
    # settings.DEBUG is True, which hides Django's debug traceback page in
    # development — confirm that is intended.
    def __init__(self, get_response):
        # get_response: next middleware/view callable in the chain.
        self.get_response = get_response
    def __call__(self, request):
        # No per-request behavior; this middleware only acts on exceptions.
        response = self.get_response(request)
        return response
    def process_exception(self, request, exception):
        if not settings.DEBUG:
            if exception:
                # Format your message here
                # `message` is built but currently unused — it is a template
                # for forwarding errors to a chat/alert channel (see below).
                message = "**{url}**\n\n{error}\n\n````{tb}````".format(
                    url=request.build_absolute_uri(),
                    error=repr(exception),
                    tb=traceback.format_exc()
                )
                # Do now whatever with this message
                # e.g. requests.post(<slack channel/teams channel>, data=message)
        return HttpResponse("Error processing the request.", status=500)
wsgi.py | """
WSGI config for ux project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment already
# set DJANGO_SETTINGS_MODULE (setdefault does not override an existing value).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings')
application = get_wsgi_application()
|
asgi.py | """
ASGI config for ux project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings unless the environment already
# set DJANGO_SETTINGS_MODULE (setdefault does not override an existing value).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings')
application = get_asgi_application()
|
settings.py | """
Django settings for mpgWebApp project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from os.path import expanduser
import platform
from appbe.dataPath import DATA_DIR
#from cloghandler import ConcurrentRotatingFileHandler
# Directory that holds the project's SQLite database file.
sql_database_path = os.path.join(DATA_DIR,'sqlite')
# exist_ok avoids the check-then-create race (TOCTOU) of the previous
# "if not isdir: makedirs" pattern and is a no-op when the dir exists.
os.makedirs(sql_database_path, exist_ok=True)
# None disables Django's request-size safety limits entirely.
# NOTE(review): this removes protection against oversized uploads and
# field-bombing requests — confirm it is acceptable for deployment.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
DATA_UPLOAD_MAX_MEMORY_SIZE = None
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath()))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key is hard-coded in source; for any non-local
# deployment it should be loaded from the environment instead.
SECRET_KEY = 'y8d*&k0jv4c*zu^ykqz$=yyv@(lcmz495uj^()hthjs=x&&g0y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header — tighten before exposing publicly.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'appfe.modelTraining',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Project middleware: converts unhandled exceptions into a generic
    # 500 response (see appfe/ux/error_handler.py).
    'appfe.ux.error_handler.ErrorHandlerMiddleware'
]
ROOT_URLCONF = 'appfe.ux.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'appfe.ux.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file lives under the DATA_DIR/sqlite directory created above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(sql_database_path, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,'static')
|
forms.py | from django import forms
from modelTraining.models import usecasedetails
import os
class usecasedetailsForm(forms.ModelForm):
    """ModelForm exposing every field of the usecasedetails model."""
    class Meta:
        model = usecasedetails
        fields = "__all__"
from modelTraining.models import Existusecases
class ExistusecasesForm(forms.ModelForm):
    """ModelForm exposing every field of the Existusecases model."""
    class Meta:
        model = Existusecases
        fields = "__all__"
forms.py | from django import forms
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
class usecasedetailsForm(forms.ModelForm):
    """ModelForm exposing every field of the usecasedetails model."""
    class Meta:
        model = usecasedetails
        fields = "__all__"
class ExistusecasesForm(forms.ModelForm):
    """ModelForm exposing every field of the Existusecases model."""
    class Meta:
        model = Existusecases
        fields = "__all__"
views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import time
from django.template import loader
from django import template
from django.views.decorators.csrf import csrf_exempt
from os import walk
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from appbe import help_Text as ht
import random
from appbe import service_url
from appbe import compute
from appbe import installPackage
from appbe.pages import getusercasestatus
from appbe import images_analysis as ia
from django.db.models import Max, F
from appbe.aion_config import settings
from appbe.aion_config import get_graviton_data
from appbe.aion_config import get_llm_data
from appbe.aion_config import get_edafeatures
from appbe.training import calculate_total_activities
from appbe.training import calculate_total_interations
from appbe.training import checkModelUnderTraining
from appbe.training import checkversionrunningstatus
from appbe.training import changeModelStatus
from appbe.training import getStatusCount
from appbe.training import getModelStatus
from appbe.training import check_unsupported_col
from appbe.publish import chech_publish_info
from appbe.publish import check_input_data
import uuid
import numpy as np
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe.validatecsv import csv_validator
from appbe.aion_config import addKafkaModel
from appbe.aion_config import getrunningstatus
from appbe.aion_config import aion_service
from appbe.pages import getversion
from appbe.s3bucketsDB import get_s3_bucket
from appbe.s3bucketsDB import read_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.gcsbucketsDB import read_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
from appbe.azureStorageDB import read_azureStorage
from appbe.dataIngestion import getcommonfields
from appbe.dataIngestion import ingestDataFromFile
from appbe.dataIngestion import delimitedsetting
import pdfplumber
from docx import Document
from appbe.trainresult import ParseResults
import pandas as pd
import numpy as np
import re
import xml.etree.ElementTree as ET
import json
import glob
from appbe import dataPath
from pathlib import Path
import urllib, base64
import os
from os.path import expanduser
import platform
import time
import sys
import csv
import subprocess
import base64
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.forms import usecasedetailsForm
from appfe.modelTraining.models import Existusecases
from django.shortcuts import get_list_or_404, get_object_or_404
from pandas import json_normalize
from django.contrib.sessions.models import Session
import logging
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
LOG_FILE_NAME = 'model_training_logs.log'
LOG_FOLDER = 'log'
# Create the working directories up front; exist_ok avoids the
# check-then-create race (TOCTOU) of the previous "if not isdir" pattern.
os.makedirs(DATA_FILE_PATH, exist_ok=True)
os.makedirs(CONFIG_FILE_PATH, exist_ok=True)
os.makedirs(DEPLOY_LOCATION, exist_ok=True)
# EION_SCRIPT_PATH = 'C:\\Project\\Analytics\\eion\\eion\\eion.py'
PYTHON_PATH = 'python.exe'
AION_VERSION = getversion()
usecasetab = settings()
#AION_VERSION
# MainPage
logg_obj = logg(LOG_LOCATION)
log = logg_obj.create_log(AION_VERSION)
def index(request):
    """Render the AION landing page for the configured use cases."""
    from appbe.pages import index_page
    _, page_context, template = index_page(request, usecasedetails, Existusecases)
    page_context['version'] = AION_VERSION
    return render(request, template, page_context)
def localsetings(request):
    """Persist local compute settings and return to the use-case list."""
    from appbe.pages import get_usecase_page
    try:
        compute.updatelocalsetings(request)
        time.sleep(2)  # give the settings update a moment to take effect
        request.session['IsRetraining'] = 'No'
        _, page_context, template = get_usecase_page(request, usecasedetails, Existusecases)
        page_context['version'] = AION_VERSION
        return render(request, template, page_context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html',
                      {'error': 'Fail to update localsetings', 'version': AION_VERSION})
def computetoAWS(request):
    """Switch compute settings to the remote/AWS configuration."""
    from appbe.pages import get_usecase_page
    try:
        compute.updateToComputeSettings(request)
        time.sleep(2)  # allow the settings change to propagate
        request.session['IsRetraining'] = 'No'
        _, page_context, template = get_usecase_page(request, usecasedetails, Existusecases)
        page_context['version'] = AION_VERSION
        return render(request, template, page_context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html',
                      {'error': 'Fail to update ComputeSettings', 'version': AION_VERSION})
def licensekey(request):
    """Handle the license form.

    'generatelicense' creates a new license key from the submitted user
    key; any other command applies the submitted license key and returns
    to the use-case page. Errors fall back to usecases.html.
    """
    try:
        command = request.POST['licensesubmit']
        if command.lower() == 'generatelicense':
            userkey = request.POST['userkey']
            from records import pushrecords
            msg = pushrecords.generateLicenseKey(userkey)
            context = {'msg':msg}
            context['selected'] = 'License'
            print(context)
            return render(request,'licenseexpired.html',context)
        else:
            licensekey = request.POST['licensekey']
            from records import pushrecords
            pushrecords.updateLicense(licensekey)
            from appbe.pages import get_usecase_page
            status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
            context['version'] = AION_VERSION
            return render(request,action,context)
    except Exception as e:
        print(e)
        return render(request, 'usecases.html',{'error': 'Fails in loading the page','version':AION_VERSION})
def help(request):
    """Render the user-guide help page."""
    # NOTE: shadows the builtin help(); kept because the URL config binds this name.
    page_context = {'selected': 'userguide', 'usecasetab': usecasetab, 'version': AION_VERSION}
    return render(request, 'help.html', page_context)
def mlac_userguide(request):
    """Render the MLAC section of the help page."""
    page_context = {'selected': 'mlac_userguide', 'usecasetab': usecasetab, 'version': AION_VERSION}
    return render(request, 'help.html', page_context)
def AionProblem(request):
    # Store the selected algorithm/problem type in the session and echo it back.
    # NOTE(review): a non-POST request falls through and returns None, which
    # Django rejects with a 500 — confirm this endpoint is only ever POSTed to.
    if request.method == "POST":
        AionProblem = request.POST["Algorithm"]
        request.session["AionProblem"] = AionProblem
        return HttpResponse(AionProblem)
def features(request):
    """Return the column names of a dataset as JSON.

    POST params:
        datatype: 'datapath' reads the CSV at 'datap'; 'scriptpath'
                  executes the Python script at 'scriptp', which must
                  produce a DataFrame named ``dfpy``.
    Returns an HttpResponse with the feature names (and, for scripts,
    the path of the CSV the DataFrame was saved to).
    """
    if request.method == "POST":
        typedata = request.POST['datatype']
        if typedata == "datapath":
            datapath = request.POST['datap']
            # BUG FIX: the original tested os.path.isfile(datapath) twice;
            # one check is sufficient.
            if os.path.isfile(datapath):
                df = pd.read_csv(datapath)
                modelfeature = df.columns.tolist()
                modelfeatures = json.dumps(modelfeature)
                return HttpResponse(modelfeatures)
            else:
                return HttpResponse(json.dumps("Data path does not exist "), content_type="application/error")
        elif typedata == "scriptpath":
            scriptPath = request.POST['scriptp']
            # Context manager guarantees the handle is closed even on error.
            with open(scriptPath, "r") as f:
                pythoncode = f.read()
            ldict = {}
            # SECURITY: exec of a user-supplied script runs arbitrary code on
            # this machine — acceptable only in a trusted local deployment.
            exec(pythoncode, globals(), ldict)
            df = ldict['dfpy']
            filetimestamp = str(int(time.time()))
            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp + '.csv')
            df.to_csv(dataFile, index=False)
            modelfeature = df.columns.tolist()
            output = {'features': modelfeature, 'datafile': dataFile}
            output = json.dumps(output)
            return HttpResponse(output)
def mllite(request):
    """Render the ML-Lite (model conversion) page."""
    from appbe.pages import mllite_page
    page_context = mllite_page(request)
    page_context['version'] = AION_VERSION
    return render(request, 'ConvertOnnx.html', page_context)
def usecasefilter(request):
    """List AI use cases filtered by the option chosen in the UI."""
    from appbe import mlstyles as mls
    chosen = request.GET["selectedoption"]
    page_context = mls.Aiusecases(request, chosen)
    page_context['listtype'] = chosen
    page_context['version'] = AION_VERSION
    return render(request, 'aiUseCases.html', page_context)
def AIusecases(request):
    """List the use cases that are already implemented."""
    from appbe import mlstyles as mls
    page_context = mls.Aiusecases(request, 'Implemented')
    page_context.update({'listtype': 'Implemented', 'version': AION_VERSION})
    return render(request, 'aiUseCases.html', page_context)
def mlstyles(request):
    """Render the ML-styles (data operations) page."""
    from appbe import mlstyles as mls
    page_context = mls.mlstyles(request)
    page_context.update({'selected': 'DataOperations', 'version': AION_VERSION})
    return render(request, 'mlstyles.html', page_context)
def mlpredict(request):
    """Run a prediction from the ML-styles page.

    Renders the page for prediction actions; otherwise returns whatever
    response object the backend produced (e.g. a file download).
    """
    from appbe import mlstyles as mls
    page_context, action = mls.mlpredict(request)
    page_context['selected'] = 'DataOperations'
    page_context['version'] = AION_VERSION
    if action in ('prediction', 'predictsingle'):
        return render(request, 'mlstyles.html', page_context)
    return page_context
def mltrain(request):
    """Launch training from the ML-styles page.

    Renders the page for the 'training' action; otherwise returns the
    backend's response object unchanged.
    """
    from appbe import mlstyles as mls
    page_context, action = mls.mltrain(request)
    page_context['selected'] = 'DataOperations'
    page_context['version'] = AION_VERSION
    if action == 'training':
        return render(request, 'mlstyles.html', page_context)
    return page_context
def getdatasetname(request):
    """Return, as a JSON HttpResponse, the names of all ingested datasets."""
    try:
        from appbe.dataPath import DATA_DIR
        from appbe.sqliteUtility import sqlite_db
        db_dir = os.path.join(DATA_DIR, 'sqlite')
        db = sqlite_db(db_dir, 'config.db')
        rows = db.read_data('dataingest')
        # Column 1 of each row holds the dataset name.
        data = [{'datasetname': row[1]} for row in rows]
    except Exception as exc:
        print(exc)
        data = []
    return HttpResponse(json.dumps(data))
def outputdrift(request):
    """Render the output-drift monitoring page for the active model.

    Requires a successfully trained classification or regression model;
    otherwise the page is rendered with an explanatory error message.
    """
    try:
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        computeinfrastructure = compute.readComputeConfig()
        if ModelStatus != 'SUCCESS':
            context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure}
        else:
            updatedConfigFile = request.session['config_json']
            # Context manager so the config handle is closed even on error.
            with open(updatedConfigFile, "r") as f:
                configSettings = f.read()
            configSettingsJson = json.loads(configSettings)
            problemtypes = configSettingsJson['basic']['analysisType']
            # The config stores one 'True' flag per analysis type; pick it.
            problem_type = ""
            for k in problemtypes.keys():
                if configSettingsJson['basic']['analysisType'][k] == 'True':
                    problem_type = k
                    break
            problem = problem_type
            ser_url = service_url.read_performance_service_url_params()
            iterName = request.session['UseCaseName'].replace(" ", "_")
            ModelVersion = request.session['ModelVersion']
            ser_url = ser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
            if problem.lower() not in ['classification','regression']:
                context = {'error': 'Output drift only available for classification and regression problems type', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,
                           'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
            else:
                context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'usecasetab':usecasetab,
                           'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'ser_url':ser_url,'trainingDataLocation':request.session['datalocation']}
        return render(request, 'outputdrif.html', context)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed.
        return render(request, 'outputdrif.html', {'error':'Fail to do outputdrift analysis','usecasetab':usecasetab})
# -------------------------------- Graviton-Integration Changes S T A R T --------------------------------
def getuserdata(request):
import requests
data = []
try:
graviton_url,graviton_userid = get_graviton_data()
gravitonURL = graviton_url
gravitonUserId = graviton_userid
# url = 'https://xenius.azurewebsites.net/api/dataservices?userid='+
url = gravitonURL + 'dataservices?userid=' + gravitonUserId
print(url)
response = requests.get(url)
statuscode = response.status_code
print(statuscode)
if statuscode == 200:
json_dictionary = json.loads(response.content)
data = json_dictionary['result']
print(data)
except Exception as e:
print(e)
data = []
data_json = json.dumps(data)
return HttpResponse(data_json)
def getdataservice(request):
    """Fetch metadata for one Graviton data service and return it as JSON."""
    import requests
    records = []
    dataServiceId = request.GET.get('DataServiceId')
    try:
        graviton_url, graviton_userid = get_graviton_data()
        url = graviton_url + 'getmetadata?userid=' + graviton_userid + '&dataserviceid=' + str(dataServiceId)
        response = requests.get(url)
        if response.status_code == 200:
            payload = json.loads(response.content)
            records = payload['result']
    except Exception as exc:
        print(exc)
        records = []
    return HttpResponse(json.dumps(records))
# ------------------------------------------------ E N D -------------------------------------------------
def getvalidateddata(request):
    """Pull validated data for a task from the Elixir service, save it as a
    CSV, and render the upload page with a ten-row preview.

    NOTE(review): ``elixir_ip`` and ``elixir_port`` are not defined in this
    module — presumably injected elsewhere; verify before enabling this
    endpoint.
    """
    import requests
    computeinfrastructure = compute.readComputeConfig()
    taskid = request.POST.get('elixirdatataskid')
    # BUG FIX: read the session values before the try blocks; previously
    # they were bound only inside the second try, so the except branch
    # could raise NameError instead of rendering the error page.
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    try:
        url = 'http://'+elixir_ip+':'+elixir_port+'/api/get_validation_result?task_id='+str(taskid)
        response = requests.get(url)
        if response.status_code == 200:
            json_dictionary = json.loads(response.content)
            data = json_dictionary['Result']
        else:
            data = []
    except Exception as e:
        print(e)
        data = []
    try:
        df = pd.DataFrame.from_dict(data)
        filetimestamp = str(int(time.time()))
        dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
        request.session['datalocation'] = str(dataFile)
        df.to_csv(dataFile, index=False)
        df_top = df.head(10)
        df_json = df_top.to_json(orient="records")
        df_json = json.loads(df_json)
        statusmsg = 'Data File Uploaded Successfully '
        request.session['currentstate'] = 0
        request.session['finalstate'] = 0
        request.session['datatype'] = 'Normal'
        context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                   'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'computeinfrastructure':computeinfrastructure}
        return render(request, 'upload.html', context)
    except Exception:
        context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,"usecaseerror":"Error in validating data!"}
        return render(request, 'upload.html', context)
def trigger_DAG(request):
    """Trigger the 'example_complex' Airflow DAG and relay its response."""
    from appfe.modelTraining import AirflowLib
    response = AirflowLib.TriggerDag("example_complex", "")
    return HttpResponse(response, content_type="application/json")
def Airflow(request):
    """Render the upload page with the Airflow panel enabled."""
    try:
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
                   'selected': 'monitoring', 'airflow': True}
        return render(request, 'upload.html', context)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed.
        return render(request, 'upload.html', {'error':'interrupted error'})
def Results(request):
    """Render the model-training page.

    BUG FIX: the original referenced an undefined local ``context`` and
    always raised NameError; render with an empty context instead.
    """
    return render(request, 'modeltraning.html', {})
def uploadnext(request):
    """Advance from the upload step to the basic-configuration step."""
    page_context = {'selected': 'modeltraning', 'version': AION_VERSION}
    return render(request, 'basicconfig.html', page_context)
def basicconfignext(request):
    """Advance from basic to advanced configuration."""
    from appbe import advance_Config as ac
    page_context = ac.basicconfignex(request)
    page_context['computeinfrastructure'] = compute.readComputeConfig()
    page_context['version'] = AION_VERSION
    return render(request, 'advancedconfig.html', page_context)
def updateRunConfig(_trainingTime, _filesize, _features, _modelname, _problem_type):
    """Append one training-run record to config/training_runs.json.

    Args:
        _trainingTime: run duration in minutes.
        _filesize: size of the training sample.
        _features: number of features used.
        _modelname: algorithm name.
        _problem_type: problem type label.
    Returns 'Success', or 'Fail' on any error (best effort — failures are
    deliberately not raised).
    """
    returnVal = 'Success'
    try:
        import psutil
        memInGB = round(psutil.virtual_memory().total / (1024 * 1024 * 1024))
        new_record = {
            "sampleSize": _filesize,
            "features": _features,
            "algorithm": _modelname,
            "machineResource": str(memInGB) + " GB",
            "trainingTime": str(_trainingTime) + " Mins",
            "problemtype": _problem_type
        }
        configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json')
        if os.path.isfile(configfilepath):
            with open(configfilepath,'r+') as file:
                file_data = json.load(file)
                file_data["runs"].append(new_record)
                # Rewrite the file in place from the start.
                file.seek(0)
                json.dump(file_data, file, indent = 4)
                # BUG FIX: without truncate(), leftover bytes from a
                # previously longer file would corrupt the JSON.
                file.truncate()
    except Exception:
        returnVal = 'Fail'
    return returnVal
def objectlabeldone(request):
    """Finish object labelling: switch the session to 'Object' mode and
    summarise annotation counts per label for the upload page."""
    # BUG FIX: read the compute config before the try block; previously a
    # failure inside the try could leave 'computeinfrastructure' unbound,
    # making the except branch raise NameError instead of rendering.
    computeinfrastructure = compute.readComputeConfig()
    try:
        request.session['datatype'] = 'Object'
        request.session['csvfullpath'] = request.session['objectLabelFileName']
        df = pd.read_csv(request.session['csvfullpath'])
        # count = total annotations per label, nunique = distinct images.
        summary = df.groupby(['Label']).agg({"File": {"count", "nunique"}})
        summary.columns = summary.columns.droplevel(0)
        summary = summary.reset_index()
        class_count = []
        for i in range(len(summary)):
            class_count.append({
                'Label': summary.loc[i, "Label"],
                'TotalAnnotations': summary.loc[i, "count"],
                'Images': summary.loc[i, "nunique"],
            })
        context = {'tab': 'upload', 'firstFile': pd.DataFrame(), 'dataa': class_count,
                   'textdetails': '', 'featuregraph': '', 'status_msg': 'Successfully Done',
                   'validcsv': True, 'computeinfrastructure': computeinfrastructure}
        return render(request, 'upload.html', context)
    except Exception:
        context = {'tab': 'upload', 'computeinfrastructure': computeinfrastructure,
                   "usecaseerror": "Error in labeling object!"}
        return render(request, 'upload.html', context)
def ObjLabelDiscard(request):
    # Abandon in-progress bounding-box edits and reload the labelling view.
    return redirect(reverse('objectlabelling'))
def ObjLabelAdd(request, id):
    """Record a new bounding-box annotation from the query string and
    return to the object-labelling view."""
    angle = request.GET.get("angle")
    gid = request.GET.get("gid")
    x1 = int(request.GET.get("xMin"))
    x2 = int(request.GET.get("xMax"))
    y1 = int(request.GET.get("yMin"))
    y2 = int(request.GET.get("yMax"))
    # Normalise so min <= max regardless of drag direction.
    xMin, xMax = min(x1, x2), max(x1, x2)
    yMin, yMax = min(y1, y2), max(y1, y2)
    height = request.GET.get("height")
    width = request.GET.get("width")
    labels = request.session['labels']
    labels.append({"id": id, "name": "", "xMin": xMin, "xMax": xMax,
                   "yMin": yMin, "yMax": yMax, "height": height,
                   "width": width, "angle": angle})
    request.session['labels'] = labels
    return redirect(reverse('objectlabelling'))
def imageeda(request):
    """Build the EDA context for labelled image data: per-label file
    counts, a bar graph, image-quality scores, and a base64 preview of
    the first image of each label.

    Returns a context dict for upload.html (or an error dict on failure).
    """
    try:
        computeinfrastructure = compute.readComputeConfig()
        request.session['datatype'] = 'Image'
        filename = request.session['csvfullpath']
        # Replace the working CSV with the label file produced by labelling.
        os.remove(filename)
        request.session['csvfullpath'] = request.session['LabelFileName']
        df = pd.read_csv(request.session['csvfullpath'])
        eda_result = ''
        duplicate_img = ''
        color_plt = ''
        df2 = df.groupby('Label', as_index=False)['File'].count().reset_index()
        df_json = df2.to_json(orient="records")
        df_json = json.loads(df_json)
        cfig = go.Figure()
        xaxis_data = df2['Label'].tolist()
        yaxis_data = df2['File'].tolist()
        cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
        cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
        bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
        firstFile = df.groupby('Label').first().reset_index()
        #firstFile['FilePath'] = firstFile['File'].apply(lambda x: os.path.join(request.session['datalocation'], x))
        images = []
        qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
        # Embed a base64 data-URI preview and quality score per label.
        for i in range(len(firstFile)):
            filename = firstFile.loc[i, "File"]
            filePath = os.path.join(request.session['datalocation'], filename)
            string = base64.b64encode(open(filePath, "rb").read())
            image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            firstFile.loc[i, "Image"] = image_64
            firstFile.loc[i, "Quality"] = qualityscore[filename]
        status_msg = 'Successfully Done'
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile,
                   'status_msg': status_msg,'computeinfrastructure':computeinfrastructure}
        return(context)
    except:
        context={'error':'Fail to load Eda result'}
        return (context)
def imagelabelling(request):
    """Step through image labelling one file at a time; once the index
    passes the last image, render the EDA summary instead."""
    if (request.session['currentIndex']) == (request.session['endIndex']+1):
        # All images labelled: show the EDA summary page.
        try:
            context = imageeda(request)
            return render(request, 'upload.html', context)
        except:
            context = {'error': 'Image labeling error'}
            return render(request, 'upload.html', context)
    else:
        try:
            df = pd.read_csv(request.session['csvfullpath'])
            filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
            # Inline the current image as a base64 data URI for the template.
            string = base64.b64encode(open(filePath, "rb").read())
            image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df)}
            return render(request, 'imagelabelling.html', context)
        except:
            context = {'error': 'Image labeling error'}
            return render(request, 'upload.html', context)
def objecteda(request):
    """Build the EDA context for object-detection labels: per-label
    annotation totals and distinct image counts.

    Returns a context dict for upload.html (or an error dict on failure).
    """
    request.session['datatype'] = 'Object'
    filename = request.session['csvfullpath']
    try:
        os.remove(filename)
    except:
        pass
    try:
        request.session['csvfullpath'] = request.session['LabelFileName']
        df = pd.read_csv(request.session['csvfullpath'])
        # count = total annotations per label, nunique = distinct images.
        df1 = df.groupby(['Label']).agg({"File":{"count","nunique"}})
        df1.columns = df1.columns.droplevel(0)
        df1 = df1.reset_index()
        class_count = []
        for i in range(len(df1)):
            dct = {}
            dct['Label'] = df1.loc[i, "Label"]
            dct['TotalAnnotations'] = df1.loc[i, "count"]
            dct['Images'] = df1.loc[i, "nunique"]
            class_count.append(dct)
        #orxml_file in glob.glob(request.session['datalocation'] + '/*.xml'):
        status_msg = 'Successfully Done'
        wordcloudpic = ''
        bargraph = ''
        firstFile = pd.DataFrame()
        context = {'tab': 'upload','firstFile':firstFile,'dataa': class_count,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True}
        return(context)
    except:
        context={'tab': 'upload','error':'Fail to load Eda result'}
        return(context)
def objectlabelling(request):
    """Render the object (bounding-box) labelling workflow.

    After the last image (``currentIndex == endIndex + 1``) the
    aggregated EDA summary from :func:`objecteda` is shown; otherwise the
    current image is base64-embedded and rendered in the bounding-box
    labelling template together with the stored session labels.
    """
    if (request.session['currentIndex']) == (request.session['endIndex']+1):
        try:
            context = objecteda(request)
            context['version'] = AION_VERSION
            return render(request, 'upload.html', context)
        except:
            return render(request, 'upload.html', {'error':'objectlabelling error','version':AION_VERSION})
    else:
        try:
            df = pd.read_csv(request.session['csvfullpath'])
            filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
            # Inline the image as a data URI so no media route is needed.
            string = base64.b64encode(open(filePath, "rb").read())
            image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            bounds = []
            context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'selectedfile':df["File"].iloc[request.session['currentIndex']]}
            context['version'] = AION_VERSION
            return render(request, 'objectlabelling.html',context)
        except:
            return render(request, 'objectlabelling.html',{'tab': 'upload','error':'Objectlabelling Error','version':AION_VERSION})
def imagelabel(request,id):
    """Store the label chosen via the query string and return to labelling."""
    chosen_label = request.GET.get("name")
    request.session['labels'] = chosen_label
    return redirect(reverse('imagelabelling'))
def objectlabel(request,id):
    """Rename the bounding-box label at 1-based position *id* in the session."""
    new_name = request.GET.get("name")
    position = int(id) - 1
    session_labels = request.session['labels']
    session_labels[position]["name"] = new_name
    request.session['labels'] = session_labels
    return redirect(reverse('objectlabelling'))
def ObjLabelRemove(request,id):
    """Delete bounding box *id* (1-based) and renumber the boxes after it."""
    removed_at = int(id) - 1
    boxes = request.session['labels']
    boxes.pop(removed_at)
    # Every box that followed the removed one shifts its id down by one.
    for box in boxes[removed_at:]:
        box["id"] = str(int(box["id"]) - 1)
    request.session['labels'] = boxes
    return redirect(reverse('objectlabelling'))
def ImgLabelNext(request):
    """Persist the label for the current image (if any) and advance.

    Appends ``<file>,<label>`` to the running label CSV when a label was
    chosen, then moves ``currentIndex`` forward, clears the pending label
    and redirects back to the labelling page.
    """
    df = pd.read_csv(request.session['csvfullpath'])
    filePath = df["File"].iloc[request.session['currentIndex']]
    if request.session['labels'] != '':
        dataFile = request.session['LabelFileName']
        # ``with`` closes the file on exit; the explicit close() the old
        # code carried inside the block was redundant and has been dropped.
        with open(dataFile,'a') as f:
            f.write(filePath + "," +
                    request.session['labels'] + "\n")
    request.session['currentIndex'] = request.session['currentIndex']+1
    request.session['labels'] = ''
    return redirect(reverse('imagelabelling'))
def ObjLabelPrev(request):
    """Save the current image's boxes, step back one image and reload its labels."""
    frame = pd.read_csv(request.session['csvfullpath'])
    # Capture the image we are leaving BEFORE the index moves back.
    leaving_image = frame["File"].iloc[request.session['currentIndex']]
    request.session['currentIndex'] = request.session['currentIndex'] - 1
    process_marked_area_on_image(leaving_image,request)
    return redirect(reverse('objectlabelling'))
def remove_labelling_from_csv(imagePath,request):
    """Drop every stored annotation row for *imagePath* from the label CSV.

    The CSV is rewritten in place only when it is non-empty and actually
    contains the image; otherwise the file is left untouched.
    """
    label_csv = request.session['LabelFileName']
    annotations = pd.read_csv(label_csv)
    if annotations.empty:
        return
    if imagePath not in annotations.values:
        return
    remaining = annotations.set_index("File").drop(imagePath, axis=0)
    remaining.to_csv(label_csv, index=True)
def process_marked_area_on_image(imagePath,request):
    """Persist the boxes drawn on *imagePath*, then reload session labels.

    Rewrites the label CSV (old rows for *imagePath* removed, current
    session boxes appended), then rebuilds ``request.session['labels']``
    from the CSV rows belonging to whichever image ``currentIndex`` now
    points at. Always returns True.
    """
    df = pd.read_csv(request.session['csvfullpath'])
    dataFile = request.session['LabelFileName']
    remove_labelling_from_csv(imagePath,request)
    write_coordinates_and_label_to_csv(imagePath,request)
    if request.session['currentIndex'] < len(df):
        image = df["File"].iloc[request.session['currentIndex']]
        request.session['labels'] = []
        with open(dataFile, 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                if row[0] == image:
                    # CSV layout: File,id,angle,xMin,xMax,yMin,yMax,height,width,name
                    labels = request.session['labels']
                    labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
                    request.session['labels'] = labels
        labels = request.session['labels']
    return True
def write_coordinates_and_label_to_csv(imagePath,request):
    """Append one CSV row per bounding box currently held in the session.

    Row layout (no header is written here):
    ``File,id,angle,xMin,xMax,yMin,yMax,height,width,name`` with every
    coordinate (and the id) rounded to the nearest integer; ``angle`` and
    ``name`` are written as-is.
    """
    dataFile = request.session['LabelFileName']
    # ``with`` handles closing; the old explicit close() inside the block
    # was redundant. The chained "+" concatenation is replaced by a field
    # list + join, which writes byte-identical rows.
    with open(dataFile, 'a') as f:
        for label in request.session['labels']:
            fields = [
                imagePath,
                str(round(float(label["id"]))),
                str(label["angle"]),
                str(round(float(label["xMin"]))),
                str(round(float(label["xMax"]))),
                str(round(float(label["yMin"]))),
                str(round(float(label["yMax"]))),
                str(round(float(label["height"]))),
                str(round(float(label["width"]))),
                label["name"],
            ]
            f.write(",".join(fields) + "\n")
def ObjLabelSelect(request):
    """Jump to the image picked from the file list, saving current boxes.

    Flushes the session boxes for the image being left into the label
    CSV, locates the selected image's row index, then reloads the session
    labels from the CSV rows belonging to the newly selected image.
    """
    selectedimage=request.GET.get('file')
    df = pd.read_csv(request.session['csvfullpath'])
    filePath = df["File"].iloc[request.session['currentIndex']]
    # Replace any previously saved rows for the image we are leaving.
    remove_labelling_from_csv(filePath,request)
    dataFile = request.session['LabelFileName']
    with open(dataFile,'a') as f:
        for label in request.session['labels']:
            f.write(filePath + "," +
                    str(round(float(label["id"]))) + "," +
                    str(label["angle"]) + "," +
                    str(round(float(label["xMin"]))) + "," +
                    str(round(float(label["xMax"]))) + "," +
                    str(round(float(label["yMin"]))) + "," +
                    str(round(float(label["yMax"]))) + "," +
                    str(round(float(label["height"]))) + "," +
                    str(round(float(label["width"]))) + "," +
                    label["name"] + "\n")
        f.close()
    # Linear scan for the selected image's positional index.
    currentIndex = 0
    for index,row in df.iterrows():
        #print(row['File'])
        if row['File'] == selectedimage:
            break
        else:
            currentIndex = currentIndex+1
    request.session['currentIndex'] = currentIndex
    if request.session['currentIndex'] < len(df):
        image = df["File"].iloc[request.session['currentIndex']]
        request.session['labels'] = []
        with open(dataFile, 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                if row[0] == image:
                    # CSV layout: File,id,angle,xMin,xMax,yMin,yMax,height,width,name
                    labels = request.session['labels']
                    labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
                    request.session['labels'] = labels
        labels = request.session['labels']
    return redirect(reverse('objectlabelling'))
def ObjLabelNext(request):
    """Save the current image's boxes and advance to the next image.

    Replaces any previously saved CSV rows for the current image with the
    boxes held in the session, increments ``currentIndex``, then reloads
    the session labels from the CSV rows of the next image (if any).
    """
    df = pd.read_csv(request.session['csvfullpath'])
    filePath = df["File"].iloc[request.session['currentIndex']]
    # Replace any previously saved rows for this image.
    remove_labelling_from_csv(filePath,request)
    dataFile = request.session['LabelFileName']
    with open(dataFile,'a') as f:
        for label in request.session['labels']:
            f.write(filePath + "," +
                    str(round(float(label["id"]))) + "," +
                    str(label["angle"]) + "," +
                    str(round(float(label["xMin"]))) + "," +
                    str(round(float(label["xMax"]))) + "," +
                    str(round(float(label["yMin"]))) + "," +
                    str(round(float(label["yMax"]))) + "," +
                    str(round(float(label["height"]))) + "," +
                    str(round(float(label["width"]))) + "," +
                    label["name"] + "\n")
        f.close()
    request.session['currentIndex'] = request.session['currentIndex']+1
    if request.session['currentIndex'] < len(df):
        image = df["File"].iloc[request.session['currentIndex']]
        request.session['labels'] = []
        with open(dataFile, 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                if row[0] == image:
                    # CSV layout: File,id,angle,xMin,xMax,yMin,yMax,height,width,name
                    labels = request.session['labels']
                    labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
                    request.session['labels'] = labels
        labels = request.session['labels']
    return redirect(reverse('objectlabelling'))
def encryptedpackage(request):
    """Build the encrypted deployment package and stream it to the browser.

    Falls back to re-rendering the use-cases page when the download
    cannot be produced.
    """
    from appbe.encryptedPackage import encrptpackage_command
    from appbe.encryptedPackage import download_sclient
    page_context = encrptpackage_command(request,Existusecases,usecasedetails)
    page_context['version'] = AION_VERSION
    try:
        # Task 9981: hand the packaged client back as a download.
        return download_sclient(request,page_context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html', page_context)
def StationarySeasonalityTest(request):
    """Run stationarity/seasonality analysis on the requested feature.

    Reads the dataset path, date feature and target feature from the
    query string and returns the analysis result as JSON.
    """
    from appbe.stationarity_seasonality_check import StationarySeasonalityTest as sst
    data_path = request.GET.get('datapath')
    date_feature = request.GET.get('datefeature')
    target_feature = request.GET.get('targetfeature')
    seasonality_flag = request.GET.get('seasonality_status')
    stationarity_flag = request.GET.get('stationarity_status')
    frame = pd.read_csv(data_path)
    checker = sst(frame, target_feature, date_feature)
    analysis_result = checker.analysis(seasonality_flag, stationarity_flag)
    return HttpResponse(json.dumps(analysis_result), content_type="application/json")
def dataoverframe(df):
    """Serialize facets-overview statistics for *df* as a base64 string."""
    from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
    generator = GenericFeatureStatisticsGenerator()
    stats_proto = generator.ProtoFromDataFrames([{'name': 'train', 'table': df}])
    return base64.b64encode(stats_proto.SerializeToString()).decode("utf-8")
def getimpfeatures(dataFile, numberoffeatures):
    """Return PCA top-10 feature names for wide datasets (>20 features).

    Narrow datasets skip the (expensive) PCA ranking and yield an empty
    list.
    """
    if numberoffeatures <= 20:
        return []
    from appbe.eda import ux_eda
    analyzer = ux_eda(dataFile, optimize=1)
    ranking = analyzer.getPCATop10Features()
    return ranking.index.values.tolist()
def uploaddata(request):
    """Render the data-upload page for the current use case.

    Behaviour depends on session state:

    * No use case selected -> upload page with an error prompt.
    * ``finalstate > 0`` and datatype Video/Image/Document/Object ->
      per-label EDA (word clouds, bar graph, sample images).
    * datatype llm_document/llm_code -> file inventory plus the compute
      target previously chosen for the use case.
    * otherwise -> tabular preview (top 10 rows, feature list, EDA
      sub-sampling hints).
    * No uploaded data yet -> fresh upload page.
    """
    from appbe import exploratory_Analysis as ea
    from appbe.aion_config import eda_setting
    # context={'test':'test'}
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    computeinfrastructure = compute.readComputeConfig()
    try:
        # A use case must exist before any data can be attached to it.
        if selected_use_case == 'Not Defined':
            context = {'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'tab': 'tabconfigure',
                        'usecaseerror': 'Please create a new use case for training the model or select an existing use case for retraining', 'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage()
                        ,'usecasetab':usecasetab,'version':AION_VERSION}
            return render(request, 'upload.html', context)
        if 'ModelVersion' in request.session:
            ModelVersion = request.session['ModelVersion']
        else:
            ModelVersion = 0
        if 'ModelStatus' in request.session:
            ModelStatus = request.session['ModelStatus']
        else:
            ModelStatus = 'Not Trained'
        if request.session['finalstate'] > 0:
            if request.session['datatype'] in ['Video', 'Image','Document','Object']:
                folderLocation = str(request.session['datalocation'])
                dataFile = os.path.join(folderLocation, request.session['csvfullpath'])
                df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
                # Unlabelled rows present: only a single word cloud can be
                # produced (Document datatype), no per-label EDA yet.
                if df['Label'].isnull().sum() > 0:
                    if request.session['datatype'] == 'Document':
                        dataDf = pd.DataFrame()
                        dataDict = {}
                        keys = ["text"]
                        for key in keys:
                            dataDict[key] = []
                        for i in range(len(df)):
                            filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
                            with open(filename, "r",encoding="utf-8") as f:
                                dataDict["text"].append(f.read())
                                f.close()
                        dataDf = pd.DataFrame.from_dict(dataDict)
                        tcolumns=['text']
                        wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns)
                        status_msg = 'Successfully Done'
                        request.session['currentstate'] = 0
                        firstFile = pd.DataFrame()
                        context = {'tab': 'upload','firstFile':firstFile,'validcsv': True,'singletextdetails':wordcloudpic,'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage()
                            ,'usecasetab':usecasetab,'version':AION_VERSION}
                        return render(request, 'upload.html', context)
                eda_result = ''
                duplicate_img = ''
                color_plt = ''
                # Label distribution bar graph shared by all media datatypes.
                df2 = df.groupby('Label', as_index=False)['File'].count().reset_index()
                df_json = df2.to_json(orient="records")
                df_json = json.loads(df_json)
                cfig = go.Figure()
                xaxis_data = df2['Label'].tolist()
                yaxis_data = df2['File'].tolist()
                cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
                cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
                bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
                firstFile = df.groupby('Label').first().reset_index()
                images = []
                if request.session['datatype'] == 'Image':
                    # One representative (base64-inlined) image per label,
                    # annotated with its quality score.
                    qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
                    for i in range(len(firstFile)):
                        filename = firstFile.loc[i, "File"]
                        filePath = os.path.join(request.session['datalocation'], filename)
                        string = base64.b64encode(open(filePath, "rb").read())
                        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
                        firstFile.loc[i, "Image"] = image_64
                        firstFile.loc[i, "Quality"] = qualityscore[filename]
                elif request.session['datatype'] == 'Document':
                    # Per-category word clouds for fully labelled documents.
                    dataDrift = ''
                    dataDf = pd.DataFrame()
                    dataDict = {}
                    keys = ["text","Label"]
                    for key in keys:
                        dataDict[key] = []
                    for i in range(len(df)):
                        filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
                        with open(filename, "r",encoding="utf-8") as f:
                            dataDict["text"].append(f.read())
                            f.close()
                        dataDict["Label"].append(df.loc[i,"Label"])
                    dataDf = pd.DataFrame.from_dict(dataDict)
                    wordcloudpic = ea.getCategoryWordCloud(dataDf)
                    status_msg = 'Successfully Done'
                    context = {'tab': 'upload','dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket()
                        ,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'version':AION_VERSION}
                    return render(request, 'upload.html', context)
                status_msg = 'Successfully Done'
                selected_use_case = request.session['UseCaseName']
                ModelVersion = request.session['ModelVersion']
                ModelStatus = request.session['ModelStatus']
                request.session['currentstate'] = 0
                context = {'tab': 'upload', 'featuregraph': bargraph, 'validcsv': True, 'firstFile': firstFile,
                           'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
                           'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt,'azurestorage':get_azureStorage(),
                           'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,
                           'usecasetab':usecasetab,'version':AION_VERSION
                           }
                return render(request, 'upload.html', context)
            elif request.session['datatype'].lower() in ['llm_document', 'llm_code']:
                # LLM fine-tuning data: list the files with a human-readable
                # cumulative size, plus the compute target for the use case.
                request.session['currentstate'] = 0
                dataFile = request.session['csvfullpath']
                df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
                filesCount = 0
                filesSize = 0
                files = []
                for index, row in df.iterrows():
                    filename = row['File']
                    files.append(filename)
                    filesCount = filesCount + 1
                    get_size = os.path.getsize(filename)
                    filesSize = round(filesSize + get_size, 1)
                if filesSize > 1048576:
                    size = round((filesSize / (1024 * 1024)), 1)
                    filesSize = str(size) + ' M'
                elif filesSize > 1024:
                    size = round((filesSize /1024), 1)
                    filesSize = str(size) + ' K'
                else:
                    filesSize = str(filesSize) + ' B'
                files = pd.DataFrame(files, columns=['File'])
                files.index = range(1, len(files) + 1)
                files.reset_index(level=0, inplace=True)
                files = files.to_json(orient="records")
                files = json.loads(files)
                from appbe.prediction import get_instance
                hypervisor, instanceid,region,image = get_instance(selected_use_case + '_' + str(ModelVersion))
                if hypervisor != '':
                    computeinfrastructure['computeInfrastructure'] = hypervisor
                else:
                    computeinfrastructure['computeInfrastructure'] = 'AWS'
                context = {'tab': 'upload',"selected_use_case":selected_use_case,"selectedPath":request.session['datalocation'],"selectedfile":request.session['fileExtension'],'csvgenerated': True,'filesCount':filesCount,'filesSize':filesSize,'files':files,
                           'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
                           'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'azurestorage':get_azureStorage(),
                           'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'datatype':request.session['datatype'],
                           'usecasetab':usecasetab,'version':AION_VERSION,"selectedfile":request.session['fileExtension'],"selectedPath":request.session['datalocation']
                           }
                return render(request, 'upload.html', context)
            else:
                # Tabular data: preview the top rows, surface important
                # features and the recommended EDA sub-sample size.
                dataFile = str(request.session['datalocation'])
                check_df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
                check_df.rename(columns=lambda x: x.strip(), inplace=True)
                featuresList = check_df.columns.tolist()
                numberoffeatures = len(featuresList)
                imp_features = getimpfeatures(dataFile, numberoffeatures)
                # check_df = pd.read_csv(dataFile)
                # check_df.rename(columns=lambda x: x.strip(), inplace=True)
                # ----------------------------
                # EDA Performance change
                # ----------------------------
                sample_size = int(eda_setting())
                samplePercentage = 100
                samplePercentval = 0
                showRecommended = False
                #dflength = len(eda_obj.getdata())
                dflength = len(check_df)
                if dflength > sample_size:
                    samplePercentage = round(float((sample_size/dflength) * 100),2)
                    samplePercentval = samplePercentage / 100
                    showRecommended = True
                # ----------------------------
                # df_top = df.head(10)
                df_top = check_df.head(10)
                df_json = df_top.to_json(orient="records")
                df_json = json.loads(df_json)
                statusmsg = ''
                selected_use_case = request.session['UseCaseName']
                ModelVersion = request.session['ModelVersion']
                ModelStatus = request.session['ModelStatus']
                request.session['currentstate'] = 0
                # EDA Subsampling changes
                context = {'range':range(1,101),'samplePercentage':samplePercentage,'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'selected_use_case': selected_use_case,'data': df_json,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),
                           'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'imp_features':imp_features,'numberoffeatures':numberoffeatures,
                           'version':AION_VERSION,
                           'selected': 'modeltraning','exploratory':False,'computeinfrastructure':computeinfrastructure}
        else:
            # Nothing uploaded yet: reset the workflow state.
            request.session['uploaddone'] = False
            request.session['currentstate'] = 0
            request.session['finalstate'] = 0
            clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
            context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'clusteringModels':clusteringModels,
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
                       'selected': 'modeltraning','computeinfrastructure':computeinfrastructure
                       }
        context['version'] = AION_VERSION
        return render(request, 'upload.html', context)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        print(e)
        return render(request, 'upload.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Fail to upload Data','usecasetab':usecasetab,'version':AION_VERSION})
def mlflowtracking(request):
    """Probe the local MLflow tracking server and show its HTTP status.

    Dead local ``data`` and a commented-out alternative request were
    removed; behaviour is unchanged (a connection failure still raises
    out of the view, as before).
    """
    import requests
    response = requests.get("http://localhost:5000/")
    statuscode = response.status_code
    context = {'statuscode':statuscode}
    context['version'] = AION_VERSION
    return render(request, 'openmlflow.html', context)
def readlogfile(request):
    """Return training progress extracted from the model's log file as JSON.

    Parses 'Status:-' lines out of the log, throttles how many are shown
    per poll via ``session['noflines']`` while training is running, and
    attaches the leaderboard parsed from the log. Falls back to stub
    messages when the log is missing or empty.
    """
    file_path = request.session['logfilepath']
    try:
        updatedConfigFile = request.session['config_json']
        f = open(updatedConfigFile, "r+")
        configSettingsData = f.read()
        configSettings = json.loads(configSettingsData)
        f.close()
        if os.path.exists(file_path):
            my_file = open(file_path, 'r',encoding="utf-8")
            file_content = my_file.read()
            my_file.close()
            # Progress lines are tagged 'Status:-'; newest last in the file.
            matched_lines = [line.replace('Status:-', '') for line in file_content.split('\n') if "Status:-" in line]
            matched_status_lines = matched_lines[::-1]
            if len(matched_status_lines) > 0:
                no_lines = len(matched_lines)
                # One additional status line is revealed per poll so the UI
                # appears to stream progress while training is running.
                if 'noflines' not in request.session:
                    request.session['noflines'] = 0
                request.session['noflines'] = request.session['noflines'] + 1
                if request.session['ModelStatus'] != 'SUCCESS':
                    numberoflines = request.session['noflines']
                    if numberoflines > no_lines:
                        numberoflines = no_lines
                        request.session['noflines'] = no_lines
                    matched_lines = matched_lines[0:numberoflines]
                # Most recent status; text after the '...' separator.
                matched_status_lines = matched_status_lines[0]
                output = getStatusCount(matched_lines,request.session['total_steps'])
                matched_status_lines = matched_status_lines.split('...')
                matched_status_lines = matched_status_lines[1]
                output2=[]
                output2.append(matched_status_lines)
                from appbe import leaderboard
                import pandas
                result = leaderboard.get_leaderboard(file_content)
                if result.empty==False:
                    result = result.to_html(classes='table',col_space='100px', index=False)
                else:
                    result = 'Leaderboard is not available'
                data_details = {'status':output2,'logs':output,'log_file':file_content,'leaderboard': result,'trainingstatus':request.session['ModelStatus']}
                return HttpResponse(json.dumps(data_details), content_type="application/json")
            else:
                # Log exists but carries no status lines yet.
                matched_lines = []
                matched_lines.append('Initializing Training Engine')
                data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
                return HttpResponse(json.dumps(data_details), content_type="application/json")
        else:
            # No log file on disk yet.
            stepsdone = 0
            matched_lines = []
            if request.session['ModelStatus'] == 'Running':
                matched_lines.append('Initializing Training Engine')
            else:
                matched_lines.append('Not Trained')
            data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
            return HttpResponse(json.dumps(data_details), content_type="application/json")
    except Exception as e:
        print(e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        matched_lines = []
        if request.session['ModelStatus'] == 'Running':
            stepsdone = 0
            matched_lines.append('Initializing Training Engine')
            data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
            return HttpResponse(json.dumps(data_details), content_type="application/json")
        else:
            matched_lines.append('Not Trained')
            data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines,'leaderboard':matched_lines,'trainingstatus':matched_lines}
            return HttpResponse(json.dumps(data_details), content_type="application/json")
# EDA Visualization changes
# ----------------------------
def getgraph(request):
    """Return the EDA visualization graph HTML for the current request."""
    from appbe import exploratory_Analysis as ea
    return HttpResponse(ea.get_edaGraph(request))
# ----------------------------
# --- 12686:Data Distribution related Changes S T A R T ---
def getDataDistribution(request):
    """Return the data-distribution chart HTML (task 12686)."""
    from appbe import exploratory_Analysis as ea
    return HttpResponse(ea.get_DataDistribution(request))
# ---------------------- E N D ----------------------
def getDeepDiveData(request):
    """Return the deep-dive EDA payload for the current request."""
    from appbe import exploratory_Analysis as ea
    return HttpResponse(ea.get_DeepDiveData(request))
# Fairness Metrics changes
# ----------------------------
def getmetrics(request):
    """Return the fairness-metrics payload for the current request."""
    from appbe import exploratory_Analysis as ea
    return HttpResponse(ea.get_fairmetrics(request))
# ----------------------------
def getdataimbalance(request):
    """Render the target-feature class balance as an HTML table + pie chart.

    Reads the dataset location and target feature from the training
    config in the session; skips the chart when the target has no class
    or more than 30 classes. The mpld3 figure is wired to the d3/mpld3
    script URLs passed in the query string.
    """
    d3_url = request.GET.get('d3_url')
    mpld3_url = request.GET.get('mpld3_url')
    updatedConfigFile = request.session['config_json']
    f = open(updatedConfigFile, "r+", encoding="utf-8")
    configSettingsData = f.read()
    configSettingsJson = json.loads(configSettingsData)
    df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8')
    targetFeature = configSettingsJson['basic']['targetFeature']
    df1 = df[targetFeature].value_counts().to_frame()
    if (len(df1) < 1):
        response = 'Data balancing detail is not available due to no class is found in target feature.'
    elif (len(df1) > 30):
        response = 'Data balancing detail is not available due to high number of classes in target feature.'
    else:
        dfStyler = df1.style.set_properties(**{'text-align': 'right'})
        dfStyler.set_table_styles([dict(selector='th', props=[('text-align', 'right')])])
        valueCount = dfStyler.to_html()
        import matplotlib.pyplot as plt
        import mpld3
        fig, ax = plt.subplots(figsize=[6.5,6])
        df2 = df[targetFeature].value_counts().sort_values()
        # Shrink the pie when there are many classes so the legend fits.
        _ncol = 1
        _radius = 0.5
        if (len(df1) > 10):
            _radius = 0.4
            _ncol = 1
        else:
            _radius = 0.6
            _ncol = 1
        ax = df2.plot(kind = 'pie', ylabel='', title=targetFeature, labeldistance=None, radius=_radius, autopct='%1.0f%%')
        ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol = _ncol)
        # ax.legend(bbox_to_anchor=(1,1), bbox_transform=plt.gcf().transFigure)
        plt.subplots_adjust(left=0.02, bottom=0.05, right=0.9)
        ax.get_yaxis().set_visible(False)
        html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url)
        response = valueCount + ' ' + html_graph
    return HttpResponse(response)
def dotextSummarization(request):
    """Run text summarization and render its results page."""
    from appbe.textSummarization import startSummarization
    summary_context = startSummarization(request,DEFAULT_FILE_PATH,CONFIG_FILE_PATH,DATA_FILE_PATH)
    summary_context['version'] = AION_VERSION
    return render(request, 'summarization.html', summary_context)
def openmodelevaluation(request,id):
    """Render one of the deep-check evaluation reports from the deployment.

    *id* selects the HTML artefact under ``<deploypath>/log``:
    1/2 boosting overfit (+condition), 3/4 smc (+condition), 5/6 mi
    (+condition). An unknown id or a missing/unreadable report falls back
    to the 'Not available' stub — the original if-chain left
    ``contentFile`` unbound for ids outside 1..6 and relied on the bare
    except to rescue the NameError; the dict lookup makes that explicit.
    """
    deploypath = request.session['deploypath']
    report_files = {
        1: 'boosting_overfit.html',
        2: 'boosting_overfit_condition.html',
        3: 'smc.html',
        4: 'smc_condition.html',
        5: 'mi.html',
        6: 'mi_con.html',
    }
    try:
        contentFile = os.path.join(deploypath, 'log', report_files[id])
        with open(contentFile, 'r', encoding="utf-8") as my_file:
            file_content = my_file.read()
        context = {'content': file_content,'status':request.session['ModelStatus']}
        context['version'] = AION_VERSION
        return render(request, 'deepcheck.html', context, content_type="text/html")
    except:
        context = {'content': 'Not available'}
        context['version'] = AION_VERSION
        return render(request, 'deepcheck.html', context, content_type="text/html")
def downloadlogfile(request,id,currentVersion):
    """Stream the training log of a use-case/version as a file download.

    The log path is taken from the recorded DeployPath when set,
    otherwise rebuilt from DEPLOY_LOCATION. Missing files (and any read
    error) yield a 'File Not Found' attachment rather than an error page.
    """
    import mimetypes
    from django.http import FileResponse
    p = usecasedetails.objects.get(id=id)
    model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
    if model[0].DeployPath != 'NA':
        file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log')
    else:
        file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log')
    try:
        if os.path.exists(file_path):
            my_file = open(file_path, 'r', encoding="utf-8")
            file_content = my_file.read()
            my_file.close()
            mime_type, _ = mimetypes.guess_type(file_path)
            response = HttpResponse(file_content, content_type=mime_type)#bugid 12513
            # Set the HTTP header for sending to browser
            filename = p.usecaseid+'.log'
            response['Content-Disposition'] = "attachment; filename=%s" % filename
            return response
        else:
            response = HttpResponse('File Not Found')#bugid 12513
            # Set the HTTP header for sending to browser
            filename = p.usecaseid+'.log'
            response['Content-Disposition'] = "attachment; filename=%s" % filename
            return response
    except Exception as e:
        # Any read failure degrades to the same 'File Not Found' download.
        response = HttpResponse('File Not Found')#bugid 12513
        # Set the HTTP header for sending to browser
        filename = p.usecaseid+'.log'
        response['Content-Disposition'] = "attachment; filename=%s" % filename
        return response
def opendetailedlogs(request,id,currentVersion):
    """Return the training log text of a use-case/version as JSON.

    Missing log files and read errors both answer with a
    'Status not available' payload.
    """
    usecase = usecasedetails.objects.get(id=id)
    model = Existusecases.objects.filter(ModelName=usecase,Version=currentVersion)
    if model[0].DeployPath != 'NA':
        file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log')
    else:
        file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log')
    try:
        if not os.path.exists(file_path):
            payload = {'content':'Status not available'}
            return HttpResponse(json.dumps(payload),content_type="application/json")
        with open(file_path, 'r', encoding="utf-8") as log_file:
            log_text = log_file.read()
        payload = {'content':log_text}
        return HttpResponse(json.dumps(payload),content_type="application/json")
    except Exception as e:
        print(e)
        payload = {'content':'Status not available'}
        return HttpResponse(json.dumps(payload),content_type="application/json")
def batchlearning(request):
    """Start incremental (online) learning and render the resulting page."""
    from appbe.onlineLearning import startIncrementallearning
    template_name, learn_context = startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH)
    learn_context['version'] = AION_VERSION
    return render(request,template_name,learn_context)
def downlpredictreport(request):
    """Stream the posted prediction results back as an .xlsx download.

    Removed the unused ``workbook`` local and the dead commented
    ``excel_writer.save()`` call; behaviour is otherwise unchanged.
    """
    predictionResults = request.POST.get('predictionResults')
    # SECURITY NOTE: eval() on request data executes arbitrary expressions.
    # Kept for backward compatibility with the posted repr format, but this
    # endpoint should migrate to a safe serialization (e.g. JSON).
    predictionResults = pd.DataFrame.from_dict(eval(predictionResults))
    usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
    predictFileName = usename + '_prediction.xlsx'
    from io import BytesIO as IO
    excel_file = IO()
    excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
    predictionResults.to_excel(excel_writer, sheet_name='Predictions')
    excel_writer.close()
    excel_file.seek(0)
    response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=' + predictFileName
    return response
# EDA Reports changes
# ----------------------------
def downloadxplainreport(request):
    """Build and stream the model-explainability workbook (.xlsx).

    Gathers the global-explain artefacts, lays them out across sheets
    (Dashboard, Top 5 Rows, Features, Feature importance, Prediction)
    and returns the workbook as an attachment; an empty HttpResponse is
    returned when explanation generation did not succeed.
    """
    from appbe.xplain import global_explain
    status,msg,ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps = global_explain(request)
    if status == 'Success':
        usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
        predictFileName = usename + '_xplain.xlsx'
        # Dashboard sheet: narrative rows describing the dataset/classes.
        df = pd.DataFrame({'What kind of data does the system learn from?': ['This dataset is a dataset of measurements taken for '+str(numberofclasses)+' categories of '+str(targetFeature),'The '+str(numberofclasses)+' different categories of '+str(targetFeature)+' as per the data are:']})
        i = 1
        df1 = []
        for x in target_classes:
            df1.append({'What kind of data does the system learn from?':' '+str(i)+':'+str(x)})
            i = i+1
        df1.append({'What kind of data does the system learn from?':'The total number of data points is '+str(dataPoints)})
        df = pd.concat([df, pd.DataFrame(df1)], ignore_index = True)
        from io import BytesIO as IO
        excel_file = IO()
        excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
        df.to_excel(excel_writer, sheet_name='Dashboard',index=False)
        pd.DataFrame(df_proprocessed).to_excel(excel_writer, sheet_name='Top 5 Rows',index=False)
        # Features sheet: one narrative row per model feature.
        df = pd.DataFrame({'What are the various features of the data used for model training?': ['The various features of the data are:']})
        i = 1
        df1 = []
        for x in modelfeatures:
            df1.append({'What are the various features of the data used for model training?':' '+str(i)+': '+str(x)})
            i = i+1
        df = pd.concat( [df, pd.DataFrame( df1)], ignore_index = True)
        df.to_excel(excel_writer, sheet_name='Features',index=False)
        topFeaturesMsg = pd.DataFrame(topFeaturesMsg,columns=["Feature Importance"])
        topFeaturesMsg.to_excel(excel_writer, sheet_name='Feature importance',index=False)
        achors = pd.DataFrame(anchorjson)
        achors.to_excel(excel_writer, sheet_name='Prediction',index=False)
        workbook = excel_writer.book
        #excel_writer.save()
        excel_writer.close()
        excel_file.seek(0)
        response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename=' + predictFileName
        return response
    else:
        response = HttpResponse()
        return response
def gotoreport(request):
    """Handle report-download buttons (currently only the EDA/training report).

    Returns the training workbook as an attachment. The original
    implementation returned None for any other button value, which
    Django rejects ("view didn't return an HttpResponse"); an empty
    response is now returned instead. The unused ``usename`` local was
    also removed.
    """
    report_button = request.POST.get('trainmodel')
    if report_button == 'download_edafile':
        from appbe.reports import downloadtrainingfile
        edaFileName,excel_file = downloadtrainingfile(request,Existusecases)
        response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename=' + edaFileName
        return response
    # Unrecognized button: answer with an empty (but valid) response.
    return HttpResponse()
def LoadBasicConfiguration(request):
    """Build the basic-configuration context for the model-training page.

    Reads the default eion_config.json, profiles the training data to
    classify each column (datetime / sequence / categorical / unimportant)
    and guesses the target feature as the most category-like column, then
    renders 'modeltraning.html' with the derived config.

    Fixes: the config file is now read with a context manager (the original
    leaked the handle if json parsing raised) and the bare except is
    narrowed to Exception.
    """
    try:
        from appbe import exploratory_Analysis as ea
        configFile = DEFAULT_FILE_PATH + 'eion_config.json'
        # 'with' guarantees the handle is closed even if parsing fails
        with open(configFile, "r") as f:
            configSettings = f.read()
        configSettingsJson = json.loads(configSettings)
        temp = {}
        temp['ModelName'] = request.session['UseCaseName']
        temp['Version'] = request.session['ModelVersion']
        dataLocation = str(request.session['datalocation'])
        df = pd.read_csv(dataLocation, encoding='latin1')
        featuresList = df.columns.values.tolist()
        datetimeFeatures = []
        sequenceFeatures = []
        unimportantFeatures = []
        featuresRatio = {}
        for i in featuresList:
            # datetime-looking columns carry no direct training signal
            check = ea.match_date_format(df[i])
            if check == True:
                datetimeFeatures.append(i)
                unimportantFeatures.append(i)
                continue
            # monotonically increasing id-like columns likewise
            seq_check = ea.check_seq_feature(df[i])
            if seq_check == True:
                sequenceFeatures.append(i)
                unimportantFeatures.append(i)
                continue
            ratio = ea.check_category(df[i])
            if ratio != 0:
                featuresRatio[i] = ratio
            else:
                unimportantFeatures.append(i)
        # most category-like column (lowest ratio) is assumed to be the target;
        # raises (and lands in except) when no categorical candidate was found
        targetFeature = min(featuresRatio, key=featuresRatio.get)
        unimportantFeatures.append(targetFeature)
        config = {}
        config['modelName'] = request.session['UseCaseName']
        config['modelVersion'] = request.session['ModelVersion']
        config['datetimeFeatures'] = datetimeFeatures
        config['sequenceFeatures'] = sequenceFeatures
        config['FeaturesList'] = featuresList
        config['unimportantFeatures'] = unimportantFeatures
        config['targetFeature'] = targetFeature
        context = {'tab': 'configure', 'temp': temp, 'config': config}
        context['version'] = AION_VERSION
        return render(request, 'modeltraning.html', context)
    except Exception:
        return render(request, 'modeltraning.html', {'error':'Fail to load basic config file','version':AION_VERSION})
def LoadDataForSingleInstance(request):
    """Prepare the single-instance prediction form fields.

    Determines the active problem type from the saved config; for a few
    problem types the input fields are fixed templates, otherwise the first
    row of the training data seeds the form. Renders 'prediction.html'.

    Fix: session-derived display values are resolved with defaults up
    front, so the except-branch can always render (the original raised
    NameError there when the try block failed before assigning them).
    Also: config file read with a context manager; bare except narrowed.
    """
    selected_use_case = request.session.get('UseCaseName', '')
    ModelVersion = request.session.get('ModelVersion', '')
    ModelStatus = request.session.get('ModelStatus', '')
    try:
        updatedConfigFile = request.session['config_json']
        with open(updatedConfigFile, "r") as f:
            configSettings = f.read()
        configSettingsJson = json.loads(configSettings)
        problemtypes = configSettingsJson['basic']['analysisType']
        # exactly one analysisType flag is expected to be 'True'
        problem_type = ""
        for k in problemtypes.keys():
            if configSettingsJson['basic']['analysisType'][k] == 'True':
                problem_type = k
                break
        if problem_type == 'timeSeriesForecasting': #task 11997
            inputFieldsDict = {'noofforecasts': 10}
        elif problem_type == 'recommenderSystem':
            inputFieldsDict = {"uid": 1, "iid": 31, "rating": 0}
        elif problem_type == 'videoForecasting':
            inputFieldsDict = {'VideoPath': 'person01_boxing_d1_uncomp.avi'}
        else:
            inputFeatures = configSettingsJson['basic']['trainingFeatures']
            targetFeature = configSettingsJson['basic']['targetFeature']
            inputFeaturesList = inputFeatures.split(',')
            if targetFeature in inputFeaturesList:
                inputFeaturesList.remove(targetFeature)
            dataFilePath = str(configSettingsJson['basic']['dataLocation'])
            df = pd.read_csv(dataFilePath, encoding='latin1')
            # seed the form with the first data row's values
            singleInstanceData = df.loc[0, inputFeaturesList]
            inputFieldsDict = singleInstanceData.to_dict()
        inputFields = []
        inputFields.append(inputFieldsDict)
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        context = {'tab': 'predict', 'inputFields': inputFields, 'selected_use_case': selected_use_case,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'}
        return render(request, 'prediction.html', context=context)
    except Exception:
        return render(request, 'prediction.html', {'tab': 'predict', 'error': 'Fail to load inputfields', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'})
def uploadDatafromunsupervisedmodel(request):
    """Run an unsupervised model's aion_predict.py over its data file and
    feed the predictions back into the upload flow as a new dataset.

    The prediction subprocess prints 'predictions:<json>'; that JSON is
    normalized to a DataFrame, written to a new CSV under DATA_FILE_PATH,
    profiled with ux_eda, and the upload page is rendered.

    Fixes: session display values are resolved with defaults up front so the
    except-branch no longer raises NameError when the failure happens early;
    the redundant early 'datalocation' assignment (made before the file was
    written) is dropped.
    """
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case = request.session.get('UseCaseName', '')
    ModelVersion = request.session.get('ModelVersion', '')
    ModelStatus = request.session.get('ModelStatus', '')
    try:
        modelid = request.POST.get('modelid')
        p = Existusecases.objects.get(id=modelid)
        dataFile = str(p.DataFilePath)
        deploypath = str(p.DeployPath)
        if(os.path.isfile(dataFile) == False):
            context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure}
            return render(request, 'prediction.html', context)
        predictionScriptPath = os.path.join(deploypath,'aion_predict.py')
        outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
        outputStr = outputStr.decode('utf-8')
        # the script prints a 'predictions:' line followed by a JSON payload
        outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
        outputStr = outputStr.strip()
        predict_dict = json.loads(outputStr)
        if (predict_dict['status'] == 'SUCCESS'):
            predictionResults = predict_dict['data']
            df2 = pd.json_normalize(predictionResults)
            filetimestamp = str(int(time.time()))
            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
            df2.to_csv(dataFile, index=False)
            # record the new dataset location once the file actually exists
            request.session['datalocation'] = str(dataFile)
            from appbe.eda import ux_eda
            eda_obj = ux_eda(dataFile)
            featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
            # ----------------------------
            samplePercentage = 100
            samplePercentval = 0
            showRecommended = False
            df = pd.read_csv(dataFile,nrows=100)
            df_top = df.head(10)
            df_json = df_top.to_json(orient="records")
            df_json = json.loads(df_json)
            statusmsg = 'Data File Uploaded Successfully '
            selected_use_case = request.session['UseCaseName']
            ModelVersion = request.session['ModelVersion']
            ModelStatus = request.session['ModelStatus']
            request.session['currentstate'] = 0
            request.session['finalstate'] = 0
            request.session['datatype'] = 'Normal'
            No_of_Permissible_Features_EDA = get_edafeatures()
            clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
            context = {'tab': 'tabconfigure','range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage,'computeinfrastructure':computeinfrastructure, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
            context['version'] = AION_VERSION
            return render(request, 'upload.html', context)
        # prediction reported a non-SUCCESS status: fall through to error page
        raise ValueError('prediction status: ' + str(predict_dict.get('status')))
    except Exception as e:
        print(e)
        return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','version':AION_VERSION})
def qlearning(request):
    """Render the Q-learning page with an empty context."""
    page_context = {}
    return render(request, 'qlearning.html', page_context)
def RLpath(request):
    """Render the reinforcement-learning path page with an empty context."""
    page_context = {}
    return render(request, 'rl_path.html', page_context)
def stateTransitionSettings(request):
    """Post pattern-anomaly (state-transition) settings to the model service,
    then rebuild the prediction page context.

    Flow: POST the four threshold settings as JSON to the service's
    pattern_anomaly_settings endpoint; on success, reload the saved config
    to derive the problem type and the prediction/settings URLs, then render
    'prediction.html'. Connection failures and non-200 responses render an
    error context instead.
    """
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    import requests
    setting_url = service_url.read_service_url_params(request)
    usecasename = request.session['usecaseid'].replace(" ", "_")
    setting_url = setting_url+'pattern_anomaly_settings?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion'])
    #print(setting_url)
    # settings collected from the form, forwarded verbatim to the service
    inputFieldsDict = {}
    inputFieldsDict['groupswitching'] = request.POST.get('groupswitching')
    inputFieldsDict['transitionprobability'] = request.POST.get('transitionprobability')
    inputFieldsDict['transitionsequence'] = request.POST.get('transitionsequence')
    inputFieldsDict['sequencethreshold'] = request.POST.get('sequencethreshold')
    # print(inputFieldsDict)
    inputFieldsJson = json.dumps(inputFieldsDict)
    #print(inputFieldsJson)
    try:
        response = requests.post(setting_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
        if response.status_code != 200:
            outputStr=response.content
            context = {'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'}
            return render(request, 'prediction.html', context)
    except Exception as inst:
        # distinguish "service not running" from other transport errors
        if 'Failed to establish a new connection' in str(inst):
            context = {'tab': 'tabconfigure', 'error': 'AION Service needs to be started', 'selected': 'prediction'}
        else:
            context = {'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst), 'selected': 'prediction'}
        return render(request, 'prediction.html', context)
    try:
        outputStr=response.content
        outputStr = outputStr.decode('utf-8')
        outputStr = outputStr.strip()
        #print(outputStr)
        predict_dict = json.loads(str(outputStr))
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        updatedConfigFile = request.session['config_json']
        f = open(updatedConfigFile, "r")
        configSettings = f.read()
        f.close()
        configSettingsJson = json.loads(configSettings)
        inputFeatures = configSettingsJson['basic']['trainingFeatures']
        targetFeature = configSettingsJson['basic']['targetFeature']
        inputFeaturesList = inputFeatures.split(',')
        # fixed placeholder fields shown on the pattern-anomaly form
        inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
        inputFields = []
        inputFields.append(inputFieldsDict)
        iterName = request.session['UseCaseName'].replace(" ", "_")
        settings_url = ''
        problemtypes = configSettingsJson['basic']['analysisType']
        #print(problemtypes.keys())
        # exactly one analysisType flag is expected to be 'True'
        problem_type = ""
        for k in problemtypes.keys():
            if configSettingsJson['basic']['analysisType'][k] == 'True':
                problem_type = k
                break
        if problem_type == 'StateTransition':
            ser_url = service_url.read_pattern_anomaly_url_params(request)
            settings_url = service_url.read_pattern_anomaly_setting_url_params(request)
        else:
            ser_url = service_url.read_service_url_params(request)
            ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion)
        # ONNX edge runtime is flagged only when both deployer switches are on
        onnx_runtime = False
        if str(configSettingsJson['advance']['deployer']['edge_deployment']) == 'True':
            if str(configSettingsJson['advance']['deployer']['edge_format']['onnx']) == 'True':
                onnx_runtime = True
        analyticsTypes = problem_type
        imagedf = ''
        return render(request, 'prediction.html',
                      {'inputFields': inputFields,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url,'usecasetab':usecasetab,
                       'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction'})
    except Exception as e:
        print(e)
        return render(request, 'prediction.html', {'error': 'Fail to do state Transition Settings', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'})
def flcommand(request):
    """Generate federated-learning client code and render the usecases page."""
    try:
        from appbe.flconfig import fl_command
        page_context = fl_command(request, Existusecases, usecasedetails)
        return render(request, 'usecases.html', page_context)
    except Exception as exc:
        print(exc)
        return render(request, 'models.html', {'error': 'Failed to generate federated learning client code'})
def maaccommand(request):
    """Generate MLAC code for the selected use case and render the page the
    helper chooses."""
    from appbe.models import maac_command
    try:
        page_context, target_page = maac_command(request, Existusecases, usecasedetails)
        page_context['version'] = AION_VERSION
        return render(request, target_page, page_context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html', {'errormlac': 'Failed to generate code: ' + str(exc), 'version': AION_VERSION})
def onnxruntime(request):
    """Run the deployed model's ONNX edge-validation script and render its
    predictions on the prediction page."""
    try:
        script_path = os.path.join(request.session['deploypath'], 'edge', 'onnxvalidation.py')
        raw_output = subprocess.check_output([sys.executable, script_path]).decode('utf-8')
        # the script prints a 'predictions:' line followed by a JSON payload
        payload = re.search(r'predictions:(.*)', str(raw_output), re.IGNORECASE).group(1).strip()
        predict_dict = json.loads(payload)
        page_context = {
            'tab': 'predict',
            'predictionResults': predict_dict,
            'selected_use_case': request.session['UseCaseName'],
            'ModelStatus': request.session['ModelStatus'],
            'ModelVersion': request.session['ModelVersion'],
            'selected': 'prediction',
            'onnx_edge': True,
            'version': AION_VERSION,
        }
        return render(request, 'prediction.html', context=page_context)
    except Exception as inst:
        print('-------------------->' + str(inst))
        page_context = {'tab': 'tabconfigure', 'error': 'Failed To Perform Prediction', 'selected': 'prediction', 'version': AION_VERSION}
        return render(request, 'prediction.html', page_context)
def instancepredict(request):
    """Run batch prediction on an uploaded data file.

    Saves the uploaded file under DATA_FILE_PATH, invokes the deployed
    model's aion_predict.py as a subprocess, parses the 'predictions:' JSON
    line from its stdout and renders 'prediction.html' with the results.
    Timing and outcome are written to the 'log_ux' logger.
    """
    log = logging.getLogger('log_ux')
    from appbe.train_output import get_train_model_details
    modelType=''
    trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request)
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
    try:
        t1 = time.time()
        if request.FILES:
            Datapath = request.FILES['DataFilePath']
            from io import StringIO
            ext = str(Datapath).split('.')[-1]
            if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt']:
                content = StringIO(Datapath.read().decode('utf-8'))
                reader = csv.reader(content)
                df = pd.DataFrame(reader)
                # first row of the uploaded file is treated as the header
                df.columns = df.iloc[0]
                df = df[1:]
            filetimestamp = str(int(time.time()))
            # keep the original extension only for recognized file types
            if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt','pdf']:
                dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
            else:
                dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
            with open(dataFile, 'wb+') as destination:
                for chunk in Datapath.chunks():
                    destination.write(chunk)
                destination.close()
            dataPath = dataFile
        if(os.path.isfile(dataFile) == False):
            context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
            log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist')
            return render(request, 'prediction.html', context)
        updatedConfigFile = request.session['config_json']
        f = open(updatedConfigFile, "r")
        configSettings = f.read()
        f.close()
        configSettingsJson = json.loads(configSettings)
        predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
        outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
        outputStr = outputStr.decode('utf-8')
        # the script prints a 'predictions:' line followed by a JSON payload
        outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
        outputStr = outputStr.strip()
        predict_dict = json.loads(outputStr)
        # exactly one analysisType flag is expected to be 'True'
        problemtypes = configSettingsJson['basic']['analysisType']
        problem_type = ''
        for k in problemtypes.keys():
            if configSettingsJson['basic']['analysisType'][k] == 'True':
                problem_type = k
                break
        PredictionResultsOfTextSum = []
        if (predict_dict['status'] == 'SUCCESS'):
            predictionResults = predict_dict['data']
            predictionResultsTextSum= predict_dict['data']
            if problem_type in ['similarityIdentification','contextualSearch']:
                # flatten each prediction list into a newline-separated string
                for x in predictionResults:
                    msg=''
                    for y in x['prediction']:
                        msg += str(y)
                        msg += '\n'
                    msg += '\n'
                    msg += '\n'
                    msg += '\n'
                    msg += '\n'
                    x['prediction'] = msg
            if problem_type == 'textSummarization':
                Results = {}
                Results['msg'] = predict_dict['msg']
                PredictionResultsOfTextSum.append(Results)
                Results['prediction'] = predict_dict['data']
                PredictionResultsOfTextSum.append(Results)
            t2 = time.time()
            log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(
                round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        else:
            context = {'tab': 'tabconfigure', 'error': 'Failed To perform prediction','version':AION_VERSION}
            log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction')
            return render(request, 'prediction.html', context)
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        from appfe.modelTraining.train_views import getMLModels
        problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
        from appbe.prediction import createInstanceFeatures
        ser_url = service_url.read_service_url_params(request)
        inputFields,ser_url = createInstanceFeatures(configSettingsJson,problem_type,mlmodels,request.session['usecaseid'],request.session['ModelVersion'],ser_url)
        from appfe.modelTraining.prediction_views import getTrainingStatus
        result = getTrainingStatus(request)
        context = {'tab': 'predict','ser_url':ser_url,'predictionResults': predictionResults, 'selected_use_case': selected_use_case,'problem_type':problem_type,'result':result,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'bestmodel':bestmodel,'usecasetab':usecasetab,'version':AION_VERSION,'modelType':modelType,'inputFields':inputFields,'configSettingsJson':configSettingsJson}
        # text summarization replaces the standard context entirely
        if problem_type == 'textSummarization':
            context={'tab': 'predict','predictionResultsTextSum': predictionResultsTextSum, 'PredictionResultsOfTextSum': PredictionResultsOfTextSum,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction','problem_type':problem_type}
        return render(request, 'prediction.html', context=context)
    except Exception as inst:
        print(inst)
        context = {'tab': 'tabconfigure', 'error': 'Failed To perform prediction', 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
        log.info('Predict Batch :' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction, '+str(inst))
        return render(request, 'prediction.html', context)
def LoadAdvanceConfiguration(request):
    """Load the saved configuration JSON and render the advanced-config tab.

    Only POST requests carry a config to load; for other methods the try
    block falls through and the view returns None, as in the original
    (kept for backward compatibility).

    Fixes: the config file is read with a context manager (the original
    leaked the handle if json.loads raised) and the bare except is narrowed
    to Exception.
    """
    try:
        if request.method == 'POST':
            configFile = request.session['config_json']
            with open(configFile, "r") as f:
                configSettings = f.read()
            configSettingsJson = json.loads(configSettings)
            context = {'tab': 'advconfig', 'advconfig': configSettingsJson}
            context['version'] = AION_VERSION
            context['usecasetab'] = usecasetab
            return render(request, 'modeltraning.html', context)
    except Exception:
        return render(request, 'modeltraning.html', {'error':'Fail to load advance config file','version':AION_VERSION,'usecasetab':usecasetab})
# advance
def Advance(request):
    """Persist the advanced configuration and either kick off training or
    re-render the advanced-config page.

    Fix: computeinfrastructure is resolved before the try block (matching
    sibling views such as templatepage), so the except-branch no longer
    raises NameError when ac.save fails before it was assigned.
    """
    computeinfrastructure = compute.readComputeConfig()
    try:
        from appbe import advance_Config as ac
        request.session['defaultfilepath'] = DEFAULT_FILE_PATH
        context = ac.save(request)
        submittype = request.POST.get('AdvanceSubmit')
        if submittype != 'AdvanceDefault':
            # anything other than "save defaults" means "train now"
            from appfe.modelTraining.train_views import trainmodel
            return trainmodel(request)
        else:
            context['version'] = AION_VERSION
            context['usecasetab'] = usecasetab
            context['computeinfrastructure'] = computeinfrastructure
            return render(request, 'advancedconfig.html', context)
    except Exception as e:
        print(e)
        return render(request, 'advancedconfig.html', {'erroradvance':'Fail to save','version':AION_VERSION,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure})
def templatepage(request):
    """Assemble the context dict shared by the use-case landing pages.

    Returns a plain context dictionary (not an HttpResponse) - callers
    render it themselves.

    Fix: the bare except is narrowed to Exception so system-exit signals
    are not swallowed.
    """
    computeinfrastructure = compute.readComputeConfig()
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        ser_url = service_url.read_service_url_params(request)
        packagetip='''
        Call From Command Line
        1. Click AION Shell
        2. python {packageAbsolutePath}/aion_prediction.py {json_data}
        Call As a Package
        1. Go To package_path\WHEELfile
        2. python -m pip install {packageName}-py3-none-any.whl
        Call the predict function after wheel package installation
        1. from {packageName} import aion_prediction as p1
        2. p1.predict({json_data})
        '''
        usecase = usecasedetails.objects.all()
        models = Existusecases.objects.filter(Status='SUCCESS')
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        # next use-case id: one past the latest, or 1 when none exist yet
        if len(usecase) > 0:
            nouc = usecasedetails.objects.latest('id')
            nouc = (nouc.id)+1
        else:
            nouc = 1
        context = {'usecasedetail': usecase, 'nouc': nouc,'models': models, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip,'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab}
        return (context)
    except Exception:
        context = {'error':'Fail to load usecases details','usecasetab':usecasetab}
        return (context)
def modelkafka(request):
    """Register the current data location with the Kafka consumer and
    re-render the usecases page.

    Fixes: session display values are resolved with safe defaults before
    the try block, so the except-branch no longer raises NameError when
    addKafkaModel fails before getusercasestatus ran; the bare except is
    narrowed to Exception.
    """
    selected_use_case = request.session.get('UseCaseName', '')
    ModelVersion = request.session.get('ModelVersion', '')
    ModelStatus = request.session.get('ModelStatus', '')
    try:
        addKafkaModel(request,request.session['datalocation'])
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        computeinfrastructure = compute.readComputeConfig()
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        ser_url = service_url.read_service_url_params(request)
        packagetip='''
        Call From Command Line
        1. Click AION Shell
        2. python {packageAbsolutePath}/aion_prediction.py {json_data}
        Call As a Package
        1. Go To package_path\WHEELfile
        2. python -m pip install {packageName}-py3-none-any.whl
        Call the predict function after wheel package installation
        1. from {packageName} import aion_prediction as p1
        2. p1.predict({json_data})
        '''
        models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id')
        usecase = usecasedetails.objects.all().order_by('-id')
        # next use-case id: one past the latest, or 1 when none exist yet
        if len(usecase) > 0:
            nouc = usecasedetails.objects.latest('id')
            nouc = (nouc.id)+1
        else:
            nouc = 1
        return render(request, 'usecases.html',
                      {'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'usecasetab':usecasetab,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting})
    except Exception:
        return render(request, 'usecases.html',{'selected': 'usecase', 'selected_use_case': selected_use_case,'error': 'Fail to load modelkafka'})
def startTracking(request):
    """Ensure the MLflow tracking service is running, then render the
    data-operations page with the outcome."""
    from appbe.aion_config import aion_tracking
    from appbe.aion_config import start_tracking
    try:
        # start the tracker only when the status probe reports an error;
        # in either case the page is told the service is available
        if aion_tracking().lower() == 'error':
            start_tracking()
        status = 'MLflowSuccess'
        page_context = {'selected': 'DataOperations', 'usecasetab': usecasetab, 'status': status}
        page_context['version'] = AION_VERSION
        return render(request, "dataoperations.html", page_context)
    except:
        page_context = {'selected': 'DataOperations', 'usecasetab': usecasetab, 'status': 'Error'}
        page_context['version'] = AION_VERSION
        return render(request, "dataoperations.html", page_context)
def startService(request):
    """Start the AION service and report the outcome on the settings page."""
    try:
        state = aion_service()
        # map the service state to a user-facing message
        messages = {
            'Running': 'AION service already running',
            'Started': 'AION service started successfully',
        }
        page_context = settings(request)
        page_context['status'] = messages.get(state, 'Error in starting')
        return render(request, 'settings_page.html', page_context)
    except:
        return render(request, 'settings_page.html', {'error': 'Fail to start service'})
def Dataupload(request):
    """Entry view for the data-upload / use-cases landing page.

    Refreshes the status of any models still training, resets the retraining
    flag, builds the use-cases page context, and (unless the user opted out)
    pushes pending telemetry before rendering.
    """
    from appbe.pages import usecases_page
    checkModelUnderTraining(request,usecasedetails,Existusecases)
    request.session['IsRetraining'] = 'No'
    # usecases_page decides which template ('action') to render
    status,context,action = usecases_page(request,usecasedetails,Existusecases)
    context['version'] = AION_VERSION
    context['currentstate'] =0
    from appbe.aion_config import get_telemetryoptout
    telemetryoptout = get_telemetryoptout()
    # 'No' means the user has NOT opted out, so telemetry may be sent
    if telemetryoptout == 'No':
        from appbe.telemetry import checkTelemtry
        checkTelemtry()
    return render(request,action,context)
def show(request):
    """List every use-case record on the usecases page.

    Fix: the bare except is narrowed to Exception so system-exit signals
    are not swallowed; commented-out debug print removed.
    """
    try:
        models = Existusecases.objects.all()
        return render(request, "usecases.html", {'models': models, 'selected': 'usecase'})
    except Exception:
        return render(request, "usecases.html", {'error': 'Error to show Usecases', 'selected': 'usecase'})
def edit(request, id):
    """Render the edit form for the use case with the given primary key.

    Fix: the bare except is narrowed to Exception so system-exit signals
    are not swallowed.
    """
    try:
        usecasedetail = usecasedetails.objects.get(id=id)
        return render(request, 'edit.html', {'usecasedetail': usecasedetail, 'selected': 'usecase'})
    except Exception:
        return render(request, "usecases.html", {'error': 'Error in editing usecase', 'selected': 'usecase'})
def opentraining(request, id,currentVersion):
    """Re-open an existing use-case version for (re)training.

    Restores the session state (use case, version, data location, file
    settings) from the version's saved config file, then routes to the
    appropriate upload flow: object-labelling for 'Object' data, the LLM
    document flow for LLM file types, or the normal upload page otherwise.
    On any failure the use-cases page is rendered with an error banner.
    """
    from appbe.pages import usecases_page
    try:
        p = usecasedetails.objects.get(id=id)
        model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
        Version = model[0].Version
        usecasename = p.UsecaseName
        request.session['ModelName'] = p.id
        request.session['UseCaseName'] = usecasename
        request.session['usecaseid'] = p.usecaseid
        request.session['ModelVersion'] = Version
        request.session['ModelStatus'] = 'Not Trained'
        request.session['finalstate'] = 0
        usecase = usecasedetails.objects.all().order_by('-id')
        configfile = str(model[0].ConfigPath)
        dataFile = ''
        if configfile != '':
            request.session['finalstate'] = 2
            f = open(configfile, "r")
            configSettings = f.read()
            f.close()
            configSettings = json.loads(configSettings)
            dataFile = configSettings['basic']['dataLocation']
            # branch on the stored data type to pick the right upload flow
            if configSettings['basic']['folderSettings']['fileType'] == 'Object':
                request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
                request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile']
                request.session['datalocation'] = configSettings['basic']['dataLocation']
                return objectlabeldone(request)
            elif configSettings['basic']['folderSettings']['fileType'] in ['LLM_Document','LLM_Code']:
                request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
                request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension']
                request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
                request.session['datalocation'] = configSettings['basic']['dataLocation']
            else:
                request.session['datalocation'] = str(configSettings['basic']['dataLocation'])
                request.session['datatype'] = 'Normal'
                # restore delimiter/text-qualifier, defaulting to CSV conventions
                if 'fileSettings' in configSettings['basic'].keys():
                    fileSettings = configSettings['basic']['fileSettings']
                    if 'delimiters' in fileSettings.keys():
                        delimiters = configSettings['basic']['fileSettings']['delimiters']
                        textqualifier = configSettings['basic']['fileSettings']['textqualifier']
                        request.session['delimiter'] = delimiters
                        request.session['textqualifier'] = textqualifier
                    else:
                        request.session['delimiter'] = ','
                        request.session['textqualifier'] = '"'
        if dataFile == '':
            # no config-recorded location: fall back to the stored data file path
            dataFile = str(model[0].DataFilePath)
        if dataFile != '':
            request.session['finalstate'] = 2
            request.session['datalocation'] = dataFile
        return uploaddata(request)
    except Exception as e:
        print(e)
        checkModelUnderTraining(request,usecasedetails,Existusecases)
        request.session['IsRetraining'] = 'No'
        status,context,action = usecases_page(request,usecasedetails,Existusecases)
        context['version'] = AION_VERSION
        context['Status'] = 'Error'
        context['Msg'] = 'Error in retraining usecase. Check log file for more details'
        return render(request,action,context)
def stopmodelservice(request):
    """Stop a running model service and re-render the models page.

    Reads the model id and service pid from the POST body, asks
    installPackage to stop the service, waits briefly, then rebuilds the
    per-model deployment summary from each model's output.json.

    Fix: the outer bare except is narrowed to Exception; the redundant
    file.close() inside the with-block is removed.
    """
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        computeinfrastructure = compute.readComputeConfig()
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        id = request.POST.get('modelid')
        pid = request.POST.get('pid')
        installPackage.stopService(pid)
        time.sleep(5)  # give the service a moment to actually terminate
        usecasedetail = usecasedetails.objects.get(id=id)
        usecasename = usecasedetail.UsecaseName
        runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
        installationStatus,modelName,modelVersion = installPackage.checkInstalledPackge(usecasename)
        models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
        for model in models:
            # defaults shown when output.json is missing or unreadable
            model.scoringCreteria = 'NA'
            model.score = 'NA'
            model.deploymodel = 'NA'
            model.maacsupport = 'False'
            model.flserversupport = 'False'
            if os.path.isdir(str(model.DeployPath)):
                # NOTE(review): sibling views read 'etc/output.json'; this one
                # reads output.json at the deploy root - confirm which is right.
                modelPath = os.path.join(str(model.DeployPath), 'output.json')
                try:
                    with open(modelPath) as file:
                        outputconfig = json.load(file)
                    if outputconfig['status'] == 'SUCCESS':
                        model.scoringCreteria = outputconfig['data']['ScoreType']
                        model.score = outputconfig['data']['BestScore']
                        model.deploymodel = outputconfig['data']['BestModel']
                        supportedmodels = ["Logistic Regression",
                        "Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge"]
                        if model.deploymodel in supportedmodels:
                            model.maacsupport = 'True'
                        else:
                            model.maacsupport = 'False'
                        supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
                        if model.deploymodel in supportedmodels:
                            model.flserversupport = 'True'
                        else:
                            model.flserversupport = 'False'
                except Exception as e:
                    # best-effort: keep the 'NA' defaults for unreadable configs
                    pass
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        nouc = 0
        usecase = usecasedetails.objects.all()
        return render(request, 'models.html',
                      {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
    except Exception:
        return render(request, 'models.html',{'error': 'Fail to stop model service'})
def startmodelservice(request):
    """Start a model's prediction service and re-render the models page.

    Starts the service via installPackage with the requested host/port,
    waits briefly, then rebuilds the per-model deployment summary from each
    model's etc/output.json.

    Fix: 'insallPackage.checkInstalledPackge' was a typo for
    'installPackage.checkInstalledPackge' (compare stopmodelservice); it
    raised NameError which the bare except silently turned into the
    'Fail to start model service' page on every call. The outer bare
    except is also narrowed to Exception.
    """
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        computeinfrastructure = compute.readComputeConfig()
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        installPackage.startService(request.POST.get('modelName'),request.POST.get('ip'),request.POST.get('portNo'))
        time.sleep(5)  # give the service a moment to come up
        id = request.POST.get('modelid')
        usecasedetail = usecasedetails.objects.get(id=id)
        usecasename = usecasedetail.UsecaseName
        runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
        installationStatus,modelName,modelVersion = installPackage.checkInstalledPackge(usecasename)
        models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
        for model in models:
            # defaults shown when output.json is missing or unreadable
            model.scoringCreteria = 'NA'
            model.score = 'NA'
            model.deploymodel = 'NA'
            model.maacsupport = 'False'
            model.flserversupport = 'False'
            if os.path.isdir(str(model.DeployPath)):
                modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json')
                try:
                    with open(modelPath) as file:
                        outputconfig = json.load(file)
                    if outputconfig['status'] == 'SUCCESS':
                        model.scoringCreteria = outputconfig['data']['ScoreType']
                        model.score = outputconfig['data']['BestScore']
                        model.deploymodel = outputconfig['data']['BestModel']
                        supportedmodels = ["Logistic Regression",
                        "Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge"]
                        if model.deploymodel in supportedmodels:
                            model.maacsupport = 'True'
                        else:
                            model.maacsupport = 'False'
                        supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
                        if model.deploymodel in supportedmodels:
                            model.flserversupport = 'True'
                        else:
                            model.flserversupport = 'False'
                except Exception as e:
                    # best-effort: keep the 'NA' defaults for unreadable configs
                    pass
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        nouc = 0
        usecase = usecasedetails.objects.all()
        return render(request, 'models.html',
                      {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
    except Exception:
        return render(request, 'models.html',{'error': 'Fail to start model service'})
def downloadpackage(request, id, version):
    """Stream the packaged model for the given use-case id and version."""
    response = installPackage.downloadPackage(request, id, version, usecasedetails, Existusecases)
    return response
def createpackagedocker(request, id, version):
    """Build the docker package for a use-case version and render the
    usecases page with the result (or the error message)."""
    try:
        page_context = installPackage.createPackagePackage(request, id, version, usecasedetails, Existusecases)
        page_context['version'] = AION_VERSION
        return render(request, 'usecases.html', page_context)
    except Exception as exc:
        return render(request, 'usecases.html', {'error': str(exc)})
def publish(request, id):
    """Render the model list for a use case on the Publish tab.

    For every successfully trained version of the use case, reads
    etc/output.json from its deploy folder to pick up the best model and
    decide which downstream features it supports: MLaC code generation
    (maacsupport), federated-learning server (flserversupport) and
    homomorphic-encryption scoring (encryptionsupport).
    """
    print("Inside Publish Tab")
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        computeinfrastructure = compute.readComputeConfig()
        usecasedetail = usecasedetails.objects.get(id=id)
        usecasename = usecasedetail.UsecaseName
        publish_version,publish_status,publish_drift_status =chech_publish_info(usecasename)
        runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
        installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename)
        models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
        for model in models:
            # Defaults used when output.json is missing or unreadable.
            model.scoringCreteria = 'NA'
            model.score = 'NA'
            model.deploymodel = 'NA'
            model.maacsupport = 'False'
            model.flserversupport = 'False'
            if os.path.isdir(str(model.DeployPath)):
                modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json')
                try:
                    with open(modelPath) as file:
                        outputconfig = json.load(file)
                        file.close()
                    if outputconfig['status'] == 'SUCCESS':
                        model.scoringCreteria = outputconfig['data']['ScoreType']
                        model.score = outputconfig['data']['BestScore']
                        model.deploymodel = outputconfig['data']['BestModel']
                        # SECURITY(review): eval() on file content — the file is
                        # produced by training, but ast.literal_eval would be safer.
                        model.featuresused = eval(outputconfig['data']['featuresused'])
                        model.targetFeature = outputconfig['data']['targetFeature']
                        if 'params' in outputconfig['data']:
                            model.modelParams = outputconfig['data']['params']
                        model.modelType = outputconfig['data']['ModelType']
                        model.dataPath = os.path.join(str(model.DeployPath),'data', 'postprocesseddata.csv')
                        # Algorithms for which MLaC code generation is available.
                        supportedmodels = ["Logistic Regression",
                        "Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge","Extreme Gradient Boosting (XGBoost)","Light Gradient Boosting (LightGBM)","Categorical Boosting (CatBoost)","LSTM"]
                        print(model.deploymodel)
                        if model.deploymodel in supportedmodels:
                            model.maacsupport = 'True'
                        else:
                            model.maacsupport = 'False'
                        # Algorithms supported by the federated-learning server.
                        supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
                        if model.deploymodel in supportedmodels:
                            model.flserversupport = 'True'
                        else:
                            model.flserversupport = 'False'
                        # Algorithms supported for homomorphic-encryption scoring.
                        supportedmodels = ["Extreme Gradient Boosting (XGBoost)"]
                        if model.deploymodel in supportedmodels:
                            model.encryptionsupport = 'True'
                        else:
                            model.encryptionsupport = 'False'
                except Exception as e:
                    # Best-effort: a broken output.json leaves the 'NA' defaults.
                    print(e)
                    pass
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        nouc = 0
        usecase = usecasedetails.objects.all()
        print(models)
        return render(request, 'models.html',
                      {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,'usecasetab':usecasetab,'publish_version':publish_version,'publish_status':publish_status,'publish_drift_status':publish_drift_status,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
    except Exception as e:
        print(e)
        return render(request, 'models.html',{'error': 'Fail to publish model'})
def remove_version(request, id):
    """Delete one trained version (Existusecases row) and its deploy folder.

    If the deploy path equals the shared DEPLOY_LOCATION root, only the
    version-specific sub-folder '<usecaseid>_<version>' is removed.  When
    the last version of a use case is deleted, the parent use case row is
    deleted too.  Always re-renders the use-case list with a status message.

    NOTE(review): for non-GET requests Status/Msg are never assigned, so the
    context update below raises NameError, which the outer except converts
    into the generic deletion-error page — confirm this is intended.
    """
    from appbe.pages import get_usecase_page
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        computeinfrastructure = compute.readComputeConfig()
        if request.method == 'GET':
            try:
                model = Existusecases.objects.get(id=id)
                usecaseid = model.ModelName.id
                if os.path.isdir(str(model.DeployPath)):
                    import shutil
                    # Guard: never wipe the shared deployment root itself.
                    if DEPLOY_LOCATION != str(model.DeployPath):
                        shutil.rmtree(str(model.DeployPath))
                    else:
                        uname = model.ModelName.usecaseid.replace(" ", "_")
                        usecaseversion = model.Version
                        deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion))
                        if os.path.isdir(str(deployLocation)):
                            shutil.rmtree(str(deployLocation))
                model.delete()
                usecasedetail = usecasedetails.objects.get(id=model.ModelName.id)
                models = Existusecases.objects.filter(ModelName=usecasedetail)
                if len(models) == 0:
                    # No versions left: remove the parent use case as well.
                    usecasedetail.delete()
                Status = 'SUCCESS'
                Msg = 'Version Deleted Successfully'
            except Exception as e:
                print(e)
                Status = 'Error'
                Msg = str(e)
        status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
        context['Status'] = Status
        context['Msg'] = Msg
        context['version'] = AION_VERSION
        return render(request, 'usecases.html',context)
    except Exception as e:
        print(e)
        status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
        context['Status'] = 'Error'
        context['Msg'] = 'Usecase Version Deletion Error'
        context['version'] = AION_VERSION
        return render(request, 'usecases.html',context)
def destroy(request, id):
    """Delete an entire use case: every version's deploy folder plus the
    usecasedetails row, then re-render the use-case list.

    If the deleted use case is the one active in the session, the session's
    use-case selection is reset.

    NOTE(review): for non-GET requests Status/Msg are never assigned, so the
    context update below raises NameError, which the outer bare except turns
    into the generic 'Usecase Deletion Error' page — confirm this is intended.
    """
    from appbe.pages import get_usecase_page
    try:
        kafkaSetting = kafka_setting()
        ruuningSetting = running_setting()
        computeinfrastructure = compute.readComputeConfig()
        if request.method == 'GET':
            try:
                usecasedetail = usecasedetails.objects.get(id=id)
                usecasename = usecasedetail.usecaseid
                models = Existusecases.objects.filter(ModelName=usecasedetail)
                for model in models:
                    if os.path.isdir(str(model.DeployPath)):
                        import shutil
                        # Guard: never wipe the shared deployment root itself.
                        if DEPLOY_LOCATION != str(model.DeployPath):
                            shutil.rmtree(str(model.DeployPath))
                        else:
                            uname = usecasename.replace(" ", "_")
                            usecaseversion = model.Version
                            deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion))
                            if os.path.isdir(str(deployLocation)):
                                shutil.rmtree(str(deployLocation))
                usecasedetail.delete()
                Status = 'SUCCESS'
                Msg = 'Deleted Successfully'
            except Exception as e:
                print(e)
                Status = 'Error'
                Msg = str(e)
        else:
            usecasename = 'Not Defined'
        if 'UseCaseName' in request.session:
            if (usecasename == request.session['UseCaseName']):
                # The deleted use case was the active one: reset the session.
                selected_use_case = 'Not Defined'
                request.session['UseCaseName'] = selected_use_case
                request.session['ModelVersion'] = 0
                request.session['ModelStatus'] = 'Not Trained'
            else:
                selected_use_case = request.session['UseCaseName']
        else:
            selected_use_case = 'Not Defined'
        status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
        context['Status'] = Status
        context['Msg'] = Msg
        context['version'] = AION_VERSION
        return render(request, 'usecases.html',context)
    except:
        status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
        context['Status'] = 'Error'
        context['Msg'] = 'Usecase Deletion Error'
        context['version'] = AION_VERSION
        return render(request, 'usecases.html',context)
def update(request, id):
    """Edit a use case's details through usecasedetailsForm.

    GET renders the pre-filled edit form; POST validates and saves, then
    redirects to /show.

    NOTE(review): ``form['id']`` is a BoundField object, not a plain value —
    storing it in the session relies on the session serializer accepting it;
    confirm this is intended.
    """
    try:
        lab = get_object_or_404(usecasedetails, id=id)
        if request.method == 'POST':
            form = usecasedetailsForm(request.POST, instance=lab)
            request.session['usecaseid'] = form['id']
            # print(request.session['usecaseid'])
            if form.is_valid():
                form.save()
                return redirect('/show')
        else:
            form = usecasedetailsForm(instance=lab)
            request.session['usecaseid'] = form['id']
            # print(request.session['usecaseid'])
        return render(request, 'edit.html', {'form': form, 'selected': 'usecase'})
    except:
        return render(request, 'edit.html', {'error': 'Error in updating usecase', 'selected': 'usecase'})
def newfile(request):
    """Register a local data file for output-drift monitoring.

    Validates the path posted as 'localfilePath', loads it with pandas to
    capture the column list in the session (key 'Features_dr'), and renders
    the output-drift page.  Any failure is reported on the same page.
    """
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    try:
        model = Existusecases.objects.get(ModelName=request.session['ModelName'], Version=request.session['ModelVersion'])
        output_train_json_filename = str(model.TrainOuputLocation)
        # Context manager replaces the manual open("r+")/read/close triple so
        # the handle is always released (and the file is no longer opened writable).
        with open(output_train_json_filename, "r") as f:
            training_output = json.loads(f.read())
        dataFile = request.POST.get('localfilePath')
        if not os.path.isfile(dataFile):
            context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
            return render(request, 'outputdrif.html', context)
        df = pd.read_csv(dataFile)
        request.session['drift_datalocations'] = dataFile
        request.session['Features_dr'] = df.columns.values.tolist()
        Featrs = request.session['Features_dr']
        statusmsg = 'Data File Uploaded Successfully'
        context = {'tab': 'tabconfigure', 'status_msg': statusmsg,
                   'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
                   'selected': 'monitoring', 'z': Featrs}
        context['version'] = AION_VERSION
        return render(request, 'outputdrif.html', context)
    except Exception as Isnt:
        context = {'error': 'Error during output drift.'+str(Isnt), 'selected_use_case': selected_use_case,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
        context['version'] = AION_VERSION
        return render(request, 'outputdrif.html', context)
def summarization(request):
    """Render the data-summarization landing page."""
    ctx = {'selected': 'DataOperations', 'usecasetab': usecasetab, 'version': AION_VERSION}
    return render(request, "summarization.html", ctx)
# ------------------ Debiasing Changes ------------------
def getdataclasses(request):
    """Return <option> markup listing the distinct values of a selected feature.

    Used by the debiasing UI: reads the current run's config (session key
    'config_json') to locate the training data, then lists the unique values
    of the column named in the 'features' GET parameter.
    """
    updatedConfigFile = request.session['config_json']
    # 'with' closes the config file deterministically; the original opened it
    # "r+" (writable) and never closed it.
    with open(updatedConfigFile, "r", encoding="utf-8") as f:
        configSettingsJson = json.loads(f.read())
    df = pd.read_csv(configSettingsJson['basic']['dataLocation'], encoding='utf8')
    selectedFeature = request.GET.get('features')
    classeslist = df[selectedFeature].unique().tolist()
    # NOTE(review): assumes the selected column holds strings; non-str values
    # would raise on the concatenation below — confirm upstream guarantees.
    _list = []
    for item in classeslist:
        _list.append("<option value='" + item + "'>" + item + "</option>")
    return HttpResponse(_list)
# ------------------ ------------------
def ucdetails(request, id):
    """Show the detail page for one use case, refreshing training status first."""
    from appbe.pages import usecases_page
    checkModelUnderTraining(request, usecasedetails, Existusecases)
    request.session['IsRetraining'] = 'No'
    _status, ctx, _action = usecases_page(request, usecasedetails, Existusecases, id)
    ctx['version'] = AION_VERSION
    return render(request, 'usecasedetails.html', ctx)
def dataoperations(request):
    """Render the data-operations landing page."""
    ctx = {'selected': 'DataOperations', 'usecasetab': usecasetab, 'version': AION_VERSION}
    return render(request, "dataoperations.html", ctx)
# @login_required(login_url="/login/")
def datalabel(request):
    """Render the dataset-labelling (v2) page."""
    ctx = {'selected': 'DataOperations', 'usecasetab': usecasetab, 'version': AION_VERSION}
    return render(request, "label_dataset_ver2.html", ctx)
# @login_required(login_url="/login/")
def pages(request):
    """Generic template dispatcher: serve the template named by the URL path.

    Renders page-404.html when the template does not exist and page-500.html
    on any other rendering failure (deliberate catch-all fallback).
    """
    context = {}
    # All resource paths end in .html.
    # Pick out the html file name from the url. And load that template.
    try:
        load_template = request.path.split('/')[-1]
        html_template = loader.get_template(load_template)
        return HttpResponse(html_template.render(context, request))
    except template.TemplateDoesNotExist:
        html_template = loader.get_template('page-404.html')
        return HttpResponse(html_template.render(context, request))
    except:
        # Deliberate catch-all: any other failure renders the 500 page.
        html_template = loader.get_template('page-500.html')
        return HttpResponse(html_template.render(context, request))
def delimitedsetting(delimiter='', textqualifier='', other=''):
    """Normalise delimiter/text-qualifier settings from the upload form.

    Recognised delimiter names (case-insensitive) map to their characters;
    'other' — or any non-empty custom value — selects the custom delimiter
    (falling back to ',' when none was supplied); anything else passes
    through unchanged.  Empty inputs default to ',' and '"'.
    """
    _named = {'tab': '\t', '\t': '\t',
              'semicolon': ';', ';': ';',
              'comma': ',', ',': ',',
              'space': ' ', ' ': ' '}
    if delimiter == '':
        delimiter = ','
    else:
        key = delimiter.lower()
        if key in _named:
            delimiter = _named[key]
        elif key == 'other' or other.lower() != '':
            # Custom delimiter requested; default to ',' when none supplied.
            delimiter = other if other != '' else ','
        # otherwise: keep the caller-supplied delimiter as-is
    if textqualifier == '':
        textqualifier = '"'
    return delimiter, textqualifier
@csrf_exempt
def upload_and_read_file_data(request):
    """Save an uploaded dataset and return a JSON preview of it.

    Supports csv/tsv, json, avro and parquet.  For csv/tsv a random sample of
    at most 100 data rows is read (the rest are skipped); for parquet only the
    first 100-row batch is materialised.  The response carries an inferred
    column-type list, an HTML preview table, the total record count and the
    file extension.
    """
    file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'])
    file_delim = request.POST.get("file_delim")
    textqualifier = request.POST.get("qualifier")
    delimiters = request.POST.get("delimiters")
    delimiter,textqualifier = delimitedsetting(request.POST.get('file_delim'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value'))
    size_take = 100  # preview sample size
    if file_ext in ["csv", "tsv"]:
        # NOTE(review): this open() is never closed explicitly; CPython frees
        # it via refcounting but a 'with' block would be cleaner.
        num_records = sum(1 for line in open(file_path)) - 1
        num_rows = num_records
        if num_records > size_take:
            # Randomly choose which data rows to skip so ~size_take remain.
            skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
        else:
            skip = 0
        # with open(file_path, 'r') as file:
        # data = file.readline(10)
        # from detect_delimiter import detect
        # row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t', ' '])
        # if file_delim == "custom" and request.POST["custom_delim"] != "":
        # row_delimiter = request.POST["custom_delim"]
        # print('row_delimiter',row_delimiter)
        file_content = pd.read_csv(file_path, sep=delimiter,quotechar=textqualifier, engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True)
    elif file_path.endswith(".json"):
        file_content_df = pd.read_json(file_path)
        file_content = pd.json_normalize(file_content_df.to_dict("records"))
        num_rows = len(file_content)
    elif file_path.endswith(".avro"):
        import pandavro as pdx
        from avro.datafile import DataFileReader
        from avro.io import DatumReader
        reader = DataFileReader(open(file_path, "rb"), DatumReader())
        schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
        file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
        num_rows = len(file_content)
    elif file_path.endswith(".parquet"):
        from pyarrow.parquet import ParquetFile
        import pyarrow as pa
        import pyarrow.parquet as pq
        pf = ParquetFile(file_path)
        take_rows = next(pf.iter_batches(batch_size=size_take))
        file_content = pa.Table.from_batches([take_rows]).to_pandas()
        # Row count comes from parquet metadata without loading any columns.
        table = pq.read_table(file_path, columns=[])
        num_rows = table.num_rows
        # file_content = pd.read_parquet(file_path, engine="pyarrow")
    else:
        raise ValueError("Invalid file format")
    response = {}
    column_list = []
    for key, val in dict(file_content.dtypes).items():
        if str(val) == 'object':
            # Heuristic: object columns that parse as dates are reported as datetime64.
            try:
                pd.to_datetime(file_content[str(key)])
                column_list.append({"column_name": str(key), 'data_type': 'datetime64'})
            except ValueError:
                column_list.append({"column_name": str(key), 'data_type': 'string'})
                pass
        else:
            column_list.append({"column_name": str(key), 'data_type': str(val)})
    response["column_list"] = column_list
    response["data_html"] = file_content.to_html(classes='table table-striped table-bordered table-hover dataTable no-footer', justify='left', index=False)
    response["record_count"] = num_rows
    response["file_ext"] = file_ext
    return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def handle_uploaded_file(path, file, test_dataset=False):
    """Save an uploaded file under *path* and return (saved_path, extension).

    The upload is streamed chunk-by-chunk so large files are never held fully
    in memory.  test_dataset=True stores it as 'test_data_file.<ext>',
    otherwise as 'uploaded_file.<ext>'.  The original's two branches were
    identical apart from the base name, so they are merged here.

    NOTE(review): the extension is the token after the FIRST dot of the
    client-supplied name (original behaviour, kept for compatibility) —
    'archive.tar.gz' yields 'tar'.
    """
    print('path', path)  # existing debug trace, kept
    ext = file.name.split('.')[1]
    base_name = "test_data_file." if test_dataset else "uploaded_file."
    filename = os.path.join(path, base_name + ext)
    with open(filename, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
    return filename, ext
@csrf_exempt
def apply_rule(request):
    """Apply the posted labelling rules to the uploaded dataset; returns JSON."""
    from appbe import labelling_utils as utils
    payload = utils.label_dataset(
        json.loads(request.POST['rule_list']),
        request.POST.get("file_ext"),
        json.loads(request.POST['label_list']),
        request.POST.get("non_satisfied_label"),
    )
    return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_exempt
def get_sample_result_of_individual_rule(request):
    """Preview the effect of one labelling rule and return sample rows as JSON."""
    from appbe import labelling_utils as utils
    rule_json = json.loads(request.POST['rule_json'])
    file_ext = request.POST.get("file_ext")
    label_list = json.loads(request.POST['label_list'])
    not_satisfy_label = request.POST.get("non_satisfied_label")
    # Debug traces kept from the original implementation.
    print("rule_json>>>", rule_json)
    print("file_ext>>>", file_ext)
    print("label_list>>>>", label_list)
    print("not_satisfied_label", not_satisfy_label)
    result = utils.get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label)
    return HttpResponse(json.dumps(result), content_type="application/json")
def download_result_dataset(request):
    """Stream the labelled dataset recorded in the session as a file download."""
    file_name = request.session['AION_labelled_Dataset']
    file_path = os.path.join(DATA_FILE_PATH, file_name)
    if not os.path.exists(file_path):
        return HttpResponse(json.dumps("file not found"), content_type="application/error")
    with open(file_path, "rb") as file:
        response = HttpResponse(file, content_type="application/force-download")
        response["Content-Disposition"] = "attachment; filename=%s" % file_name
        return response
@csrf_exempt
def get_sample_result_of_individual_rule_ver2(request):
    """Preview one labelling rule (v2 API) and return the sample result as JSON."""
    from appbe import labelling_utils as utils
    result = utils.get_sample_result_of_individual_rule_ver2(
        json.loads(request.POST['rule_json']),
        request.POST.get("file_ext"),
        json.loads(request.POST['label_list']),
        request.POST.get("non_satisfied_label"),
    )
    return HttpResponse(json.dumps(result), content_type="application/json")
def get_label_list(label_json):
    """Split label definitions into parallel name and weightage lists.

    Each entry carries 'label_name' and an optional percentage string
    'label_weightage'.  Given percentages are converted to fractions rounded
    to two decimals; missing ones default to an equal share of 100/N.
    """
    names, weights = [], []
    total = len(label_json)
    for entry in label_json:
        names.append(entry["label_name"])
        raw_weight = entry["label_weightage"]
        if raw_weight == "":
            weights.append(100 / total)
        else:
            weights.append(np.around(float(raw_weight) / 100, 2))
    return names, weights
@csrf_exempt
def apply_rule_ver2(request):
    """Apply labelling rules (v2) with weightages and optional probability output; returns JSON."""
    from appbe import labelling_utils as utils
    rule_list = json.loads(request.POST['rule_list'])
    file_ext = request.POST.get("file_ext")
    label_list, label_weightage = get_label_list(json.loads(request.POST['label_list']))
    not_satisfy_label = request.POST.get("non_satisfied_label")
    include_proba = request.POST.get("is_include_proba") == 'true'
    result = utils.label_dataset_ver2(request, rule_list, file_ext, label_list,
                                      not_satisfy_label, label_weightage, include_proba)
    return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def upload_and_read_test_data(request):
    """Save an uploaded TEST dataset and return a JSON preview of it.

    Mirrors upload_and_read_file_data but stores the file as a test dataset,
    auto-detects the csv delimiter via detect_delimiter unless a custom one
    is posted, and echoes the delimiter settings back in the response.
    """
    file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'], test_dataset=True)
    # file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'])
    file_delim_test = request.POST.get("file_delim_test")
    size_take = 100  # preview sample size
    if file_ext in ["csv", "tsv"]:
        # NOTE(review): this open() is never closed explicitly; CPython frees
        # it via refcounting but a 'with' block would be cleaner.
        num_records = sum(1 for line in open(file_path)) - 1
        num_rows = num_records
        if num_records > size_take:
            # Randomly choose which data rows to skip so ~size_take remain.
            skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
        else:
            skip = 0
        with open(file_path, 'r') as file:
            data = file.readline(10)
        from detect_delimiter import detect
        row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t', ' '])
        if file_delim_test == "custom" and request.POST["custom_test_delim"] != "":
            row_delimiter = request.POST["custom_test_delim"]
        file_content = pd.read_csv(file_path, sep=row_delimiter, quotechar="'", escapechar="/", engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True)
    elif file_path.endswith(".json"):
        file_content_df = pd.read_json(file_path)
        file_content = pd.json_normalize(file_content_df.to_dict("records"))
        num_rows = len(file_content)
    elif file_path.endswith(".avro"):
        import pandavro as pdx
        from avro.datafile import DataFileReader
        from avro.io import DatumReader
        reader = DataFileReader(open(file_path, "rb"), DatumReader())
        schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
        file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
        num_rows = len(file_content)
    elif file_path.endswith(".parquet"):
        from pyarrow.parquet import ParquetFile
        import pyarrow as pa
        import pyarrow.parquet as pq
        pf = ParquetFile(file_path)
        take_rows = next(pf.iter_batches(batch_size=size_take))
        file_content = pa.Table.from_batches([take_rows]).to_pandas()
        # Row count comes from parquet metadata without loading any columns.
        table = pq.read_table(file_path, columns=[])
        num_rows = table.num_rows
        # file_content = pd.read_parquet(file_path, engine="pyarrow")
    else:
        raise ValueError("Invalid file format")
    response = {}
    column_list = []
    for key, val in dict(file_content.dtypes).items():
        if str(val) == 'object':
            # Heuristic: object columns that parse as dates are reported as datetime64.
            try:
                pd.to_datetime(file_content[str(key)])
                column_list.append({"column_name": str(key), 'data_type': 'datetime64'})
            except ValueError:
                column_list.append({"column_name": str(key), 'data_type': 'string'})
                pass
        else:
            column_list.append({"column_name": str(key), 'data_type': str(val)})
    response["column_list"] = column_list
    response["data_html"] = file_content.to_html(classes='table table-striped text-left',table_id='testdata', justify='left', index=False)
    response["record_count"] = num_rows
    response["file_ext"] = file_ext
    response["file_delim_test"] = file_delim_test
    response["custom_test_delim"] = request.POST["custom_test_delim"]
    return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def get_label_and_weightage(request):
    """Derive labels and their weightages from a marked column of the uploaded test dataset."""
    from appbe import labelling_utils as utils
    label_list_with_weightage = utils.get_label_and_weightage(
        request.POST.get("test_file_ext"),
        request.POST.get("marked_label_column"),
        request.POST.get("file_delim_test"),
        request.POST.get("custom_test_delim"),
    )
    return HttpResponse(json.dumps(label_list_with_weightage), content_type="application/json")
def modelcompare(request):
    """Score a plain linear baseline on a deployment's post-processed data.

    Reads etc/output.json under the posted DeployLocation for the feature
    list, target and scoring criteria, fits LogisticRegression
    (classification) or LinearRegression (regression) and returns the score
    as JSON for the model-comparison view.

    NOTE(review): training and testing paths point at the SAME file
    (postprocesseddata.csv.gz), so the reported 'Testing Score' is measured
    on the training data — see the commented-out trainData/testData paths
    below for the intended future split; confirm this is acceptable interim
    behaviour.
    """
    deploypath = request.GET.get('DeployLocation')
    filepath = os.path.join(deploypath,'etc','output.json')
    with open(filepath) as file:
        config = json.load(file)
        file.close()
    # training/testing data needs to be updated as below once it is available in deployment folder
    #trainingDataPath = os.path.join(deploypath,'data','trainData.csv')
    #testingDataPath = os.path.join(deploypath,'data','testData.csv')
    trainingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz')
    testingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz')
    featureUsedInTraining=config['data']['featuresused']
    targetFeature= config['data']['targetFeature']
    scoringCriteria=config['data']['ScoreType']
    scoringCriteria=scoringCriteria.lower()
    problemType=config['data']['ModelType']
    problemType=problemType.lower()
    # featuresused is a stringified list ("['a', 'b']"); strip the brackets,
    # quotes and surrounding whitespace manually to recover the names.
    tempFeatureUsedInTraining = featureUsedInTraining.split(',')
    finalFeatures=[]
    for i in range (len(tempFeatureUsedInTraining)) :
        tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace('[', '')
        tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace(']', '')
        tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace("'", '')
        tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].lstrip()
        tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].rstrip()
        finalFeatures.append(tempFeatureUsedInTraining[i])
    featureUsedInTraining = finalFeatures
    #print("trainingDataPath----",trainingDataPath)
    #print("testingDataPath----",testingDataPath)
    #print("problemType----",problemType)
    #print("scoringCriteria----",scoringCriteria)
    #print("featureUsedInTraining----",featureUsedInTraining,type(featureUsedInTraining))
    #print("targetFeature----",targetFeature)
    if problemType == 'classification':
        try:
            df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
            df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
            trainX=df1[featureUsedInTraining]
            trainY=df1[targetFeature]
            testX=df2[featureUsedInTraining]
            testY=df2[targetFeature].to_numpy()
            from sklearn import linear_model
            estimator = linear_model.LogisticRegression()
            estimator.fit(trainX, trainY)
            predictedData = estimator.predict(testX)
            from learner.aion_matrix import aion_matrix
            scoring = aion_matrix()
            score = scoring.get_score(scoringCriteria, testY, predictedData)
            context = {'Model': 'Logistic regression','Testing Score': score, 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
            return HttpResponse(json.dumps(context), content_type="application/json")
        except Exception as e:
            print("exception "+str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            context = {'Model': 'Logistic regression','Testing Score': "Exception Occured", 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
            return HttpResponse(json.dumps(context), content_type="application/json")
    if problemType == 'regression':
        try:
            df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
            df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
            trainX=df1[featureUsedInTraining]
            trainY=df1[targetFeature]
            testX=df2[featureUsedInTraining]
            testY=df2[targetFeature].to_numpy()
            from sklearn import linear_model
            estimator = linear_model.LinearRegression()
            estimator.fit(trainX, trainY)
            predictedData = estimator.predict(testX)
            from learner.aion_matrix import aion_matrix
            scoring = aion_matrix()
            score = scoring.get_score(scoringCriteria, testY, predictedData)
            context = {'Model': 'Linear regression','Testing Score': score, 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
            return HttpResponse(json.dumps(context), content_type="application/json")
        except Exception as e:
            print("exception")
            context = {'Model': 'Linear regression','Testing Score': "Exception Occured", 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
            return HttpResponse(json.dumps(context), content_type="application/json")
def textsummarization(request):
    """Render the text-summarization page."""
    ctx = {'version': AION_VERSION, 'selected': 'textsummarization'}
    return render(request, "textsummarization.html", context=ctx)
# LLM Testing Task ID 14533
def validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample):
    """Validate and normalise the inputs of an LLM robustness test.

    prompts: str or list of str; must not be an empty string.
    reference_generation: str expected answer used for similarity scoring.
    temperature / similarity_threshold: float in [0.0, 1.0], or '' to select
        the defaults 0.9 / 0.75 respectively.
    perturbations_per_sample: int, or '' to select the default 5.

    Returns the (possibly defaulted) values as a tuple.
    Raises ValueError on any invalid input.
    """
    default = {'temperature':{'default':0.9,'lower':0.0,'upper':1.0},'similarity_threshold':{'default':0.75,'lower':0.0,'upper':1.0},'perturbations_per_sample':5}
    if not isinstance( prompts, (list,str)):
        # BUG FIX: the original f-string referenced the undefined name
        # 'prompt', turning this path into a NameError instead of the
        # intended ValueError.
        raise ValueError(f"Prompt should be of type str, got '{prompts}' of type {type(prompts)}")
    elif prompts == '':
        raise ValueError("Prompt field can not be empty")
    if not isinstance( reference_generation, str):
        raise ValueError(f"Reference Generated Answer should be of type str, got '{reference_generation}' of type {type(reference_generation)}")
    # elif reference_generation == '':
    #     raise ValueError("Reference Generation field can not be empty")
    if not isinstance( temperature, float) or temperature < default['temperature']['lower'] or temperature > default['temperature']['upper']:
        if isinstance( temperature, str) and temperature == '':
            temperature = default['temperature']['default']
        else:
            raise ValueError(f"Model Parameter Temperature should be of type float with range {default['temperature']['lower']} - {default['temperature']['upper']}, got {temperature} of type {type(temperature)}")
    if not isinstance( similarity_threshold, float) or similarity_threshold < default['similarity_threshold']['lower'] or similarity_threshold > default['similarity_threshold']['upper']:
        if isinstance( similarity_threshold, str) and similarity_threshold == '':
            similarity_threshold = default['similarity_threshold']['default']
        else:
            raise ValueError(f"Similarity Threshold should be of type float with range {default['similarity_threshold']['lower']} - {default['similarity_threshold']['upper']}, got {similarity_threshold} of type {type(similarity_threshold)}")
    if not isinstance( perturbations_per_sample, int):
        if isinstance( perturbations_per_sample, str) and perturbations_per_sample == '':
            perturbations_per_sample = default['perturbations_per_sample']
        else:
            raise ValueError(f"Perturbations Per Sample should be of type integer, got {perturbations_per_sample} of type {type(perturbations_per_sample)}")
    return prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample
def llmtesting(request):
    """Render the LLM-testing page, listing every fine-tuned LLM model.

    Scans all use cases newest-first; for each SUCCESS version whose deploy
    folder's etc/output.json reports ModelType 'LLM Fine-Tuning', builds an
    entry with the fine-tuned model name, its base model and the
    '<usecaseid>_<version>' identifier.
    """
    ftmodels = []
    usecase = usecasedetails.objects.all().order_by('-id')
    for x in usecase:
        #print(x.id)
        models = Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id')
        if len(models) > 0:
            for model in models:
                #print(str(model.ConfigPath))
                version = model.Version
                if os.path.isdir(str(model.DeployPath)):
                    modelPath = os.path.join(str(model.DeployPath),'etc','output.json')
                    with open(modelPath) as file:
                        outputconfig = json.load(file)
                    problemType = outputconfig['data']['ModelType']
                    if problemType.lower() == 'llm fine-tuning':
                        from appbe.models import get_instance
                        hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version))
                        with open(str(model.ConfigPath)) as file:
                            configSettingsJson = json.load(file)
                            file.close()
                        from appbe.pages import getMLModels
                        problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
                        # Display name: '<algorithm>-<size>-<usecase>_<version>'.
                        ft = mlmodels+'-'+smodelsize+'-'+x.usecaseid+'_'+str(version)
                        finetunedModel = {}
                        finetunedModel['ft']=ft
                        finetunedModel['basemodel'] = mlmodels+'-'+smodelsize
                        finetunedModel['usecaseid'] = x.usecaseid+'_'+str(version)
                        ftmodels.append(finetunedModel)
    return render(request, "llmtesting.html",context={'version':AION_VERSION,'selected': 'llmtesting','ftmodels':ftmodels})
# LLM Testing Result Task ID 14533
def llmtestingresult(request):
    """Render the LLM-testing report page.

    NOTE(review): this definition is immediately shadowed by a second
    ``llmtestingresult`` defined below, so it is dead code.  If it were ever
    called it would raise NameError — ``result``, ``provider``, ``tabledata``
    etc. are never defined in this scope — which the except clause would turn
    into the failure page.  Consider deleting this definition.
    """
    try:
        context = {'result':result,'provider':provider,'tabledata':tabledata,'summary':summary,'modelName':modelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompt,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'version':AION_VERSION,'selected': 'llmtestingresults','success':'success'}
        return render(request, "llmtestingresults.html",context)
    except Exception as e:
        print(e)
        context = {'error': 'Fail to Generate LLM Testing Report '+str(e),'version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'}
        return render(request, "llmtestingresults.html",context)
# LLM Testing Result Task ID 14533
def llmtestingresult(request):
try:
generate_test = request.POST['prompt_temp']
if generate_test == "generatetest":
UseCaseName = request.POST['selectusecase']
ModelName = request.POST['selectmodel']
temperature = request.POST['modelparam']
similarity_threshold = request.POST['similarity_threshold']
perturbations_per_sample = request.POST['perturbations_per_sample']
selecttype = request.POST['selectquestion']
reference_generation = (request.POST['reference_generation'])
baseModel = request.POST['basemodel']
from appbe.llmTesting import test_LLM
if selecttype == "Single":
prompts = request.POST['prompt']
else:
data_file = request.POST['dataFilePath']#Task 16794
file_name = os.path.splitext(data_file)[0]
file_extension = os.path.splitext(data_file)[-1].lower()
if file_extension != ".csv":
questions = []
answers = []
if file_extension == ".pdf":
with pdfplumber.open(data_file) as pdf:
for page in pdf.pages:
text = page.extract_text()
lines = text.split("\n")
current_question = ""
current_answer = ""
reading_question = False
for line in lines:
line = line.strip()
if line.endswith("?"):
if reading_question:
questions.append(current_question)
answers.append(current_answer)
current_question = ""
current_answer = ""
current_question = line
reading_question = True
elif reading_question:
current_answer += " " + line
if reading_question:
questions.append(current_question)
answers.append(current_answer)
elif file_extension == ".docx":
doc = Document(data_file)
current_question = ""
current_answer = ""
reading_question = False
for paragraph in doc.paragraphs:
text = paragraph.text.strip()
if text.endswith("?"):
if reading_question:
questions.append(current_question)
answers.append(current_answer)
current_question = ""
current_answer = ""
current_question = text
reading_question = True
elif reading_question:
current_answer += " "+ text
if reading_question:
questions.append(current_question)
answers.append(current_answer)
else:
print("unsupported file format. please provide a pdf or docx file.")
faq = pd.DataFrame({'Question':questions, 'Answers':answers})
# print(faq)
data_file_csv = file_name+".csv"
faq.to_csv(data_file_csv, index=False, encoding='utf-8')
else:
faq = pd.read_csv(data_file,encoding='cp1252')
rows = faq.shape[0]
prompts = list(faq['Question'])
try:
temperature = float( temperature)
similarity_threshold = float(similarity_threshold)
perturbations_per_sample = int( perturbations_per_sample)
except:
pass
prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample = validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample)
from appbe.aion_config import get_llm_data
llm_key,llm_url,api_type,api_version=get_llm_data()
urls = {
'OPENAI_API_BASE' : llm_url,
'OPENAI_API_KEY' : llm_key,
'OPENAI_API_TYPE' :api_type,
'OPENAI_API_VERSION':api_version
}
llm_obj = test_LLM()
llm_obj.set_params(urls)
if selecttype == "Single":
print(UseCaseName,ModelName)
if ModelName.lower() == 'basemodel':
result = llm_obj.run_offline_model( UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,False )
llmModelName = baseModel
else:
result = llm_obj.run_offline_model( UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,True )
llmModelName = ModelName+'-'+UseCaseName
print(result)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.html')
result = result.split("LLMTestingResultOutput:")[-1]
output = json.loads(result)
with open(dataFile,'w') as htmlfile:
htmlfile.write(output['data']['html_file'])
request.session['llmtestreport'] = str(dataFile)
# provider = result.generation_kwargs['Provider']
provider = ""
# metric_name = list(result.metric[0].keys())[0]
metric_name = output['data']['metric_name']
# metric_values = output['data']['metric_values']
metric_values = eval(output['data']['metric_values'])
passed_tests = output['data']['passed_tests']
total_tests = output['data']['total_tests']
summary = f'{passed_tests}/{total_tests}'
tabledata = {}
prompts = output['data']['prompts']
generations= output['data']['generations']
Generations = []
for sub in generations:
Generations.append(sub.replace("\n", ""))
metricvalues = metric_values
text = [eval(x) for x in generations]
gen = [x[0]['generated_text'].split('\n')[1:] for x in text]
Generations = [' '.join(x) for x in gen]
resultoutput = eval(output['data']['resultoutput'])[0]
for index,val in enumerate(Generations):
Generations[index]= Generations[index].strip()
if len(Generations[index])<=2:
metricvalues[index] = 0
resultoutput[index] = 0
tabledata = zip(prompts,Generations,metricvalues,resultoutput)
context = {'result':result,'provider':provider,'tabledata':tabledata,'summary':summary,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'}
# context = {'result':result,'provider':"provider",'tabledata':"tabledata",'summary':"summary",'modelName':modelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'}
else:
if ModelName.lower() == 'basemodel':
result_str =llm_obj.run_multiple_offline_model(UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample,faq,False)
llmModelName = baseModel
else:
result_str =llm_obj.run_multiple_offline_model(UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample,faq,True)
llmModelName = ModelName+'-'+UseCaseName
result_str = result_str.split("LLMTestingResultOutput:")[-1]
output = json.loads(result_str)
# result will be df converted from output['data']
result = pd.DataFrame(json.loads(output['data']))
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.csv')
request.session['llmtestreport'] = str(dataFile)
result.rename(columns={'Perturbed Prompts':'PerturbedPrompts','Similarity [Generations]':'Similarity'},inplace=True)
result_df = result.head(5)
result.to_csv(dataFile, index=False)
context={'result':result_df,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'perturbations_per_sample':perturbations_per_sample,'selected': 'llmtestingresults','multiple':'multiple','success':'success'}
return render(request, "llmtestingresults.html",context)
if generate_test == "download_prompt":
csvdata= os.path.join(DEFAULT_FILE_PATH,"Prompt_template.csv")
if os.path.isfile(csvdata) and os.path.exists(csvdata):
df = pd.read_csv(csvdata,encoding='utf8')
downloadFileName = 'llmreport.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename='+downloadFileName
df.to_csv(response, index=False)
return response
else:
context = {'error': 'Fail to Download File','version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'}
return render(request, "llmtestingresults.html",context)
except Exception as e:
print(e)
errormsg = str(e)
if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg:
errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.'
if 'Max retries exceeded with url' in errormsg:
errormsg = 'Please make sure you have good internet connection and access to API endpoint for your resource.'
context = {'error':errormsg,'version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'}
return render(request, "llmtestingresults.html",context)
def llmtestreport(request):
    """Serve the LLM test report stored for this session as a file download.

    Falls back to the results page with an error message when the report
    file is missing on disk.
    """
    report_path = os.path.join(request.session['llmtestreport'])
    if not os.path.exists(report_path):
        return render(request, "llmtestingresults.html",
                      context={"error": "Fail To Download File", 'version': AION_VERSION,
                               'result': 'result', 'selected': 'llmtestingresults'})
    with open(report_path, 'rb') as report_file:
        payload = report_file.read()
    response = HttpResponse(payload, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path)
    return response
### To display libraries in UI ####
def libraries(request):
    """Render the list of bundled third-party libraries (name/version/license).

    Reads ``lic/requirement.csv`` relative to this module and shows one row
    per library; missing cells are displayed as "none".
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    file_path = os.path.normpath(os.path.join(current_dir, '..', '..', 'lic', "requirement.csv"))
    library_data = []
    # newline='' is required by the csv module so quoted fields containing
    # newlines are parsed correctly (see csv.reader documentation).
    with open(file_path, 'r', newline='', encoding='utf-8') as file:
        csv_reader = csv.DictReader(file)
        for row in csv_reader:
            library_info = {
                "library": row["Library"] if row.get("Library") else "none",
                "version": row["Version"] if row.get("Version") else "none",
                "license": row["License"] if row.get("License") else "none"
            }
            library_data.append(library_info)
    return render(request, "libraries.html", context={"data": library_data, 'version': AION_VERSION, 'selected': 'libraries'})
# For Code Clone Detection
def codeclonedetectionresult(request):
    """Run code-clone detection over a user-supplied root directory and render the report.

    Reads OpenAI connection settings (falling back to
    ``config/code_clone_config.json``), writes a timestamped copy of the config
    plus a per-run output directory under DEPLOY_LOCATION, then delegates the
    analysis to ``CodeCloneDetectionFiles``.
    """
    from appbe.codeclonedetection import CodeCloneDetectionFiles
    try:
        # Load the packaged clone-detection defaults.
        codecloneconfig = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','code_clone_config.json')
        f = open(codecloneconfig, "r")
        configSettings = f.read()
        f.close()
        configSettings = json.loads(configSettings)
        rootdir = request.POST.get('rootdirectory')
        ccdmode = request.POST.get('ccdmode')
        if(os.path.isdir(rootdir)):
            llm_key,llm_url,api_type,api_version = get_llm_data()
            openai_apiKey = llm_key
            openai_baseUrl = llm_url
            # NOTE(review): plain assignments cannot raise here, so this
            # try/except never takes the fallback branch — confirm the
            # intended failure condition (probably a missing-settings check).
            try:
                openai_apiType = api_type
                openai_apiVersion = api_version
            except:
                openai_apiType = configSettings['openaiApiType']
                openai_apiVersion = configSettings['openaiApiVersion']
            openai_embeddingEngine = configSettings['codeCloneDetection']['openaiEmbeddingEngine']
            openai_embeddingModel = configSettings['codeCloneDetection']['openaiEmbeddingModel']
            openai_chatModel = configSettings['codeCloneDetection']['openaiChatModel']
            openai_deploymentId = configSettings['codeCloneDetection']['openaiDeploymentId']
            rootDirFilesType = configSettings['codeCloneDetection']['rootDirFilesType']
        else:
            return render(request, "codeclone.html",context={"codeclonedetectionerror":"Please provide valid root directory file path.",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
        # Persist a timestamped snapshot of the config used for this run.
        filetimestamp = str(int(time.time()))
        config_json_filename = os.path.join(CONFIG_FILE_PATH, 'code_clone_config_' + filetimestamp + '.json')
        updatedConfigSettings = json.dumps(configSettings)
        with open(config_json_filename, "w") as fpWrite:
            fpWrite.write(updatedConfigSettings)
            fpWrite.close()
        from appbe.dataPath import DEPLOY_LOCATION
        # Per-run output directory; its path is kept in the session so the
        # codeclonereport view can serve the generated report file later.
        codeclonedir_path = os.path.join(DEPLOY_LOCATION,('codeCloneDetection_'+filetimestamp))
        os.makedirs(codeclonedir_path,exist_ok=True)
        request.session['clonereport'] = str(codeclonedir_path)
        try:
            if (rootDirFilesType.lower() == "python" and ccdmode.lower() == "openai"):
                cdobj = CodeCloneDetectionFiles(rootdir,openai_baseUrl, openai_apiKey,openai_apiType,openai_apiVersion,codeclonedir_path,openai_embeddingEngine,openai_embeddingModel,openai_chatModel,openai_deploymentId)
                report_str,report_dict,report_df,report_json = cdobj.getCloneReport()
                clonetype = report_dict['Code_clones_count_by_clone_type'].to_dict()
                # NOTE(review): this loop rebinds ``clonetype`` on every
                # iteration, so only the zip built from the *last* key survives
                # for the template — confirm that is intentional.
                for i in clonetype:
                    clonevalues = clonetype[i].values()
                    clonekeys = clonetype[i].keys()
                    clonetype = zip(clonekeys,clonevalues)
                return render(request, "codeclonedetectionresult.html",context={'report_json':json.loads(report_json),'report_dict':report_dict,'clonetype':clonetype,'clonefunctions':report_dict['clone_functions'],'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult','openai':'openai'})
            ## Pls uncomment below code if you need to use sklearn based code clone detection.
            # elif (ccdmode.lower() =="sklearn"):
            # from appbe.codeclonedetection_sklearn import codeCloneDetectionSklearn
            # chunk_size = 10000
            # cdobj = codeCloneDetectionSklearn(rootdir,codeclonedir_path,chunk_size)
            # report_dict = cdobj.get_clone()
            # return render(request, "codeclonedetectionresult.html",context={'report_dict':report_dict,'function_df':report_dict['result_df'],'function_dict':report_dict['result_df'].to_dict(),'sklearn':'sklearn'})
            else:
                raise Exception ("Invalid clonedetection input.")
                # NOTE(review): unreachable — the raise above always exits this branch.
                return render(request, "codeclone.html",context={"codeclonedetectionerror":"Python Files Are Only Supported."})
        except Exception as e:
            # NOTE(review): every inner failure (including the deliberate raise
            # above) is reported as a connection error — the real exception is
            # discarded; consider logging ``e``.
            return render(request, "codeclone.html",context={"codeclonedetectionerror":"OpenAI Model Connection Error",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
    except Exception as e:
        print('code clone detection interface issue.Error message: ',e)
        return render(request, "codeclone.html",context={"codeclonedetectionerror":"OpenAI Model Connection Error",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
def codeclonereport(request):
    """Download the code-clone report generated earlier in this session.

    Renders the results page with an error when the report file is absent.
    """
    report_path = os.path.join(request.session['clonereport'], 'codeCloneReport', 'code_clone_report.txt')
    if not os.path.exists(report_path):
        return render(request, "codeclonedetectionresult.html",
                      context={"codeclonedetectionerror": "Fail To Download File", 'version': AION_VERSION,
                               'result': 'result', 'selected': 'codeclonedetectionresult'})
    with open(report_path, 'rb') as report_file:
        payload = report_file.read()
    response = HttpResponse(payload, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path)
    return response
def evaluatepromptmetrics(request):
    """ Evaluate prompt only information for LLM Evaluation."""
    import whylogs as why
    from langkit import light_metrics
    from whylogs.experimental.core.udf_schema import udf_schema
    from whylogs.experimental.core.udf_schema import register_dataset_udf
    from langkit import lang_config, response_column
    import json
    # Prompt text is passed as a GET parameter by the UI.
    prompt_msg = request.GET.get('instruction')
    # NOTE(review): text_schema, llm_schema and the why.log()/view() results
    # below never feed into the response — presumably kept for langkit/whylogs
    # registration side effects; confirm before removing.
    text_schema = udf_schema()
    llm_schema = light_metrics.init()
    df = pd.DataFrame({
        "prompt": [
            prompt_msg
        ]})
    results = why.log(df, schema=udf_schema()) # .profile()
    view = results.view()
    from appbe.evaluate_prompt import evaluate_prompt_metrics
    # evaluate_prompt_metrics returns (combined JSON string, per-metric dict).
    final_output_json,prompt_results = evaluate_prompt_metrics(prompt_msg)
    # prompt_results_json is computed but not returned; only the combined JSON
    # string goes back to the caller.
    prompt_results_json = json.dumps(prompt_results, indent=4)
    return HttpResponse(final_output_json)
|
bc_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
from appbe.aion_config import running_setting
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from appbe import compute
AION_VERSION = getversion()
def basicconfig(request):
    """Render the basic configuration page for the current use case.

    On POST before the final state is reached, (re)builds the configuration via
    ``bc.gotoconf``; otherwise opens the stored configuration via
    ``bc.openbasicconf``. Renders an error page on any failure.
    """
    # Bound up-front so the except handler can always reference it; previously
    # a failure inside settings() raised a NameError in the handler itself.
    usecasetab = {}
    try:
        from appbe.aion_config import settings
        usecasetab = settings()
        from appbe import basic_Config as bc
        request.session['defaultfilepath'] = DEFAULT_FILE_PATH
        request.session['configfilepath'] = CONFIG_FILE_PATH
        request.session['deploylocation'] = DEPLOY_LOCATION
        computeinfrastructure = compute.readComputeConfig()
        submit_button = request.POST.get('upload_submit')
        ModelVersion = request.session['ModelVersion']
        ruuningSetting = running_setting()
        selected_use_case = request.session['UseCaseName']
        ModelStatus = request.session['ModelStatus']
        # Retraining settings changes: before the final state, POST rebuilds
        # the config; any other request opens the existing one.
        if request.method == 'POST' and request.session['finalstate'] == 0:
            context = bc.gotoconf(request)
        else:
            context = bc.openbasicconf(request)
        context['computeinfrastructure'] = computeinfrastructure
        context['version'] = AION_VERSION
        context['usecasetab'] = usecasetab
        return render(request, 'basicconfig.html', context)
    except Exception as e:
        print(e)
        import sys, os
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        return render(request, 'basicconfig.html', {'error': 'Fail to retreive basic config file inputs', 'version': AION_VERSION, 'usecasetab': usecasetab})
def savebasicconfig(request):
    """Persist the basic configuration and route to the next workflow step.

    POST saves via ``bc.save`` and then either opens the advanced-config page
    (``BasicSubmit == 'GotoAdvance'``) or starts training; GET skips the save
    and goes straight to the advanced-config page. Save failures re-render the
    basic-config page with the error message.
    """
    from appbe import basic_Config as bc
    from appbe import advance_Config as ac
    from appfe.modelTraining.train_views import trainmodel
    # Defaults make GET requests (and a failed save) well-defined; previously
    # an exception in bc.save left ``status`` unbound and raised NameError below.
    status = 'pass'
    msg = ''
    try:
        if request.method != 'GET':
            status, msg, context = bc.save(request)
    except Exception as e:
        print(e)
        status = 'error'
        msg = str(e)
    if status.lower() != 'error':
        if request.method == 'GET':
            context = ac.basicconfignex(request)
            computeinfrastructure = compute.readComputeConfig()
            context['computeinfrastructure'] = computeinfrastructure
            context['version'] = AION_VERSION
            context['currentstate'] = 1
            return render(request, 'advancedconfig.html', context)
        elif request.POST.get('BasicSubmit') == 'GotoAdvance':
            context = ac.basicconfignex(request)
            computeinfrastructure = compute.readComputeConfig()
            context['computeinfrastructure'] = computeinfrastructure
            context['version'] = AION_VERSION
            context['currentstate'] = 1
            return render(request, 'advancedconfig.html', context)
        else:
            return trainmodel(request)
    else:
        # Save failed: reopen the stored config and surface the error message.
        context = bc.openbasicconf(request)
        computeinfrastructure = compute.readComputeConfig()
        context['computeinfrastructure'] = computeinfrastructure
        context['config_error'] = msg
        context['version'] = AION_VERSION
        return render(request, 'basicconfig.html', context)
|
models.py | from django.db import models
class usecasedetails(models.Model):
    """A named use case created by the user; parent of Existusecases versions."""
    id = models.AutoField(primary_key=True)
    UsecaseName = models.CharField(max_length=50)
    # NOTE(review): ``default=UsecaseName`` passes the *field object* itself
    # (not a value or a callable) as the default, which Django will stringify —
    # this looks like a bug; confirm the intended default before changing it,
    # since a fix requires a migration.
    usecaseid = models.CharField(max_length=10, default=UsecaseName)
    Description = models.CharField(max_length=200)
    class Meta:
        db_table = "usecasedetails"
class Existusecases(models.Model):
    """One trained/deployed model version belonging to a use case."""
    id = models.AutoField(primary_key=True)
    # Despite the name, this is a foreign key to the parent use case record.
    ModelName = models.ForeignKey(usecasedetails, on_delete=models.CASCADE)
    Version = models.IntegerField(default=0)
    # Artifact locations produced during training/deployment.
    DataFilePath = models.FileField(upload_to=None)
    ConfigPath = models.FileField(upload_to=None)
    DeployPath = models.FileField(upload_to=None)
    Status = models.CharField(max_length=200)
    publishStatus = models.CharField(max_length=20, default='')
    # OS process ids of the publish/training subprocesses (0 = none).
    publishPID = models.IntegerField(default=0)
    trainingPID = models.IntegerField(default=0)
    driftStatus = models.CharField(max_length=20, default='')
    ProblemType = models.CharField(max_length=20, default='')
    modelType = models.CharField(max_length=40, default='')
    portNo = models.IntegerField(default=0)
    TrainOuputLocation = models.CharField(max_length=200, default='')
    class Meta:
        db_table = "Existusecases"
mllite_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import pandas as pd
import numpy as np
from appbe.pages import getversion
import logging
import json
import time
import os
import subprocess
import sys
import base64
from appbe import compute
import urllib
AION_VERSION = getversion()
def Sagemaker(request):
    """Prepare AWS Sagemaker endpoint prediction inputs from a sample data file.

    On POST with complete credentials and a readable CSV, renders the predict
    form seeded with the first data row; otherwise renders an error context.
    """
    # Default context: previously a GET request (or empty credentials) reached
    # the render below with ``context`` unbound and raised NameError.
    context = {'error': 'Error: Please enter valid input', 'runtimeerror': 'runtimeerror', 'version': AION_VERSION}
    if request.method == "POST":
        try:
            datafile = request.POST['datap']
            endpoint = request.POST['endpoint']
            awsaccountid = request.POST['awsaccountid']
            accesskeyid = request.POST['accesskeyid']
            secretaccesskey = request.POST['secretaccesskey']
            sessionToken = request.POST['sessionToken']
            region = request.POST['region']
            if (awsaccountid != "" and accesskeyid != "" and secretaccesskey != "" and sessionToken != "" and endpoint != ""):
                awsSagemaker = {}
                awsSagemaker['awsID'] = awsaccountid
                awsSagemaker['accesskeyID'] = accesskeyid
                awsSagemaker['secretAccesskey'] = secretaccesskey
                awsSagemaker['sessionToken'] = sessionToken
                awsSagemaker['region'] = region
                configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
                with open(configFile, "r") as f:
                    configSettings = f.read()
                configSettingsJson = json.loads(configSettings)
                configSettingsJson['awsSagemaker'] = awsSagemaker
                if os.path.exists(datafile):
                    inputDataType = datafile.rsplit('.', 1)[1]
                    if inputDataType.lower() == 'csv':
                        # First row of the CSV becomes the editable
                        # input-field template shown in the UI.
                        df = pd.read_csv(datafile)
                        inputFields = [df.head(1).to_dict(orient='records')[0]]
                        context = {'sagepredict': 'sagepredict', 'endpoint': endpoint, 'datafile': datafile, 'inputFields': inputFields, 'sagemaker': configSettingsJson, 'version': AION_VERSION}
                    else:
                        context = {'exception': 'exception', 'error': 'Data File Error', 'version': AION_VERSION}
                else:
                    context = {'error': 'Error: Please enter valid input', 'runtimeerror': 'runtimeerror', 'version': AION_VERSION}
        except Exception as e:
            context = {'exception': 'exception', 'error': 'Exception :' + str(e), 'sagepredict': 'sagepredict', 'version': AION_VERSION}
    return render(request, 'ConvertOnnx.html', context)
def Tfliteneural(request):
    """Prepare TFLite runtime prediction inputs for CSV (tabular) or JPG (image) data.

    On POST with a valid ``.tflite`` model path and data file, renders the
    prediction form; all other paths render an explicit error context.
    """
    # Default context: previously several paths (GET, non-file model path)
    # left ``context`` unbound and relied on the resulting NameError being
    # swallowed by the broad except below.
    context = {'error': 'Failed to perform TFlite Runtime Prediction', 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
    try:
        if request.method == "POST":
            try:
                models = request.POST['model1']
                datafile = request.POST['datafile1']
                if os.path.isfile(models) and os.path.exists(datafile) and models.rsplit('.', 1)[1].lower() == 'tflite':
                    inputDataType = datafile.rsplit('.', 1)[1]
                    if inputDataType.lower() == 'csv':
                        # First CSV row seeds the editable input fields in the UI.
                        df = pd.read_csv(datafile)
                        inputFields = [df.head(1).to_dict(orient='records')[0]]
                        context = {'mlalgotf': 'mlalgotf', 'models': models, 'datafile': datafile, 'inputFields': inputFields, 'selected': 'mllite', 'version': AION_VERSION}
                    elif inputDataType.lower() == 'jpg':
                        from PIL import Image
                        img = Image.open(datafile)  # validates the image; raises if unreadable
                        string = base64.b64encode(open(datafile, "rb").read())
                        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
                        context = {'dlalgotf': 'dlalgotf', 'models': models, 'datafile': datafile, 'im': image_64, 'selected': 'mllite', 'version': AION_VERSION}
                    else:
                        context = {'error': 'Either model path or data path does not exists', 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
                else:
                    context = {'error': 'Either model path or data path does not exists', 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
            except Exception as e:
                context = {'error': 'Exception i.e., ' + str(e), 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
        return render(request, 'ConvertOnnx.html', context)
    except Exception:
        context = {'error': 'Failed to perform TFlite Runtime Prediction', 'runtimeerror': 'runtimeerror', 'selected': 'mllite'}
        return render(request, 'ConvertOnnx.html', context)
def openneural(request):
    """Prepare ONNX runtime prediction inputs for CSV (tabular) or JPG (image) data.

    On POST with a valid ``.onnx`` model path and data file, renders the
    prediction form; all other paths render an explicit error context.
    """
    # Default context: previously a GET request or an invalid model path left
    # ``context`` unbound, and the resulting NameError was masked by the bare
    # except below.
    context = {'error': 'Either model path or data path does not exists', 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
    try:
        if request.method == "POST":
            models = request.POST['model']
            datafile = request.POST['datafile']
            if os.path.isfile(models) and os.path.exists(datafile) and models.rsplit('.', 1)[1].lower() == 'onnx':
                inputDataType = datafile.rsplit('.', 1)[1]
                if inputDataType.lower() == 'csv':
                    # First CSV row seeds the editable input fields in the UI.
                    df = pd.read_csv(datafile)
                    inputFields = [df.head(1).to_dict(orient='records')[0]]
                    context = {'mlalgo': 'mlalgo', 'models': models, 'datafile': datafile, 'selected': 'mllite', 'inputFields': inputFields, 'version': AION_VERSION}
                elif inputDataType.lower() == 'jpg':
                    from PIL import Image
                    img = Image.open(datafile)  # validates the image; raises if unreadable
                    string = base64.b64encode(open(datafile, "rb").read())
                    image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
                    context = {'dlalgo': 'dlalgo', 'models': models, 'datafile': datafile, 'im': image_64, 'selected': 'mllite', 'version': AION_VERSION}
        return render(request, 'ConvertOnnx.html', context)
    except Exception:
        context = {'error': 'Failed to perform ONNX Runtime Prediction', 'runtimeerror': 'runtimeerror', 'selected': 'mllite', 'version': AION_VERSION}
        return render(request, 'ConvertOnnx.html', context)
def ConvertOnnx(request):
    """Convert a trained model between runtime formats, or push/deploy it to AWS Sagemaker.

    Non-sagemaker targets: builds a one-off conversion config from the POSTed
    paths and runs ``aion.py -m convertmodel`` in a subprocess, then reports
    the generated log file. Sagemaker target: fills ``model_converter.json``
    from the form and calls ``bin.aion_sagemaker`` in-process.
    """
    try:
        # Default shown for GET requests (previously ``context`` was unbound
        # there, raising NameError into the except below).
        context = {'exception': 'exception', 'error': 'Please Enter Valid Inputs', 'selected': 'mllite', 'version': AION_VERSION}
        if request.method == "POST":
            modelpath = request.POST['models']
            deploypath = request.POST['deploy']
            outputonnx = request.POST['outputonnx']
            inputtonnx = request.POST['inputtonnx']
            Features = request.POST['Features']
            modelinput = inputtonnx
            modeloutput = outputonnx
            if (not os.path.exists(modelpath)) and (outputonnx != "sagemaker") and (not os.path.exists(deploypath)):
                context = {'modelpath': modelpath, 'deploypath': deploypath, 'inputtype': modelinput, 'outputtype': modeloutput, 'Features': Features, 'error2': 'error2', 'convert': 'convert', 'logfile': '', 'selected': 'mllite', 'version': AION_VERSION}
            elif outputonnx != "sagemaker":
                # Build a timestamped conversion config from the packaged template.
                filetimestamp = str(int(time.time()))
                convjson = os.path.join(DEFAULT_FILE_PATH, 'conversion.json')
                with open(convjson, 'r') as f:
                    conv = json.load(f)
                conv['basic']['modelName'] = 'conversion_' + str(filetimestamp)
                conv['basic']['modelVersion'] = "1"
                conv['advance']['aionConversionUtility']['modelpath'] = modelpath
                conv['advance']['aionConversionUtility']['deployedlocation'] = deploypath
                conv['advance']['aionConversionUtility']['numberoffeatures'] = Features
                temp = {}
                temp['inputModelType'] = inputtonnx
                temp['outputModelType'] = outputonnx
                inputtype = conv['advance']['aionConversionUtility']['inputModelType']
                outputtype = conv['advance']['aionConversionUtility']['outputModelType']
                # Reset every type flag, then enable only the requested pair.
                # Config keys are camelCase, hence lower-casing the first letter.
                for i in list(inputtype.keys()):
                    conv['advance']['aionConversionUtility']['inputModelType'][i] = 'False'
                for i in list(outputtype.keys()):
                    conv['advance']['aionConversionUtility']['outputModelType'][i] = 'False'
                conv['advance']['aionConversionUtility']['inputModelType'][temp['inputModelType'][0].lower() + temp['inputModelType'][1:]] = 'True'
                conv['advance']['aionConversionUtility']['outputModelType'][temp['outputModelType'][0].lower() + temp['outputModelType'][1:]] = 'True'
                conv = json.dumps(conv)
                config_json_filename = os.path.join(CONFIG_FILE_PATH, 'conv' + filetimestamp + '.json')
                with open(config_json_filename, "w") as fpWrite:
                    fpWrite.write(conv)
                scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'aion.py'))
                try:
                    outputStr = subprocess.check_output([sys.executable, scriptPath, '-m', 'convertmodel', '-c', config_json_filename])
                    outputStr = outputStr.decode('utf-8')
                    # The subprocess prints a dict-like summary; normalize quotes
                    # so json.loads can parse it.
                    outputStr = outputStr.replace('\'', '\"')
                    outputStr = outputStr.strip()
                    MLlite = json.loads(outputStr)
                    logsfile = MLlite['logfiles']
                    if MLlite['Convert'] == 'Success':
                        context = {'modelpath': modelpath, 'deploypath': deploypath, 'inputtype': modelinput, 'outputtype': modeloutput, 'Features': Features, 'convert1': 'convert1', 'convert': 'convert', 'logfile': MLlite['logfiles'], 'selected': 'mllite', 'version': AION_VERSION}
                    else:
                        # Template-safe path separator for the log-file link.
                        logfile = logsfile.replace('\\', '@')
                        context = {'modelpath': modelpath, 'deploypath': deploypath, 'inputtype': modelinput, 'outputtype': modeloutput, 'Features': Features, 'error1': 'error1', 'convert': 'convert', 'logfile': logfile, 'selected': 'mllite', 'version': AION_VERSION}
                except Exception as e:
                    print(e)
                    context = {'modelpath': modelpath, 'deploypath': deploypath, 'inputtype': modelinput, 'outputtype': modeloutput, 'Features': Features, 'Notconvert': 'Notconvert', 'convert': 'convert', 'version': AION_VERSION}
            else:
                # outputonnx == "sagemaker": fill model_converter.json from the form.
                configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
                with open(configFile, "r") as f:
                    configSettings = f.read()
                configSettingsJson = json.loads(configSettings)
                configSettingsJson['modelInput'] = request.POST.get('ModelInput')
                if request.POST.get('sagemaker') == 'CreateDeploy':
                    configSettingsJson['sagemakerDeploy'] = 'True'
                    configSettingsJson['deployExistingModel']['status'] = 'False'
                else:
                    configSettingsJson['sagemakerDeploy'] = 'False'
                if request.POST.get('sagemaker') == 'DeployOnly':
                    configSettingsJson['deployExistingModel']['status'] = 'True'
                else:
                    configSettingsJson['deployExistingModel']['status'] = 'False'
                configSettingsJson['deployExistingModel']['dockerImageName'] = request.POST.get('imagename')
                configSettingsJson['deployExistingModel']['deployModeluri'] = request.POST.get('deploymodel')
                configSettingsJson['modelOutput']['cloudInfrastructure'] = request.POST.get('problemname')
                configSettingsJson['endpointName'] = request.POST.get('endpointname')
                configSettingsJson['awsSagemaker']['awsID'] = request.POST.get('awskeyid1')
                configSettingsJson['awsSagemaker']['accesskeyID'] = request.POST.get('accesskey1')
                configSettingsJson['awsSagemaker']['secretAccesskey'] = request.POST.get('secretaccess1')
                configSettingsJson['awsSagemaker']['sessionToken'] = request.POST.get('token1')
                configSettingsJson['awsSagemaker']['region'] = request.POST.get('region1')
                configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn'] = request.POST.get('fullaccess')
                from bin.aion_sagemaker import aion_sagemaker
                aion_sagemaker(configSettingsJson)
                # BUG FIX: the original referenced an undefined ``outputStr``
                # here, so every successful sagemaker run raised NameError and
                # ended on the generic "Error during Conversion" page.
                context = {'convert': 'convert', 'sagemaker1': 'sagemaker1', 'mlflow': 'mlflow', 'inputtype': modelinput, 'outputtype': modeloutput, 'deploy': '', 'selected': 'mllite', 'version': AION_VERSION}
    except Exception as e:
        print(e)
        context = {'exception': 'exception', 'error': 'Error during Conversion', 'selected': 'mllite', 'version': AION_VERSION}
    return render(request, 'ConvertOnnx.html', context)
def sageprediction(request):
    """Invoke a deployed Sagemaker endpoint with the key/value pairs posted from the UI.

    Builds a prediction config from ``model_converter.json`` plus the form
    fields, runs ``bin/run_sagemaker.py`` in a subprocess, and returns the
    prediction as an HTML table (or the error message on failure).
    """
    # BUG FIX: ``re`` is used below but was never imported in this module,
    # which raised NameError on every request.
    import re
    values = request.POST['value']
    keys = request.POST['keys']
    endpoint = request.POST['endpointname']
    x = keys.split(",")
    y = values.split(",")
    dictionary = {key: value for key, value in zip(x, y)}
    awsSagemaker = {}
    awsSagemaker['awsID'] = request.POST['awsaccountid']
    awsSagemaker['accesskeyID'] = request.POST['accesskeyid']
    awsSagemaker['secretAccesskey'] = request.POST['secretaccesskey']
    awsSagemaker['sessionToken'] = request.POST['sessionToken']
    awsSagemaker['region'] = request.POST['region']
    configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
    with open(configFile, "r") as f:
        configSettings = f.read()
    configSettingsJson = json.loads(configSettings)
    # Keep the role ARN from the stored config; everything else comes from the form.
    awsSagemaker['IAMSagemakerRoleArn'] = configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn']
    configSettingsJson['awsSagemaker'] = awsSagemaker
    configSettingsJson['data'] = dictionary
    configSettingsJson['endpointName'] = endpoint
    configSettingsJson['prediction']['status'] = 'True'
    conv = json.dumps(configSettingsJson)
    scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin', 'run_sagemaker.py'))
    outputStr = subprocess.check_output([sys.executable, scriptPath, conv])
    outputStr = outputStr.decode('utf-8')
    # The runner prints "predictions:<json>"; extract the payload after the tag.
    outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
    outputStr = outputStr.strip()
    output = json.loads(outputStr)
    if output['status'] == 'SUCCESS':
        outputStr = output['data']
        outputStr = pd.json_normalize(outputStr)
        outputStr = outputStr.to_html()
    else:
        outputStr = output['msg']
    return HttpResponse(outputStr)
def runtimeutility(request):
    """Run runtime prediction for a converted model and return the raw result.

    CSV input: the posted key/value pairs are written to
    ``DEFAULT_FILE_PATH/runtime.json`` and passed to ``runTimeTesting``.
    Any other input: the data file path is passed through directly.
    POST only; other methods fail on the unbound result.
    """
    if request.method == "POST":
        models = request.POST['model']
        datafile = request.POST['datafile']
        extension = datafile.rsplit('.', 1)[1]
        if extension.lower() == 'csv':
            field_names = request.POST['keys'].split(",")
            field_values = request.POST['value'].split(",")
            payload = json.dumps(dict(zip(field_names, field_values)), indent=4)
            config_json_filename = os.path.join(DEFAULT_FILE_PATH, 'runtime.json')
            with open(config_json_filename, "w") as config_file:
                config_file.write(payload)
            from conversions.runtime_utility import runTimeTesting
            config_json_file = os.path.join(DEFAULT_FILE_PATH, 'runtime.json')
            outputStr = runTimeTesting(models, config_json_file)
        else:
            from conversions.runtime_utility import runTimeTesting
            outputStr = runTimeTesting(models, datafile)
    return HttpResponse(outputStr)
mltest_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
AION_VERSION = getversion()
import os
import time
import subprocess
import sys
import re
import pandas as pd
def mltesting(request):
    """Render the ML testing landing page."""
    from appbe.pages import mltesting_page
    page_context = mltesting_page(request)
    page_context['selected'] = 'mltesting'
    page_context['version'] = AION_VERSION
    return render(request, 'mltesting.html', page_context)
def ABtest(request):
    """A/B-test an existing model against a freshly trained baseline.

    Reads the earlier ML-test result (paths, features, score) from the
    session, builds an AION training configuration for the chosen baseline
    algorithm(s), runs training through ``aion.py`` in a subprocess, then
    compares the new best score with the existing model's score and renders
    'mltesting.html' with the outcome.
    """
    try:
        if request.method == "POST":
            models = request.POST["model"]
            data = request.POST["data"]
            #context['version'] = AION_VERSION
            # Both the model file and the data file must exist on disk.
            if(os.path.isfile(models) and os.path.isfile(data)):
                # Map estimator class names to AION display names used in configs.
                AlgorithmNames={'LogisticRegression':'Logistic Regression','SGDClassifier':'Stochastic Gradient Descent','GaussianNB':'Naive Bayes','SVC':'Support Vector Machine','KNeighborsClassifier':'K Nearest Neighbors','DecisionTreeClassifier':'Decision Tree','RandomForestClassifier':'Random Forest','GradientBoostingClassifier':'Gradient Boosting','XGBClassifier':'Extreme Gradient Boosting (XGBoost)','DecisionTreeRegressor':'Decision Tree','LinearRegression':'Linear Regression','Lasso':'Lasso','Ridge':'Ridge','RandomForestRegressor':'Random Forest','XGBRegressor':'Extreme Gradient Boosting (XGBoost)'}
                filetimestamp = str(int(time.time()))
                # Template training configuration shipped with AION.
                mltestjson = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json')
                # NOTE(review): f.close() inside a 'with' block is redundant.
                with open(mltestjson, 'r+') as f:
                    mltest = json.load(f)
                    f.close()
                # Result of the earlier ML test run (stored path in session).
                with open(request.session['MLTestResult'], 'r+') as f:
                    mltestresult = json.load(f)
                    f.close()
                models = mltestresult['models']
                datapath = mltestresult['datap']
                featurs = mltestresult['feature']
                featurs = featurs.split(",")
                tar = mltestresult['target']
                tar = tar.split(",")
                # models,datap,Problemtype,targ,feature,Problem,Parameters,Accuracy
                # models,datap,Problemtype,targ,feature,Problem,Parameters,Accuracy
                # Fill the 'basic' section of the training configuration.
                mltest['basic']['modelName'] = 'MLtest_'+ str(filetimestamp)
                mltest['basic']['modelVersion'] = "1"
                mltest['basic']['dataLocation'] = mltestresult['datap']
                mltest['basic']['deployLocation'] = DEPLOY_LOCATION
                mltest['basic']['trainingFeatures'] = mltestresult['feature']
                mltest['basic']['targetFeature'] = mltestresult['target']
                mltest['advance']['profiler']['featureDict']=[]
                temp = {}
                Problemtype = mltestresult['Problemtype']
                # Choose the scoring criterion matching the problem type.
                if Problemtype == 'Classification':
                    Accuracyscore1 = mltestresult['Score']
                    Accuracyscore = float(Accuracyscore1)*100
                    temp['ScoringCriteria'] = 'Accuracy'
                else:
                    R2_Score = round(float(mltestresult['Score']),2)
                    temp['ScoringCriteria'] = 'R2'
                baselineparam = mltestresult['Params']
                temp['algorithm'] = []
                # 'Samebaseline' re-trains with the same algorithm; otherwise the
                # user supplied a comma-separated list of baseline algorithms.
                if request.session["AionProblem"] == 'Samebaseline':
                    baselineprob = AlgorithmNames[mltestresult['ProblemName']]
                    temp['algorithm'].append(baselineprob)
                else:
                    baselineprob = request.session["AionProblem"]
                    temp['algorithm'] = baselineprob.split(",")
                #print(baselineprob)
                temp['ProblemType'] = Problemtype
                #temp['algorithm'] = ['K Nearest Neighbors']
                problemtyp = mltest['basic']['analysisType']
                scoring = mltest['basic']['scoringCriteria']
                # Reset every scoring flag, then enable only the chosen criterion.
                for i in list(scoring.keys()):
                    for x in list(mltest['basic']['scoringCriteria'][i].keys()):
                        mltest['basic']['scoringCriteria'][i][x] = 'False'
                if temp['ProblemType'].lower() in ["classification","regression",]:
                    # Config keys are camelCase ('classification'/'regression').
                    mltest['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True'
                for i in list(problemtyp.keys()):
                    mltest['basic']['analysisType'][i]='False'
                algorithm = mltest['basic']['algorithms']
                # Reset every algorithm flag, then enable the baseline one(s).
                for i in list(algorithm.keys()):
                    for x in list(mltest['basic']['algorithms'][i].keys()):
                        mltest['basic']['algorithms'][i][x] = 'False'
                mltest['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True'
                for X in temp['algorithm']:
                    mltest['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][X] = 'True'
                mltest = json.dumps(mltest)
                config_json_filename = os.path.join(CONFIG_FILE_PATH, 'MLtest' + filetimestamp + '.json')
                with open(config_json_filename, "w") as fpWrite:
                    fpWrite.write(mltest)
                    fpWrite.close()
                # Launch training and extract the 'aion_learner_status:' JSON
                # payload from the subprocess stdout.
                scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
                outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','training','-c',config_json_filename])
                outputStr = outputStr.decode('utf-8')
                outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
                outputStr = outputStr.strip()
                decoded_data = json.loads(outputStr)
                #print(decoded_data)
                # Normalise the best score to two decimals for display.
                if decoded_data['data']['ScoreType'] == 'R2':
                    decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2))
                if decoded_data['data']['ScoreType'].lower() == 'accuracy':
                    decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2))
                #print(decoded_data)
                #print('123',Accuracyscore)
                # Existing model wins ties (>=); otherwise show the comparison table.
                if Problemtype == 'Classification':
                    if Accuracyscore >= float(decoded_data['data']['BestScore']) :
                        context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION}
                    else:
                        context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':Accuracyscore,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION}
                else:
                    if R2_Score >= float(decoded_data['data']['BestScore']) :
                        context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','regression':'regression','regsuccess':'regsuccess','selected':'mltesting'}
                    else:
                        context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':R2_Score,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'regression':'regression','regsuccess':'regsuccess','selected':'mltesting','version':AION_VERSION}
            else:
                context= {'error':'Error - Model file or Data file does not exist','abtesting':'abtesting','selected':'mltesting'}
        # NOTE(review): on a non-POST request 'context' is unbound here; the
        # resulting NameError is caught below and an error page is rendered.
        context['version'] = AION_VERSION
        return render(request, 'mltesting.html', context)
    except Exception as e:
        print(e)
        context= {'error':'Error - Fail to perform A/B Testing','abtesting':'abtesting','selected':'mltesting'}
        context['version'] = AION_VERSION
        return render(request, 'mltesting.html', context)
def UQTesting(request):
    """Run uncertainty quantification (UQ) on a trained model and a dataset.

    Expects POST fields 'modeluq' (model file path), 'datauq' (CSV path),
    'Traininguq' (list of training features) and 'Targetuq' (target column).
    Renders 'mltesting.html' with the UQ results for regression or
    classification, colour-flagged by the quality message returned by the
    UQ engine ('Good' / 'Bad' / anything else).
    """
    try:
        if request.method == "POST":
            models = request.POST['modeluq']
            datap = request.POST['datauq']
            if(os.path.isfile(models) and os.path.isfile(datap)):
                df = pd.read_csv(datap)
                trainfea = df.columns.tolist()
                featurs = request.POST.getlist('Traininguq')
                feature = ",".join(featurs)
                targ = request.POST['Targetuq']
                tar =[targ]
                from bin.aion_uncertainties import aion_uq
                # The UQ engine returns a JSON string with at least
                # 'Problem' and 'msg' keys.
                outputStr = aion_uq(models,datap,feature,tar)
                print(outputStr)
                uq_test = json.loads(outputStr)
                Problemtype= uq_test['Problem']
                msg = uq_test['msg']
                if Problemtype == 'Regression':
                    # Colour flag for the template: Green=Good, Red=Bad,
                    # orange=anything else.
                    if msg == "Good":
                        context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Green':'Green','selected':'mllite','version':AION_VERSION}
                    elif msg == "Bad":
                        context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Red':'Red','selected':'mllite','version':AION_VERSION}
                    else:
                        context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'orange':'orange','selected':'mllite','version':AION_VERSION}
                elif Problemtype == 'Classification':
                    # Build a two-column Attribute/value report table.
                    df3 = pd.DataFrame.from_dict(uq_test, orient='index', columns=['value'])
                    df3 = df3.rename_axis('Attributes').reset_index()
                    # Literal underscore replacement; regex=False avoids the
                    # pandas regex-default FutureWarning without changing the result.
                    df3['Attributes'] = df3['Attributes'].str.replace('_', ' ', regex=False)
                    # Skip the first four rows (non-metric metadata entries).
                    df3 = df3.iloc[4:, :]
                    report = df3.to_html(index=False)
                    # Fix: the original dict literals carried a duplicate
                    # 'selected' key ('mltesting' then 'mllite'); in a dict
                    # literal only the last value survives, so keep 'mllite'.
                    # All three msg branches produced an identical context,
                    # so one assignment suffices.
                    context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mllite','version':AION_VERSION}
                elif Problemtype == 'None':
                    # UQ engine could not determine the problem type.
                    context={'Uqerror':'Uqerror','errormsg':"Error:"+str(msg),'version':AION_VERSION}
            else:
                context= {'error':'Please enter valid inputs','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION}
        return render(request, 'mltesting.html', context)
    except Exception as e:
        print("uqregression error: ",e)
        context= {'error':'Error - Fail to perform Uncertainty Quantification ','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION}
        return render(request, 'mltesting.html', context)
trustedAI_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import pandas as pd
import numpy as np
from appbe.pages import getversion
import logging
import json
import time
import os
from appbe import compute
AION_VERSION = getversion()
def sensitivityAnalysis(request): #usnish
    """Run trusted-AI sensitivity analysis and return its result as JSON."""
    from appbe.pages import usecases_page
    t1 = time.time()
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
    log = logging.getLogger('log_ux')
    # NOTE(review): computeinfrastructure is computed but never used here.
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    try:
        from trusted_ai.sensitivity_analysis import startSA
        # request.session['deploypath'] = str(p.DeployPath)
        # startSA returns a JSON string with 'Status' and either 'graph'
        # (on success) or 'reason' (on failure).
        sensitivitystr= startSA(request)
        sensitivitystr = json.loads(sensitivitystr)
        ModelStatus = request.session['ModelStatus']
        if sensitivitystr['Status']=='Success':
            # NOTE(review): sensitivityGraph is extracted but never used.
            sensitivityGraph = sensitivitystr['graph']
            t2 = time.time()
            log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str(
                ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
            return HttpResponse(json.dumps(sensitivitystr))
        else:
            # Propagate the engine's failure reason into the except handler.
            error = sensitivitystr['reason']
            raise Exception(error)
    except Exception as e:
        print(e)
        log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to Perform Sensitivity Analysis, ' + str(e))
        outputstr = json.dumps({'Status':'','msg':'Failed to Perform Sensitivity Analysis. '+str(e)})
        return HttpResponse(outputstr)
def handlefairness(request):
    """Return the feature/problem-type info needed by the fairness UI as JSON.

    Reads the current training configuration (path stored in the session),
    determines the enabled analysis type, and collects the categorical
    features from the profiler feature dictionary.
    """
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
    updatedConfigFile = request.session['config_json']
    # Fix: read the config via a context manager so the handle is always closed.
    with open(updatedConfigFile, "r") as f:
        configSettings = json.loads(f.read())
    # The analysis-type flag set to 'True' decides the problem type;
    # default to classification when none is enabled.
    problemType = 'classification'
    for key in configSettings['basic']['analysisType']:
        if configSettings['basic']['analysisType'][key] == 'True':
            problemType = key
            break
    trainingfeature = configSettings['basic']['trainingFeatures']
    targetfeature = configSettings['basic']['targetFeature']
    featuretype = configSettings['advance']['profiler']['featureDict']
    # Only categorical features are relevant for fairness analysis.
    # (Removed an unused 'colm' local from the original loop.)
    catfeature = [feat_conf['feature'] for feat_conf in featuretype
                  if feat_conf['type'] == "categorical"]
    output={'targetfeature':targetfeature,'trainingfeature':trainingfeature,'catfeature':catfeature,'problemType':problemType}
    return HttpResponse(json.dumps(output))
def fairnesmetrics(request): #Richard--Task-13581
    """Compute fairness metrics for the current use case and return them.

    Delegates to trusted_ai.fairness_metrics.get_metrics and logs the
    elapsed time; on failure logs the error and returns an empty response.
    """
    from appbe.pages import usecases_page
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
    t1 = time.time()
    log = logging.getLogger('log_ux')
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    try:
        from trusted_ai.fairness_metrics import get_metrics
        output = get_metrics(request)
        t2 = time.time()
        log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        return HttpResponse(output)
    except Exception as e:
        print(e)
        # Fix: corrected the misspelled 'diaplay' in the failure log message.
        log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Fairness Metrics, ' + str(e))
        return HttpResponse('')
def performance_metrics(request):
    """Compute model performance metrics and return them as JSON.

    Delegates to trusted_ai.performance.get_metrics and logs the elapsed
    time; on failure logs the error and returns an empty response.
    """
    from appbe.pages import usecases_page
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
    t1 = time.time()
    log = logging.getLogger('log_ux')
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    try:
        from trusted_ai.performance import get_metrics
        output = get_metrics(request)
        t2 = time.time()
        log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        print(output)
        return HttpResponse(json.dumps(output))
    except Exception as e:
        print(e)
        # Fix: corrected the misspelled 'diaplay' in the failure log message.
        log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Performance Metrics, ' + str(e))
        return HttpResponse('')
def uquncertainty(request):
    """Run trusted-AI uncertainty quantification and return its raw output."""
    from trusted_ai.trustedai_uq import trustedai_uq
    from appbe.telemetry import UpdateTelemetry
    usecase_key = request.session['usecaseid'] + '-' + str(request.session['ModelVersion'])
    UpdateTelemetry(usecase_key, 'TrustedAI', 'Yes')
    return HttpResponse(trustedai_uq(request))
def uqtransparency(request):
    """Return transparency info (Brier score for classification) as JSON.

    Reads the deployed model's etc/display.json to find the problem type
    and model features; computes the Brier score only for classification.
    """
    t1 = time.time()
    log = logging.getLogger('log_ux')
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
    selected_use_case = request.session['UseCaseName']
    model_version = request.session['ModelVersion']
    try:
        deploypath = request.session['deploypath']
        configpath = os.path.join(deploypath,'etc','display.json')
        # Fix: read the display config via a context manager so the
        # file handle is always released.
        with open(configpath, "r") as f:
            configSettings = json.loads(f.read())
        problemType = configSettings['problemType']
        model_Features = configSettings['modelFeatures']
        if problemType.lower() == 'classification':
            from trusted_ai.brier_score import get_brier_score
            problem_type, brier_score = get_brier_score(request)
            display_dict = {"ProblemType":problem_type.title(),"BrierScore":round(brier_score, 2),'model_Features':model_Features,'problemTypeuq':problemType}
        else:
            # Brier score only applies to classification problems.
            display_dict = {"ProblemType":problemType,"BrierScore":'','model_Features':model_Features,'problemTypeuq':problemType}
        display_json = json.dumps(display_dict)
        t2 = time.time()
        log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str(
            model_version) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        return HttpResponse(display_json, content_type="application/json")
    except Exception as e:
        print(e)
        # Fix: corrected the misspelled 'diaplay' in the failure log message.
        log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str(
            model_version) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Brier Score, ' + str(e))
        return HttpResponse('')
upload_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import os
import plotly.graph_objects as go
import time
import sys
from pathlib import Path
import csv
import pandas as pd
import numpy as np
from appbe.pages import getversion
AION_VERSION = getversion()
def uploadedData(request):
    """Ingest an uploaded data file and render the upload page."""
    from appbe.dataIngestion import ingestDataFromFile
    from appbe.aion_config import get_edafeatures
    page_ctx = ingestDataFromFile(request, DATA_FILE_PATH)
    # 'FeturesEDA' (sic) is the key name the template expects.
    page_ctx.update({'version': AION_VERSION, 'FeturesEDA': get_edafeatures()})
    return render(request, 'upload.html', page_ctx)
def uploaddatafromscript(request):
    """Execute a user-supplied Python script to produce the training dataframe.

    The script (path in POST field 'pythonscriptPath') must define a pandas
    dataframe named ``dfpy`` and may optionally define ``df_aion_raw`` for a
    raw-data preview.  On success the dataframe is written to DATA_FILE_PATH
    and the upload page is rendered with a 10-row preview.
    """
    from appbe.aion_config import settings
    usecasetab = settings()
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    from appfe.modelTraining.models import Existusecases
    clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    try:
        scriptPath = request.POST.get('pythonscriptPath')
        if(os.path.isfile(scriptPath) == False ):
            context = {'tab': 'upload', 'error': 'File does not exist', 'selected': 'modeltraning','clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
            context['version'] = AION_VERSION
            return render(request, 'upload.html', context)
        if(scriptPath != ''):
            try:
                # Fix: read the script through a context manager.
                with open(scriptPath, "r") as f:
                    pythoncode = f.read()
                ldict = {}
                # SECURITY NOTE: exec() runs arbitrary user-supplied code with
                # full process privileges; only accept trusted scripts here.
                exec(pythoncode, globals(), ldict)
            except Exception as e:
                context = {'tab': 'upload', 'error': 'Error in script execution i.e., '+str(e), 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
                           'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
                context['version'] = AION_VERSION
                return render(request, 'upload.html', context)
            if 'dfpy' not in ldict:
                context = {'tab': 'upload', 'error': 'dfpy dataset not found', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
                           'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
                context['version'] = AION_VERSION
                return render(request, 'upload.html', context)
            raw_data = ''
            if 'df_aion_raw' in ldict:
                df_raw = ldict['df_aion_raw']
                raw_data = df_raw.to_json(orient="records")
                raw_data = json.loads(raw_data)
            df = ldict['dfpy']
            # Persist the generated dataframe as the session's data file.
            filetimestamp = str(int(time.time()))
            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
            request.session['datalocation'] = str(dataFile)
            df.to_csv(dataFile, index=False)
            df_top = df.head(10)
            df_json = df_top.to_json(orient="records")
            df_json = json.loads(df_json)
            statusmsg = 'Data File Uploaded Successfully '
            request.session['currentstate'] = 0
            request.session['finalstate'] = 0
            request.session['datatype'] = 'Normal'
            from appbe.aion_config import get_edafeatures
            No_of_Permissible_Features_EDA = get_edafeatures()
            context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'raw_data':raw_data,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'usecasetab':usecasetab}
            return render(request, 'upload.html', context)
        else:
            from appfe.modelTraining.models import Existusecases
            clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
            context = {'tab': 'upload','computeinfrastructure':computeinfrastructure, 'error': 'Please enter script path', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
            return render(request, 'upload.html', context)
    except Exception as e:
        # Fix: narrowed the original bare 'except:' (which also swallowed
        # SystemExit/KeyboardInterrupt) and log the failure before rendering.
        print(e)
        from appfe.modelTraining.models import Existusecases
        clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
        return render(request, 'upload.html', {'tab': 'upload','clusteringModels':clusteringModels,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'error':'Fail to upload data from script','selected_use_case': selected_use_case,
                                               'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion})
def listfiles(request):
    """Scan a user-supplied folder for files of a selected type and build a
    File/Label CSV listing used by later labelling/training steps.

    POST fields: 'scriptPath' (folder path), 'filetype' (extension), and
    'computeInfrastructure' (AWS/GCP switches to the LLM_* data types).
    Renders 'upload.html' with the file list, count and total size.
    """
    from appbe.labels import label_filename
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    path = request.POST.get('scriptPath')
    print(path)
    dirstatus = os.path.isdir(path)
    import glob
    try:
        if(path != '' and dirstatus == True):
            radiovalue = request.POST.get('filetype')
            # create csv
            filetimestamp = str(int(time.time()))
            header = ['File', 'Label']
            filename = 'AION_List_' + selected_use_case + '.csv'
            dataFile = os.path.join(DATA_FILE_PATH, filename)
            csvfilename = 'AION_List_' + filetimestamp
            request.session['csvfilename'] = dataFile
            request.session['datalocation'] = path
            # NOTE(review): 'type' shadows the builtin of the same name.
            type = 'NA'
            request.session['fileExtension'] = radiovalue
            # Map the selected extension to a data type; AWS/GCP compute
            # switches to the LLM_* variants.
            if radiovalue in ['avi', 'wmv', 'mp4']:
                if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
                    request.session['datatype'] = 'LLM_Video'
                    type = 'LLM_Video'
                else:
                    request.session['datatype'] = 'Video'
                    type = 'Video'
            elif radiovalue in ['jpeg', 'png', 'bmp']:
                if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
                    request.session['datatype'] = 'LLM_Image'
                    type = 'LLM_Image'
                else:
                    request.session['datatype'] = 'Image'
                    type = 'Image'
            elif radiovalue in ['txt', 'log', 'pdf','docs','docx','doc']:
                if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
                    request.session['datatype'] = 'LLM_Document'
                    type = 'LLM_Document'
                else:
                    request.session['datatype'] = 'Document'
                    type = 'Document'
            elif radiovalue in ['java','py']:
                if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
                    request.session['datatype'] = 'LLM_Code'
                    type = 'LLM_Code'
                else:
                    request.session['datatype'] = 'Code'
                    # NOTE(review): 'Document' here looks like a copy-paste
                    # of the branch above ('Code' expected) — confirm.
                    type = 'Document'
            if type == 'NA':
                context = {'tab': 'upload', 'error': 'Please select the type', 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
                return render(request, 'upload.html', context)
            request.session['folderPath'] = path
            request.session['csvfullpath'] = dataFile
            # NOTE(review): this handle is never closed explicitly; the CSV
            # writer below depends on it across the rest of the function.
            file = open(dataFile, 'w', newline='')
            writer = csv.DictWriter(file, fieldnames=header)
            # writing data row-wise into the csv file
            writer.writeheader()
            #os.chdir(path)
            # Count matching files recursively ('doc' also counts 'docx';
            # 'jpeg' also counts top-level '*.jpg').
            tifCounter = 0
            if radiovalue == 'doc':
                tifCounter = len(glob.glob(os.path.join(path,"**/*."+'doc'),recursive=True))
                tifCounter = tifCounter+len(glob.glob(os.path.join(path,"**/*."+'docx'),recursive=True) )
            else:
                tifCounter = len(glob.glob(os.path.join(path, "**/*." + radiovalue), recursive=True))
            if radiovalue == 'jpeg':
                tifCounter += len(glob.glob1(path,"*.jpg"))
            labelfileexists = False
            dflabels = pd.DataFrame()
            # For image data, preview up to 5 rows of any existing label file.
            if type == 'Image':
                labelfilename = label_filename(request)
                labelfileexists = os.path.isfile(labelfilename)
                if labelfileexists == True:
                    dflabels = pd.read_csv(labelfilename)
                    if len(dflabels) == 0:
                        labelfileexists = False
                    else:
                        dflabels = dflabels.head(5)
            if tifCounter == 0:
                context = {'tab': 'upload', 'error': 'No files in the folder with selected file type', 'selected': 'modeltraning','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
                return render(request, 'upload.html', context)
            filesCount = 0
            filesSize = 0
            files=[]
            for filename in glob.iglob(os.path.join(path, "**/*." + radiovalue), recursive=True):
                files.append(filename)
            if radiovalue == 'doc':
                for filename in glob.iglob(os.path.join(path, "**/*." + 'docx'), recursive=True):
                    files.append(filename)
            # Write one CSV row per file and accumulate count/size.
            for filename in files:
                filesCount = filesCount+1
                writer.writerow({'File': filename, 'Label': ''})
                get_size = os.path.getsize(filename)
                filesSize = round(filesSize + get_size, 1)
            # Human-readable total size (M / K / B).
            if filesSize > 1048576:
                size = round((filesSize / (1024 * 1024)), 1)
                filesSize = str(size) + ' M'
            elif filesSize > 1024:
                size = round((filesSize /1024), 1)
                filesSize = str(size) + ' K'
            else:
                filesSize = str(filesSize) + ' B'
            files = pd.DataFrame(files,columns=['File'])
            files.index = range(1, len(files) + 1)
            files.reset_index(level=0, inplace=True)
            files = files.to_json(orient="records")
            files = json.loads(files)
            # '*.jpg' files are appended to the CSV but not to the preview
            # list built above.
            if radiovalue == 'jpeg':
                for filename in glob.iglob(os.path.join(path,"**/*.jpg"), recursive=True):
                    writer.writerow({'File': filename, 'Label': ''})
            from appbe.aion_config import get_edafeatures
            No_of_Permissible_Features_EDA = get_edafeatures()
            #filesSize = str(filesSize)+' M'
            print(filesSize)
            print(filesCount)
            context = {'tab': 'upload','files':files,'filesCount':filesCount,'filesSize':filesSize,'filelist':dataFile,'finalstate':0, 'file': dataFile,'FeturesEDA':No_of_Permissible_Features_EDA, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,"selectedfile":radiovalue,"selectedPath":path}
            return render(request, 'upload.html', context)
        else:
            # NOTE(review): 'radiovalue' is unbound on this branch; the
            # NameError is caught below and the fallback page is rendered.
            context = {'tab': 'upload', 'error': 'Error: Folder path either not entered or does not exists.', 'modeltraning': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,"selectedfile":radiovalue,"selectedPath":path}
            return render(request, 'upload.html', context)
    except Exception as e:
        print(e)
        return render(request, 'upload.html', {'tab': 'upload','error':'Folder path is mandatory','version':AION_VERSION,'computeinfrastructure':computeinfrastructure, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion})
def validatecsv(request):
from appbe.aion_config import settings
usecasetab = settings()
from appbe import exploratory_Analysis as ea
from appbe.labels import label_filename
try:
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
#print(request.POST.get('validatesubmit'))
if request.POST.get('validatesubmit') == 'ObjectDetection':
df = pd.read_csv(request.session['csvfullpath'])
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
request.session['currentIndex'] = 0
request.session['endIndex'] = len(df)-1
not_end = not(request.session['currentIndex'] == request.session['endIndex'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
request.session['labels'] = []
if os.path.isfile(dataFile):
image = df["File"].iloc[request.session['currentIndex']]
with open(dataFile, 'r') as file:
reader = csv.reader(file)
for row in reader:
if row[0] == image:
labels = request.session['labels']
labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
request.session['labels'] = labels
labels = request.session['labels']
else:
with open(dataFile,'w') as f:
f.write("File,id,angle,xmin,xmax,ymin,ymax,height,width,Label\n")
f.close()
bounds = []
context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'objectlabelling.html', context)
elif request.POST.get('validatesubmit') == 'bulkLabeling':
type = 'BulkImage'
dataFile = request.session['csvfullpath']
csvfilename = request.session['csvfullpath']
labelfileexists = False
dflabels = pd.DataFrame()
context = {'tab': 'upload', 'file': dataFile, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
elif request.POST.get('validatesubmit') == 'ImageClassification':
df = pd.read_csv(request.session['csvfullpath'])
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
with open(dataFile,'w') as f:
f.write("File,Label\n")
f.close()
request.session['currentIndex'] = 0
request.session['endIndex'] = len(df)-1
not_end = not(request.session['currentIndex'] == request.session['endIndex'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
request.session['labels'] = ''
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'imagelabelling.html', context)
elif request.POST.get('validatesubmit') == 'submitpreviouslabel':
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
df = pd.read_csv(dataFile)
if len(df.columns) == 2:
context = imageeda(request)
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
else:
context = objecteda(request)
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
else:
df = pd.read_csv(request.session['csvfullpath'])
if request.session['datatype'] in ['LLM_Document','LLM_Code']:
from appfe.modelTraining.bc_views import basicconfig
return basicconfig(request)
else:
if df['Label'].isnull().sum() > 0:
# show error message
if request.session['datatype'] == 'Document':
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
if Path(filename).suffix == '.pdf':
from appbe.dataIngestion import pdf2text
text = pdf2text(filename)
dataDict["text"].append(text)
else:
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDf = pd.DataFrame.from_dict(dataDict)
tcolumns=['text']
wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns)
status_msg = 'Successfully Done'
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'singletextdetails':wordcloudpic,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
else:
errormessage = str(df['Label'].isnull().sum()) + " rows do not contain label values"
context = {'error': errormessage}
else:
eda_result = ''
duplicate_img = ''
color_plt = ''
df2 = df.groupby('Label', as_index=False)['File'].count().reset_index().rename(columns ={'File':'Number of Files'})
df_json = df2.to_json(orient="records")
df_json = json.loads(df_json)
cfig = go.Figure()
xaxis_data = df2['Label'].tolist()
yaxis_data = df2['Number of Files'].tolist()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
firstFile = df.groupby('Label').first().reset_index()
#firstFile['FilePath'] = firstFile['File'].apply(lambda x: os.path.join(request.session['datalocation'], x))
images = []
if request.session['datatype'] == 'Image':
qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
#print(qualityscore)
for i in range(len(firstFile)):
filename = firstFile.loc[i, "File"]
filePath = os.path.join(request.session['datalocation'], filename)
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
firstFile.loc[i, "Image"] = image_64
firstFile.loc[i, "Quality"] = qualityscore[filename]
elif request.session['datatype'] == 'Document':
dataDrift = ''
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
if Path(filename).suffix == '.pdf':
from appbe.dataIngestion import pdf2text
text = pdf2text(filename)
dataDict["text"].append(text)
dataDict["Label"].append(df.loc[i,"Label"])
else:
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDict["Label"].append(df.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
wordcloudpic = ea.getCategoryWordCloud(dataDf)
status_msg = 'Successfully Done'
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
return render(request, 'upload.html', context)
status_msg = 'Successfully Done'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile,
'status_msg': status_msg,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except UnicodeDecodeError:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Only utf8 file encoding supported','computeinfrastructure':computeinfrastructure})
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Validation Failed','computeinfrastructure':computeinfrastructure})
def file_successfully_created(request,dataFile):
    """Post-upload step: profile the newly created data file and render the
    configuration page.

    Stores the file location and CSV parsing defaults on the session, runs a
    quick EDA pass to enumerate features, and renders ``upload.html`` with a
    10-row preview plus important-feature hints.

    Args:
        request: Django request carrying the active session.
        dataFile: path-like location of the freshly written data file.
    """
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    try:
        # Remember where the data lives and the CSV dialect used to parse it.
        request.session['datalocation'] = str(dataFile)
        request.session['delimiter'] = ','
        request.session['textqualifier'] = '"'
        from appbe.eda import ux_eda
        # optimize=1 presumably requests a lighter/faster profiling pass — TODO confirm in appbe.eda
        eda_obj = ux_eda(dataFile,optimize=1)
        featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
        # ----------------------------
        numberoffeatures = len(featuresList)
        from appfe.modelTraining.views import getimpfeatures
        imp_features = getimpfeatures(dataFile,numberoffeatures)
        # Sub-sampling defaults for the UI: show everything, no recommendation.
        samplePercentage = 100
        samplePercentval = 0
        showRecommended = False
        from utils.file_ops import read_df
        # Only the first 10 rows are needed for the preview grid.
        status,df_top = read_df(dataFile,nrows=10)
        df_json = df_top.to_json(orient="records")
        df_json = json.loads(df_json)
        statusmsg = 'Data File Uploaded Successfully '
        # Reset the wizard state for a fresh upload.
        request.session['currentstate'] = 0
        request.session['finalstate'] = 0
        request.session['datatype'] = 'Normal'
        from appbe.aion_config import get_edafeatures
        No_of_Permissible_Features_EDA = get_edafeatures()
        context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'imp_features':imp_features, 'numberoffeatures':numberoffeatures,
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
        context['version'] = AION_VERSION
        return render(request, 'upload.html', context)
    except Exception as e:
        print(e)
        return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'})
def uploadDatafromSatandardDataset(request):
    """Load one of the bundled scikit-learn sample datasets, persist it as a
    timestamped CSV under ``DATA_FILE_PATH`` and render the configuration page.

    Supported values of the POST field ``dataset``: Iris, Boston,
    BreastCancer, Diabetes, Wine. Any other value renders the generic
    'Failed to upload Data' error page.
    """
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    # Read use-case details *before* the try block so the except handler can
    # reference them even when the failure happens early. Previously these
    # were assigned mid-try, so an early exception raised a NameError inside
    # the handler instead of rendering the error page.
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    try:
        dataobject = request.POST.get('dataset')
        if dataobject == 'Iris':
            from sklearn.datasets import load_iris
            data = load_iris()
            df = pd.DataFrame(data.data, columns=data.feature_names)
            # Map the numeric target to its species name for readability.
            df['Species']=data['target']
            df['Species']=df['Species'].apply(lambda x: data['target_names'][x])
        elif dataobject == 'Boston':
            # NOTE(review): load_boston was removed in scikit-learn 1.2; on
            # newer versions this branch fails and the user sees the generic
            # error page. Consider fetch_california_housing as a replacement.
            from sklearn.datasets import load_boston
            df1 = load_boston()
            df = pd.DataFrame(data=df1.data, columns=df1.feature_names)
            df["target"] = df1.target
        elif dataobject == 'BreastCancer':
            from sklearn.datasets import load_breast_cancer
            cancer = load_breast_cancer()
            df = pd.DataFrame(np.c_[cancer['data'], cancer['target']],columns= np.append(cancer['feature_names'], ['target']))
        elif dataobject == 'Diabetes':
            from sklearn.datasets import load_diabetes
            data = load_diabetes()
            df = pd.DataFrame(data.data, columns=data.feature_names)
            df['y']=data['target']
        elif dataobject == 'Wine':
            from sklearn.datasets import load_wine
            data = load_wine()
            df = pd.DataFrame(data.data, columns=data.feature_names)
            df['class']=data['target']
            df['class']=df['class'].apply(lambda x: data['target_names'][x])
        else:
            # Fail fast with a clear message instead of the NameError on ``df``
            # that an unrecognised dataset name previously produced.
            raise ValueError('Unsupported standard dataset: ' + str(dataobject))
        # Persist the dataset as a timestamped CSV for the training pipeline.
        filetimestamp = str(int(time.time()))
        dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
        request.session['datalocation'] = str(dataFile)
        df.to_csv(dataFile, index=False)
        request.session['delimiter'] = ','
        request.session['textqualifier'] = '"'
        # EDA Subsampling changes
        # ----------------------------
        from appbe.eda import ux_eda
        eda_obj = ux_eda(dataFile)
        featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
        # ----------------------------
        numberoffeatures = len(featuresList)
        from appfe.modelTraining.views import getimpfeatures
        imp_features = getimpfeatures(dataFile,numberoffeatures)
        # Sub-sampling defaults for the UI: show everything, no recommendation.
        samplePercentage = 100
        samplePercentval = 0
        showRecommended = False
        df_top = df.head(10)
        df_json = df_top.to_json(orient="records")
        df_json = json.loads(df_json)
        statusmsg = 'Data File Uploaded Successfully '
        # Reset the wizard state for a fresh upload.
        request.session['currentstate'] = 0
        request.session['finalstate'] = 0
        request.session['datatype'] = 'Normal'
        from appbe.aion_config import get_edafeatures
        No_of_Permissible_Features_EDA = get_edafeatures()
        from appfe.modelTraining.models import Existusecases
        clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
        context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'imp_features':imp_features, 'numberoffeatures':numberoffeatures,
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
        context['version'] = AION_VERSION
        return render(request, 'upload.html', context)
    except Exception as e:
        print(e)
        return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'})
def sqlAlchemy(request):
    """Handle the database-ingestion form (SQLite / PostgreSQL / MySQL / MSSQL).

    Stores the posted connection details on the session, then either opens the
    multi-table query builder (``sql_submit == 'multitable'``) or pulls a
    single table straight into a timestamped CSV and renders the upload page.
    """
    from appbe import alchemy
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    dbType = request.POST.get('dbType')
    request.session['dbType'] = dbType
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    from appbe.aion_config import get_edafeatures
    No_of_Permissible_Features_EDA = get_edafeatures()
    if dbType.lower() == "sqlite":
        request.session['filepath'] = request.POST.get('filepath')
        request.session['tablenamesql'] = request.POST.get('tablenamesql')
        table_details = {"Database Type": dbType, "File Path": request.session['filepath']}
    if dbType.lower() in ["postgresql", "mysql", "mssql"]:
        # Form field names carry a driver-family suffix. MySQL shares the
        # 'postgresql' suffix here — presumably the template reuses the same
        # input group for both; TODO confirm against the form markup.
        if dbType.lower()=='mssql':
            db = "mssql"
        else:
            db = "postgresql"
        request.session['tablename'] = request.POST.get('tablename'+'_'+db)
        request.session['dbname'] = request.POST.get('dbname'+'_'+db)
        request.session['password'] = request.POST.get('password'+'_'+db)
        request.session['username'] = request.POST.get('username'+'_'+db)
        request.session['port'] = request.POST.get('port'+'_'+db)
        request.session['host'] = request.POST.get('host'+'_'+db)
        table_details = {"Database Type": dbType, "Database Name": request.session['dbname'],
                         "Host": request.session['host'], "Port": request.session['port']}
        if dbType.lower() == "mssql":
            # MSSQL additionally needs an ODBC driver name.
            request.session['driver'] = request.POST.get('driver'+'_'+db)
            table_details.update({"driver": request.session['driver']})
    # Reset the wizard state for a fresh ingestion.
    request.session['currentstate'] = 0
    request.session['finalstate'] = 0
    request.session['datatype'] = 'Normal'
    #print(dbType)
    submit_button = request.POST.get('sql_submit')
    if submit_button == 'multitable':
        try:
            # Probe the connection before opening the query builder so the
            # user gets an immediate error instead of a broken builder page.
            connection_string = alchemy.get_connection(request)
            # NOTE: rebinds the local name ``db`` (the suffix string above);
            # harmless because the string was already consumed.
            import sqlalchemy as db
            engine = db.create_engine(connection_string)
            engine.connect()
            request.session['currentstate'] = 0
            request.session['finalstate'] = 0
            request.session['datatype'] = 'Normal'
            print(request.POST.get('dbType'))
            context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'version':AION_VERSION}
            context.update({'db_details':table_details})
            return render(request, 'querybuildersql.html', context)
        except Exception as e:
            print(str(e))
            # psycopg2 is an optional dependency — surface a specific hint.
            if "No module named 'psycopg2'" in str(e):
                error = 'Not found module: psycopg2. Please install and try again'
            else:
                error = 'Error in connecting to the database'
            return render(request, 'upload.html', {'tab': 'tabconfigure', 'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,
                                                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
                                                   'selected': 'modeltraning', 'version': AION_VERSION,
                                                   'error': error})
    else:
        try:
            # Single-table path: fetch the whole table and persist it as CSV.
            df = alchemy.getDataFromSingleTable(request)
            filetimestamp = str(int(time.time()))
            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
            request.session['datalocation'] = str(dataFile)
            df.to_csv(dataFile, index=False)
            df_top = df.head(10)
            df_json = df_top.to_json(orient="records")
            df_json = json.loads(df_json)
            statusmsg = 'Data File Uploaded Successfully '
            request.session['currentstate'] = 0
            request.session['finalstate'] = 0
            request.session['datatype'] = 'Normal'
            context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'FeturesEDA':No_of_Permissible_Features_EDA,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                       'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
            context['version'] = AION_VERSION
            return render(request, 'upload.html', context)
        except Exception as e:
            print(e)
            if "No module named 'psycopg2'" in str(e):
                context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"error":"Not found module: psycopg2. Please install and try again"}
            else:
                context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"error":"Error in fetching the data from database."}
            context['version'] = AION_VERSION
            return render(request, 'upload.html', context)
def get_table_list(request):
    """Return the tables available on the active database connection as JSON."""
    from appbe import alchemy
    # A database type must have been selected earlier in the session
    # (raises KeyError otherwise, matching the original behaviour).
    _ = request.session['dbType']
    payload = json.dumps(alchemy.list_tables(request))
    return HttpResponse(payload, content_type="application/json")
def get_tables_fields_list(request):
    """Return the field list for the tables chosen in the query builder."""
    from appbe import alchemy
    selected_tables = request.GET.get("sel_tables")
    fields = alchemy.list_tables_fields(request, selected_tables)
    return HttpResponse(fields, content_type="application/json")
def validate_query(request):
    """Validate the query built in the UI and stash its parts on the session
    so ``submitquery`` can re-run it later."""
    from appbe import alchemy
    query = request.GET.get("query")  # retrieved but not used beyond this point
    parts = {name: request.GET.get(name)
             for name in ("table_details", "join_details", "where_details")}
    for name, value in parts.items():
        request.session[name] = value
    df, msg = alchemy.validatequery(request, parts["table_details"],
                                    parts["join_details"], parts["where_details"])
    return HttpResponse(json.dumps(msg), content_type="application/json")
def submitquery(request):
    """Re-run the finalised multi-table query, persist the result as a
    timestamped CSV under ``DATA_FILE_PATH`` and render the upload page.

    Relies on the table/join/where details stored on the session by
    ``validate_query``. On any failure it renders the upload page with a
    generic error instead of raising.
    """
    from appbe import alchemy
    from appbe import compute
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    computeinfrastructure = compute.readComputeConfig()
    try:
        table_details = request.session["table_details"]
        join_details = request.session["join_details"]
        where_details = request.session["where_details"]
        df,msg = alchemy.validatequery(request,table_details,join_details,where_details)
        # Persist the result so later pipeline stages can read it back.
        filetimestamp = str(int(time.time()))
        dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
        request.session['datalocation'] = str(dataFile)
        df.to_csv(dataFile, index=False)
        # Only the first 10 rows are needed for the preview grid.
        df_json = json.loads(df.head(10).to_json(orient="records"))
        statusmsg = 'Data File Uploaded Successfully '
        # Reset the wizard state for a fresh ingestion.
        request.session['currentstate'] = 0
        request.session['finalstate'] = 0
        request.session['datatype'] = 'Normal'
        context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
                   'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
        return render(request, 'upload.html', context)
    except Exception as e:
        # Narrowed from a bare ``except`` (which also swallowed SystemExit/
        # KeyboardInterrupt) and log the cause for diagnosis.
        print(e)
        return render(request, 'upload.html', {'tab': 'tabconfigure','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','error':'Failed to upload datafile'})
def EDAReport(request):
    """Entry point for the EDA tab: record telemetry, run the EDA when the
    'data_eda' action was submitted, and render ``upload.html``.

    Any other submit value now re-renders the page with a minimal context;
    previously ``context`` was only assigned inside the 'data_eda' branch,
    so other actions crashed with a NameError at render time.
    """
    from appbe.telemetry import UpdateTelemetry
    UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'EDA','Yes')
    from appbe import exploratory_Analysis as ea
    request.session['defaultfilepath'] = DEFAULT_FILE_PATH
    request.session['configfilepath'] = CONFIG_FILE_PATH
    request.session['deploylocation'] = DEPLOY_LOCATION
    from appbe import compute
    computeinfrastructure = compute.readComputeConfig()
    submit_button = request.POST.get('upload_submit')
    ModelVersion = request.session['ModelVersion']
    # Fallback context for submit values other than 'data_eda'.
    context = {'tab': 'upload', 'computeinfrastructure': computeinfrastructure,
               'ModelVersion': ModelVersion, 'version': AION_VERSION}
    if submit_button == 'data_eda':
        try:
            from appbe.aion_config import settings
            usecasetab = settings()
            from appbe.s3bucketsDB import get_s3_bucket
            from appbe.gcsbucketsDB import get_gcs_bucket
            from appbe.azureStorageDB import get_azureStorage
            context = ea.get_eda(request)
            # Attach infrastructure/storage details used by the template.
            context['computeinfrastructure'] = computeinfrastructure
            context['s3buckets'] = get_s3_bucket()
            context['gcsbuckets'] = get_gcs_bucket()
            context['azurestorage'] = get_azureStorage()
            context['version'] = AION_VERSION
            context['usecasetab'] = usecasetab
        except Exception as e:
            print(e)
            context = {'error':'Error in doing the EDA','ModelVersion': ModelVersion,'version':AION_VERSION}
    return render(request, 'upload.html', context)
def get_features_datatype(v,num_list,cat_list,text_list):
    """Map a feature name to its display datatype for the Data Overview sheet.

    Membership is checked in order: categorical, numerical, text. Returns
    ``None`` when the feature appears in none of the lists.
    """
    lookup = (
        (cat_list, 'Categorical'),
        (num_list, 'Numerical'),
        (text_list, 'Text'),
    )
    for members, label in lookup:
        if v in members:
            return label
    return None
def downloadedareport(request):
    """Assemble the EDA results posted from the UI into a multi-sheet Excel
    workbook and stream it back as a file download.

    Sheets written: Top 10 Rows, Data Overview, Data Distribution, optional
    Feature Importance (PCA), Correlation Analysis and Unsupervised
    Clustering. All inputs arrive as JSON strings in the POST body.
    """
    des1 = json.loads(request.POST.get('des1'))
    des1 = pd.DataFrame(des1)
    cluster_df = json.loads(request.POST.get('cluster_df'))
    cluster_df = pd.DataFrame(cluster_df)
    pca_df = []
    # The UI posts the repr of an empty frame when PCA was not computed.
    if request.POST.get('pca_df') != 'Empty DataFrame\r\nColumns: []\r\nIndex: []':
        pca_df = json.loads(request.POST.get('pca_df'))
        pca_df = pd.DataFrame(pca_df)
    cor_mat = json.loads(request.POST.get('cor_mat'))
    cor_mat = pd.DataFrame(cor_mat)
    cor_mat.replace(np.nan, 0, inplace=True)
    cor_mat.fillna('None',inplace=True)
    usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
    edaFileName = usename + '_EDA.xlsx'
    from io import BytesIO as IO
    excel_file = IO()
    excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
    ##For Task 17622
    actual_df = json.loads(request.POST.get('data_deep_json'))
    actual_df = pd.DataFrame(actual_df)
    actual_df.replace(np.nan, 0,inplace=True)
    actual_df.fillna('None',inplace=True)
    top_10_rows = actual_df.head(10)
    top_10_rows.to_excel(excel_writer, sheet_name='Top 10 Rows',index=True)
    des1 = des1.fillna(0)
    actual_df_numerical_features = actual_df.select_dtypes(exclude='object')
    actual_df_categorical_features = actual_df.select_dtypes(include='object')
    # Text features are reported separately from plain categoricals.
    textFeature = json.loads(request.POST.get('textFeature'))
    actual_df_categorical_features = actual_df_categorical_features.drop(textFeature, axis=1)
    # Classify every feature exactly once. (The original re-ran this block
    # inside ``for i in des1['Features']`` although nothing depended on the
    # loop variable — same result, O(n) times the work.)
    num_cols = actual_df_numerical_features.columns.to_list()
    cat_cols = actual_df_categorical_features.columns.to_list()
    text_cols = textFeature
    des1['Features Type'] = des1['Features'].apply(
        lambda x: get_features_datatype(x, num_cols, cat_cols, text_cols))
    # Reorder so the new column sits immediately after 'Features'.
    curr_columns = des1.columns.to_list()
    curr_columns.remove('Features Type')
    insert_i = curr_columns.index('Features')+1
    curr_columns.insert(insert_i,'Features Type')
    des1 = des1[curr_columns]
    des1.to_excel(excel_writer, sheet_name='Data Overview',startrow=0, startcol=0,index=False)
    ## Hopkins value addition
    hopkins_value = str(request.POST.get('hopkins_val'))
    hopkins_tip = request.POST.get('hopkins_tip')
    hopkins_dict = {'Hopkins_value':[hopkins_value],"hopkins_information":[hopkins_tip]}
    hopkins_df = pd.DataFrame.from_dict(hopkins_dict)
    ##Data Distribution
    from appbe.eda import ux_eda
    eda_obj = ux_eda(actual_df)
    datadist_dict={}
    for col in actual_df.columns.to_list():
        distname, sse = eda_obj.DistributionFinder(actual_df[col])
        datadist_dict[col]=[distname,sse]
    data_dist_df = pd.DataFrame(datadist_dict)
    data_dist_df = data_dist_df.T
    data_dist_df.reset_index(inplace=True)
    data_dist_df.columns = ['Features','Distribution','SSE']
    # SSE is an internal fit metric — not shown to the user.
    data_dist_df.drop(['SSE'],axis=1,inplace=True)
    data_dist_df.fillna("NA",inplace = True)
    data_dist_df = data_dist_df.replace(['',None,pd.NaT],"NA")
    data_dist_df = data_dist_df.replace(["geom"],"geometric")
    data_dist_df.to_excel(excel_writer, sheet_name='Data Distribution',index=False)
    if len(pca_df) > 0:
        pca_df.to_excel(excel_writer, sheet_name='Feature Importance',index=False)
    cor_mat.to_excel(excel_writer, sheet_name='Correlation Analysis',index=False)
    #Unsupervised clustering
    if not textFeature:
        import io
        hs_info = "Hopkins Statistics"
        hs_info_df = pd.read_csv(io.StringIO(hs_info), sep=",")
        hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=2,index=False)
        hopkins_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=2, startcol=0,index=False)
    else:
        # Hopkins statistic is not computed when text features are present.
        import io
        hs_info = "Hopkins Statistics is not availble for data with text features. Unselect text features and retry EDA."
        hs_info_df = pd.read_csv(io.StringIO(hs_info), sep=",")
        hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=3,index=False)
    cdf_start_row = 1+len(hopkins_df)+4
    cluster_info = " Unsupervised clustering results (Excluding text features) "
    cluster_info_df = pd.read_csv(io.StringIO(cluster_info), sep=",")
    cluster_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row-2, startcol=1,index=False)
    cluster_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row, startcol=0,index=False)
    # ``ExcelWriter.save`` is deprecated; ``close`` finalises the workbook.
    excel_writer.close()
    excel_file.seek(0)
    response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=' + edaFileName
return response |
visualizer_views.py | import os,sys
import re
import logging
from django.http import HttpRequest, HttpResponse
from django.conf import settings
from django.shortcuts import render
from appbe.pages import getversion
import plotly.graph_objects as go
import plotly.figure_factory as ff
from django.shortcuts import render
from plotly.subplots import make_subplots
from django.contrib.sessions.models import Session
from sklearn.metrics import confusion_matrix
from IPython.core.display import HTML
from IPython.core import display
from django.template import Context, loader
import pandas as pd
import numpy as np
import io
import urllib, base64
from natsort import natsorted
import matplotlib.pyplot as plt
import plotly.express as px
import json
from IPython.core.display import display, HTML
from appbe import compute
import base64
import warnings
warnings.filterwarnings('ignore')
import subprocess
from appbe import installPackage
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
import time
AION_VERSION = getversion()
def getusercasestatus(request):
    """Read the active use case, model version and training status from the
    session, falling back to sensible defaults when keys are missing.

    Returns:
        tuple: (selected_use_case, ModelVersion, ModelStatus)
    """
    session = request.session
    selected_use_case = session.get('UseCaseName', 'Not Defined')
    ModelVersion = session.get('ModelVersion', 0)
    ModelStatus = session.get('ModelStatus', 'Not Trained')
    return selected_use_case, ModelVersion, ModelStatus
def xplain(request):
    """Explainable-AI view.

    GET: validate that a model is trained (classification/regression only),
    load its configuration, build a sample input-fields row from the training
    data and render ``businessview.html``.
    POST: dispatch to ``modelxplain`` or ``predictionxplain`` based on the
    ``submit`` value.
    """
    log = logging.getLogger('log_ux')
    computeinfrastructure = compute.readComputeConfig()
    from appbe.aion_config import settings
    usecasetab = settings()
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    if request.method == 'GET':
        try:
            # Guard chain: every branch below rejects requests without a
            # successfully trained model version on the session.
            if ModelStatus != 'SUCCESS':
                log.info('xplain :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:Please train the model first or launch an existing trained model')
                return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first or launch an existing trained model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
            else:
                if 'ModelVersion' not in request.session:
                    log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:Please train the model first')
                    return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
                elif request.session['ModelVersion'] == 0:
                    log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
                    return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
                elif 'ModelStatus' not in request.session:
                    log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
                    return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
                elif request.session['ModelStatus'] != 'SUCCESS':
                    log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
                    return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
                updatedConfigFile = request.session['config_json']
                f = open(updatedConfigFile, "r")
                configSettings = f.read()
                f.close()
                configSettingsJson = json.loads(configSettings)
                # Pick the analysis type flagged 'True' in the config;
                # defaults to classification when none is flagged.
                problemType = 'classification'
                for key in configSettingsJson['basic']['analysisType']:
                    if configSettingsJson['basic']['analysisType'][key] == 'True':
                        problemType = key
                        break
                if problemType.lower() != 'classification' and problemType.lower() != 'regression':
                    log.info('xplain:' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error:Explainable AI only available for classification and regression problem')
                    return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
                displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
                with open(displaypath) as file:
                    config = json.load(file)
                    file.close()
                inputFeatures = configSettingsJson['basic']['trainingFeatures']
                targetFeature = configSettingsJson['basic']['targetFeature']
                inputFeaturesList = inputFeatures.split(',')
                if targetFeature in inputFeaturesList:
                    inputFeaturesList.remove(targetFeature)
                dataFilePath = str(configSettingsJson['basic']['dataLocation'])
                df = pd.read_csv(dataFilePath,nrows=10)
                df.rename(columns=lambda x: x.strip(), inplace=True)
                df = df[inputFeaturesList]
                # Row index 5 of the 10-row sample is used as the prefill
                # example — assumes the data file has at least 6 rows
                # (raises KeyError otherwise); TODO confirm intent.
                inputFieldsDict = df.to_dict(orient='index')[5]
                inputFields = []
                inputFields.append(inputFieldsDict)
                targetfeatures = targetFeature.split(",")
                ##### Bug 20649 starts
                # Fairness metrics need at least one categorical feature
                # that is not itself a target feature.
                featureDict = configSettingsJson['advance']['profiler']['featureDict']
                catFeatureList = []
                for feature in featureDict:
                    if feature['type'] == 'categorical':
                        catFeatureList.append(feature['feature'])
                for feature in targetfeatures:
                    if feature in catFeatureList:
                        catFeatureList.remove(feature)
                fairness_error = "" if len(catFeatureList)>0 else "Fairness metrics is not applicable as categorical feature(s) is not present."
                ##### Bug 20649 ends
                context = {"fairness_error":fairness_error,"catFeatureList":catFeatureList,'selected_use_case':selected_use_case,'configSettings':configSettingsJson,'targetfeatures':targetfeatures,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','inputFields':inputFields,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'problemType':problemType}
                return render(request, 'businessview.html', context)
        except Exception as e:
            log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Unexpected error occur, '+str(e))
            print(e)
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Unexpected error occur','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
    if request.method == 'POST':
        if request.POST.get("submit") == 'modelxplain':
            return modelxplain(request)
        if request.POST.get("submit") == 'xplainprediction':
            return predictionxplain(request)
def modelxplain(request):
    """Render the global Explainable-AI (model-level) view.

    Validates that the session holds a successfully trained classification or
    regression model, runs the deployed ``aion_xai.py`` script in ``global``
    mode, parses the JSON payload printed after the ``aion_ai_explanation:``
    marker, and renders ALE plots, feature importance and anchor explanations
    on ``businessview.html``.
    """
    log = logging.getLogger('log_ux')
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    from appbe.aion_config import settings
    usecasetab = settings()
    t1 = time.time()
    # Guard clauses: a use case and a successfully trained model are required.
    if 'UseCaseName' not in request.session:
        return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please create the use case first, trained the model and then visualize the data','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
    else:
        if 'ModelVersion' not in request.session:
            log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first')
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
        elif request.session['ModelVersion'] == 0:
            log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first')
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
        elif 'ModelStatus' not in request.session:
            log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first')
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
        elif request.session['ModelStatus'] != 'SUCCESS':
            log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please trained the model first')
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please trained the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
    try:
        from appbe.telemetry import UpdateTelemetry
        UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
        updatedConfigFile = request.session['config_json']
        with open(updatedConfigFile, "r") as f:
            configSettings = f.read()
        configSettingsJson = json.loads(configSettings)
        # The problem type is the first analysisType entry flagged 'True'.
        problemType = 'classification'
        for key in configSettingsJson['basic']['analysisType']:
            if configSettingsJson['basic']['analysisType'][key] == 'True':
                problemType = key
                break
        if problemType.lower() != 'classification' and problemType.lower() != 'regression':
            log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Explainable AI only available for classification and regression problem')
            return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
        displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
        with open(displaypath) as file:
            config = json.load(file)
        targetFeature = configSettingsJson['basic']['targetFeature']
        # Build a sample input-fields row (row index 5) from the
        # post-processed data, dropping the target column if present.
        status, df = read_df_compressed(config['postprocessedData'], nrows=10)
        df.rename(columns=lambda x: x.strip(), inplace=True)
        if targetFeature in df.columns:
            df.drop( targetFeature, axis=1, inplace=True)
        inputFieldsDict = df.to_dict(orient='index')[5]
        inputFields = []
        inputFields.append(inputFieldsDict)
        nrows = config['nrows'] if 'nrows' in config else 'Not Available'
        ncols = config['ncols'] if 'ncols' in config else 'Not Available'
        if 'targetFeature' in config:
            targetFeature = config['targetFeature']
        else:
            targetFeature = ''
        labelMaps = config['labelMaps']
        modelfeatures = configSettingsJson['basic']['trainingFeatures'].split(',')
        mfcount = len(modelfeatures)
        dataFilePath = str(configSettingsJson['basic']['dataLocation'])
        df_proprocessed = pd.read_csv(dataFilePath,nrows=1000)
        df_proprocessed.rename(columns=lambda x: x.strip(), inplace=True)
        # Bug fix: the original tested the literal string ('targetFeature' != ''),
        # which is always True, so the no-target fallback branch was unreachable
        # and an empty target crashed on the column lookup below.
        if targetFeature != '':
            target_classes = df_proprocessed[targetFeature].unique()
            numberofclasses = len(target_classes)
        else:
            target_classes = []
            numberofclasses = 'Not Available'
        dataPoints = df_proprocessed.shape[0]
        df_proprocessed = df_proprocessed.head(5)
        df_proprocessed = df_proprocessed.to_json(orient="records")
        df_proprocessed = json.loads(df_proprocessed)
        # Run the deployed explainability script in 'global' mode and parse
        # the JSON printed after the 'aion_ai_explanation:' marker.
        expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py')
        outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global'])
        outputStr = outputStr.decode('utf-8')
        outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
        outputStr = outputStr.strip()
        ale_json = json.loads(str(outputStr))
        ale_json = ale_json['data']
        ale_view = ale_json['data']
        sentences = ale_json['sentences']
        feature_importance = ale_json['feature_importance']
        dfimp = pd.DataFrame.from_dict(feature_importance)
        dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index()
        yaxis_data = dfimp['values'].tolist()
        xaxis_data = dfimp['labels'].tolist()
        cfig = go.Figure()
        cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance'))
        cfig.update_layout(barmode='stack',xaxis_title='Features')
        bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000)
        dftoprecords = dfimp.head(2)
        # Bug fix: take the labels of the two most important features only;
        # the original used the full list and left dftoprecords unused.
        topTwoFeatures = dftoprecords['labels'].tolist()
        topFeaturesMsg = []
        for i in range(0,len(dfimp)):
            value = round(dfimp.loc[i, "values"],2)*100
            value = round(value,2)
            tvalue = str(dfimp.loc[i, "labels"])+' contributing to '+ str(value)+'%'
            topFeaturesMsg.append(tvalue)
        most_influencedfeature = ale_json['most_influencedfeature']
        interceppoint = ale_json['interceptionpoint']
        anchorjson = ale_json['anchorjson']
        t2 = time.time()
        context = {'ale_view':ale_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','sentences':sentences,"bargraph":bargraph,'inputFields':inputFields,'nrows':nrows,'ncols':ncols,'targetFeature':targetFeature,'dataPoints':dataPoints,'target_classes':target_classes,'datarows':df_proprocessed,'numberofclasses':numberofclasses,'modelfeatures':modelfeatures,'problemType':problemType,'mfcount':mfcount,'topTwoFeatures':topTwoFeatures,'topFeaturesMsg':topFeaturesMsg,'most_influencedfeature':most_influencedfeature,'interceppoint':interceppoint,'achors':anchorjson,'labelMaps':labelMaps,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}
        log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        return render(request, 'businessview.html', context)
    except Exception as Inst:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        print(Inst)
        log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to Xplain Model, '+str(Inst))
        log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error1':'Failed to Xplain Model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
def predictionxplain(request):
    """Render a local (single-prediction) explanation.

    Collects input field values either from a sample row of the training data
    or from the POSTed form, invokes the deployed ``aion_xai.py`` script in
    ``local`` mode with those values as JSON, and renders the prediction's
    anchors, force plot, decision plot and waterfall plot.
    """
    log = logging.getLogger('log_ux')
    from appbe.aion_config import settings
    usecasetab = settings()
    computeinfrastructure = compute.readComputeConfig()
    selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
    try:
        from appbe.telemetry import UpdateTelemetry
        UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
        t1 = time.time()
        # display.json is loaded to verify the deployment is intact; its
        # contents are not otherwise used by this view.
        displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
        with open(displaypath) as file:
            config = json.load(file)
        updatedConfigFile = request.session['config_json']
        with open(updatedConfigFile, "r") as f:
            configSettings = f.read()
        configSettingsJson = json.loads(configSettings)
        inputFeatures = configSettingsJson['basic']['trainingFeatures']
        targetFeature = configSettingsJson['basic']['targetFeature']
        inputFeaturesList = inputFeatures.split(',')
        if targetFeature in inputFeaturesList:
            inputFeaturesList.remove(targetFeature)
        inputFieldsDict = {}
        # The problem type is the first analysisType entry flagged 'True'.
        problemType = 'classification'
        for key in configSettingsJson['basic']['analysisType']:
            if configSettingsJson['basic']['analysisType'][key] == 'True':
                problemType = key
                break
        if problemType.lower() == 'timeseriesforecasting': #task 11997
            inputFieldsDict['noofforecasts'] = request.POST.get('noofforecasts')
        elif problemType == 'RecommenderSystem':
            inputFieldsDict['uid'] = request.POST.get('uid')
            inputFieldsDict['iid'] = request.POST.get('iid')
            inputFieldsDict['rating'] = request.POST.get('rating')
        else:
            # Performance fix: read the sample row once instead of re-reading
            # the CSV for every feature (the original looped per feature).
            # Fall back to the POSTed form values when the dataset is
            # unreadable or has fewer than six rows.
            try:
                dataFilePath = str(configSettingsJson['basic']['dataLocation'])
                df = pd.read_csv(dataFilePath,nrows=10)
                df.rename(columns=lambda x: x.strip(), inplace=True)
                df = df[inputFeaturesList]
                inputFieldsDict = df.to_dict(orient='index')[5]
            except Exception:
                inputFieldsDict = {feature: request.POST.get(feature) for feature in inputFeaturesList}
        # Normalise 'nan' placeholders to empty strings for the XAI script.
        for key, value in inputFieldsDict.items():
            if value == 'nan':
                inputFieldsDict[key] = ''
        inputFieldsJson = json.dumps(inputFieldsDict)
        # Run the deployed explainability script in 'local' mode and parse
        # the JSON printed after the 'aion_ai_explanation:' marker.
        expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py')
        outputStr = subprocess.check_output([sys.executable,expainableAIPath,'local',inputFieldsJson])
        outputStr = outputStr.decode('utf-8')
        outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
        outputStr = outputStr.strip()
        predict_dict = json.loads(str(outputStr))
        if (predict_dict['status'] == 'SUCCESS'):
            predict_dict = predict_dict['data']
            prediction = predict_dict['prediction']
            anchor = predict_dict['anchor']
            precision = predict_dict['precision']
            coverage = round(predict_dict['coverage'],2)
            confidence = '95%'  # fixed display value, not computed
            forceplot_view = predict_dict['forceplot']
            multidecisionplot_view = predict_dict['multidecisionplot']
            waterfallplot_view = predict_dict['waterfallplot'] #Task12581
        else:
            context={'tab':'tabconfigure','error':'Failed to xplain','selected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion}
            log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to xplain')
            return render(request,'businessview.html',context)
        inputFields = []
        inputFields.append(inputFieldsDict)
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        context={'tab' : 'predict','inputFields':inputFields,'prediction':prediction,'reason':anchor, 'precision': precision,'coverage':coverage,'confidence':confidence,'forceplot_view':forceplot_view,'multidecisionplot_view':multidecisionplot_view,'waterfallplot_view':waterfallplot_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected' : 'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}
        t2= time.time()
        log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        return render(request, 'businessview.html', context = context)
    except Exception as inst:
        print(inst)
        log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0'+ 'sec' + ' : ' + 'Error : Failed to Xplain Prediction, ' + str(inst))
        context={'tab' : 'tabconfigure','error' : 'Failed to Xplain Prediction','selected' : 'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion}
        return render(request,'businessview.html',context)
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
from appbe.aion_config import running_setting
from appbe.training import checkModelUnderTraining
from appbe.training import calculate_total_activities
from appbe.training import check_unsupported_col
from appbe.training import check_granularity
from appbe.training import checkversionrunningstatus
from appbe.training import getModelStatus
from appbe.training import changeModelStatus
from appbe.training import calculate_total_interations
from appbe.pages import getusercasestatus
from utils.file_ops import read_df_compressed
import plotly.graph_objects as go
from appbe.pages import getMLModels
from appfe.modelTraining.models import usecasedetails
from appbe.training import getStatusCount
from appfe.modelTraining.models import Existusecases
import os,sys
import urllib, base64
import subprocess
import time
import re
import numpy as np
import pandas as pd
from pathlib import Path
import importlib
from appbe.log_ut import logg
from appbe import compute
import logging
AION_VERSION = getversion()
LOG_FILE_NAME = 'model_training_logs.log'
LOG_FOLDER = 'log'
def getPerformanceMatrix(deploypath,output_json):
    """Collect performance visualisations for a trained model.

    deploypath  -- deployment folder of the trained model
    output_json -- JSON string emitted by the training run

    Returns a list whose items are either base64 PNG data URIs or plotly
    HTML fragments, chosen by the problem type recorded in
    ``etc/display.json`` under *deploypath*. Anomaly-detection models are
    handled first and return early.
    """
    displaypath = os.path.join(deploypath,'etc','display.json')
    model_perf = []
    try:
        with open(displaypath) as file:
            config = json.load(file)
            file.close()
    except Exception as e:
        # NOTE(review): failure is only printed; `config` stays undefined, so
        # the problemType checks below would raise NameError — confirm callers
        # guard against a missing display.json.
        print(e)
    import glob
    resultJsonObj = json.loads(output_json)
    # Anomaly detection (incl. time-series variant) is rendered from the
    # training result object rather than display.json, and returns early.
    if (resultJsonObj['data']['ModelType'] == 'anomalydetection' and resultJsonObj['data']['BestScore'] != 0) or resultJsonObj['data']['ModelType'].lower() == 'timeseriesanomalydetection': #task 11997
        if resultJsonObj['data']['BestModel'].lower() == 'autoencoder' or resultJsonObj['data']['BestModel'].lower() == 'dbscan' :
            try:
                # Pre-rendered anomaly plots are embedded as data URIs.
                anomaly_plot_files = glob.glob(os.path.normpath(os.path.join(deploypath,'output','anomaly_plot','*.png')))
                for plot in anomaly_plot_files:
                    if(os.path.isfile(plot)):
                        string = base64.b64encode(open(plot, "rb").read())
                        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
                        model_perf.append(image_64)
                    else:
                        model_perf.append('nograph')
            except Exception as e:
                print("Anomaly plot exe error: \n",e)
        else:
            # Other anomaly models: build scatter plots from the predicted
            # data, marking rows with predict == -1 as outliers.
            predictfile = os.path.join(deploypath,'data','predicteddata.csv')
            if(os.path.isfile(predictfile)):
                df = pd.read_csv(predictfile)
                outliers=df.loc[df['predict']==-1]
                outlier_index=list(outliers.index)
                normals=df.loc[df['predict']!=-1]
                normals_index=list(normals.index)
                featuresList = df.columns.values.tolist()
                #print(featuresList)
                if 'predict' in featuresList:
                    featuresList.remove('predict')
                if 'score' in featuresList:
                    featuresList.remove('score')
                if len(featuresList) == 1:
                    # Single feature: plot feature value against anomaly score.
                    xdata = df[featuresList[0]]
                    ydata = df['score']
                    fig = go.Figure()
                    fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[normals_index], y=df['score'].iloc[normals_index],mode='markers',name='Normal'))
                    fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df['score'].iloc[outlier_index],mode='markers',name='Predicted Outliers'))
                    fig.update_layout(xaxis_title=featuresList[0],yaxis_title="Score")
                    frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
                    model_perf.append(frgraph)
                if len(featuresList) == 2:
                    # Two features: plot one against the other.
                    fig = go.Figure()
                    df = df.reset_index()
                    fig.add_trace(go.Scatter(x=df[featuresList[0]], y=df[featuresList[1]],mode='markers',name='Normal Points'))
                    fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df[featuresList[1]].iloc[outlier_index],mode='markers',name='Predicted Outliers'))
                    fig.update_xaxes(title_text=featuresList[0])
                    fig.update_yaxes(title_text=featuresList[1])
                    fig.update_layout(xaxis_title=featuresList[0],yaxis_title=featuresList[1])
                    frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
                    model_perf.append(frgraph)
                if len(featuresList) > 2:
                    # More than two features: reduce to 2-D with PCA first.
                    from sklearn.decomposition import PCA
                    pca = PCA(2)
                    pca.fit(df)
                    res=pd.DataFrame(pca.transform(df))
                    Z = np.array(res)
                    fig = go.Figure()
                    fig.add_trace(go.Scatter(x=res[0], y=res[1],mode='markers',name='Normal Points'))
                    fig.add_trace(go.Scatter(x=res.iloc[outlier_index,0], y=res.iloc[outlier_index,1],mode='markers',name='Predicted Outliers'))
                    fig.update_xaxes(title_text="Principal Component 1")
                    fig.update_yaxes(title_text="Principal Component 2")
                    frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
                    model_perf.append(frgraph)
        return (model_perf)
    # Non-anomaly problem types: dispatch on display.json's problemType.
    if config['problemType'].lower() == 'classification' or config['problemType'].lower() == 'anomaly_detection' or config['problemType'].lower() == 'timeseriesanomalydetection':
        # Classification-style problems: embed the saved PNGs from log/img.
        displaypath = os.path.join(deploypath,'log','img')
        import glob
        for img in glob.glob(displaypath+"/*.png"):
            string = base64.b64encode(open(img, "rb").read())
            image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            model_perf.append(image_64)
        #print(model_perf)
    elif config['problemType'].lower() == 'regression' or config['problemType'].lower() == 'recommendersystem' or \
            config['problemType'].lower() == 'text similarity':
        # Regression-style problems: actual vs predicted line chart.
        dataPath = config['predictedData']
        readstatus,predict_df=read_df_compressed(dataPath)
        regfig = go.Figure()
        regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['actual'],
                                    mode='lines',
                                    name='Actual'))
        regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['predict'],
                                    mode='lines',
                                    name='Predict'))
        frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100)
        rfgraph = ''
        model_perf.append(frgraph)
    elif config['problemType'].lower() == 'clustering':
        # Clustering: scatter the first two model features, one trace per cluster.
        dataPath = config['predictedData']
        readstatus,predict_df=read_df_compressed(dataPath)
        distinctCount = len(predict_df['predict'].unique())
        clusterlist = predict_df['predict'].unique()
        color = ['green','blue','red','orange','green','blue','red','orange']
        fig = go.Figure()
        for cluster in clusterlist:
            df_cluster = predict_df[predict_df['predict'] == cluster]
            modelFeatures = config['modelFeatures']
            X1= df_cluster[modelFeatures[0]].tolist()
            X2= df_cluster[modelFeatures[1]].tolist()
            fig.add_trace(go.Scatter(x=X1, y=X2,mode='markers',name='cluster '+str(cluster)))
        fig.update_layout(title="Cluster Graph",xaxis_title=modelFeatures[0],yaxis_title=modelFeatures[1],)
        frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
        model_perf.append(frgraph)
    elif config['problemType'].lower() == 'timeseriesforecasting': #task 11997
        # Forecasting: one actual-vs-predicted chart per forecasted feature.
        dataPath = config['predictedData']
        predict_df = pd.read_csv(dataPath)
        modelFeatures = config['modelFeatures']
        for feature in modelFeatures:
            feature_name = feature + '_actual'
            prediction = feature + '_pred'
            if feature_name in predict_df.columns:
                regfig = go.Figure()
                regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[feature_name],
                                            mode='lines',
                                            name=feature))
                regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[prediction],
                                            mode='lines',
                                            name='Predict'))
                frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100)
                model_perf.append(frgraph)
    return (model_perf)
def stoptraining(request):
    """Flag the current training run as terminated and acknowledge the caller."""
    # Persist the terminated state on the session first, then mirror it on
    # the Existusecases database record for this model.
    request.session['ModelStatus'] = 'Terminated'
    request.session.save()
    model_id = request.session['modelid']
    changeModelStatus(Existusecases, model_id, 'Terminated', 'NA', 'NA')
    return HttpResponse('Terminated')
def kill_child_proc_rec(ppid):
    """Recursively terminate every descendant process of *ppid*, depth-first."""
    import psutil
    for candidate in psutil.process_iter():
        if candidate.ppid() != ppid:
            continue
        child_pid = candidate.pid
        # Kill grandchildren before the child itself so none are re-parented.
        kill_child_proc_rec(child_pid)
        print(f'Terminating: {child_pid}')
        if sys.platform == 'win32':
            candidate.terminate()
        else:
            os.system('kill -9 {0}'.format(child_pid))
def getDataFileCountAndSize(basicConfig):
    """Count the data files under the configured folder and return
    (count, human-readable total size as 'N B' / 'N K' / 'N M')."""
    import glob
    root = basicConfig['dataLocation']
    extension = basicConfig['folderSettings']['fileExtension']
    # Collect every matching file recursively; 'doc' uploads may also
    # contain .docx files, so include those as well.
    matched = list(glob.iglob(os.path.join(root, "**/*." + extension), recursive=True))
    if extension == 'doc':
        matched.extend(glob.iglob(os.path.join(root, "**/*." + 'docx'), recursive=True))
    filesCount = 0
    filesSize = 0
    for entry in matched:
        filesCount += 1
        filesSize = round(filesSize + os.path.getsize(entry), 1)
    # Format the accumulated byte total with the coarsest fitting unit.
    if filesSize > 1048576:
        filesSize = str(round(filesSize / (1024 * 1024), 1)) + ' M'
    elif filesSize > 1024:
        filesSize = str(round(filesSize / 1024, 1)) + ' K'
    else:
        filesSize = str(filesSize) + ' B'
    return filesCount, filesSize
# task 4343 Abort training
def read_log_file( config_file):
    """Return the raw training-log text for the run described by *config_file*.

    The log location is derived from the config's deployLocation, modelName
    (spaces replaced by underscores) and modelVersion. A canned failure
    status string is returned when either the config file or the derived
    log file does not exist.
    """
    default_status = 'aion_learner_status:{"status":"Fail","message":"Log file not found"}'
    cfg_path = Path(config_file)
    if not cfg_path.exists():
        return default_status
    with open(cfg_path, 'r', encoding='utf-8') as fp:
        cfg = json.load(fp)
    log_path = (Path(cfg['basic']['deployLocation'])
                / cfg['basic']['modelName'].replace(' ', '_')
                / cfg['basic']['modelVersion']
                / LOG_FOLDER / LOG_FILE_NAME)
    if not log_path.exists():
        return default_status
    with open(log_path, 'r', encoding='utf-8') as fp:
        return fp.read()
def checkVectorDBPackage(embeddedDB):
    """Return 'True' (string) when *embeddedDB* selects VectorDB but the
    required ``chromadb`` package is not installed, otherwise 'False'.

    String flags ('True'/'False') are used to match the convention of the
    surrounding configuration code.
    """
    # Bug fix: ``importlib.util`` must be imported explicitly — the module
    # level ``import importlib`` does not guarantee the ``util`` submodule
    # is loaded, which can raise AttributeError here.
    import importlib.util
    errorStatus = 'False'
    if embeddedDB.lower() == 'vectordb':
        if importlib.util.find_spec('chromadb') is None:
            errorStatus = 'True'
    return errorStatus
def getModelSize(configSettings,model):
    """Return the LLM fine-tuning size flagged 'True' for *model* in the
    configuration, or 'NA' when no size selection is present."""
    basic = configSettings['basic']
    if 'modelSize' not in basic:
        return 'NA'
    size_flags = basic['modelSize']['llmFineTuning'][model]
    # First size flagged 'True' wins; fall through to 'NA' if none is set.
    for size_name, selected in size_flags.items():
        if selected == 'True':
            return size_name
    return 'NA'
def llmmodelevaluate(request):
    """Launch benchmark evaluation of a fine-tuned LLM on its cloud instance.

    Looks up the hypervisor/instance for the current use case, derives the
    selected algorithm and model size from the stored configuration, then
    invokes ``aion.py -m llmbenchmarking`` as a subprocess. Finally re-renders
    the training page via ``trainmodel``.
    """
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    usecasename = request.session['usecaseid'].replace(" ", "_")
    from appbe.prediction import get_instance
    hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(ModelVersion))
    # aion.py lives two directories above this views module.
    scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
    usecaseconfigfile = request.session['config_json']
    f = open(usecaseconfigfile, "r+", encoding="utf-8")
    configSettingsData = f.read()
    f.close()
    configSettings = json.loads(configSettingsData)
    problem_type = ''
    modelSize = ''
    problemtypes = configSettings['basic']['analysisType']
    # The problem type is the first analysisType entry flagged 'True'.
    for k in problemtypes.keys():
        if configSettings['basic']['analysisType'][k] == 'True':
            problem_type = k
            break
    # Collect every algorithm flagged 'True' into a comma-separated string.
    mlmodels =''
    algorihtms = configSettings['basic']['algorithms'][problem_type]
    for k in algorihtms.keys():
        if configSettings['basic']['algorithms'][problem_type][k] == 'True':
            if mlmodels != '':
                mlmodels += ', '
            mlmodels += k
    # Resolve the selected model size (e.g. 7b/13b) for the chosen model.
    if 'modelSize' in configSettings['basic']:
        selectedModelSize = configSettings['basic']['modelSize']['llmFineTuning'][mlmodels]
        for k in selectedModelSize.keys():
            if configSettings['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True':
                modelSize = k
                break
    # Evaluation mode depends on the uploaded content type (documents/code).
    eval = ''
    if configSettings['basic']['folderSettings']['fileType'] == 'LLM_Document':
        eval = 'doc'
    elif configSettings['basic']['folderSettings']['fileType'] == 'LLM_Code':
        eval = 'code'
    #print(sys.executable, scriptPath,hypervisor,instanceid,f'{mlmodels}-{modelSize}',selected_use_case+'_'+str(ModelVersion),eval)
    outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmbenchmarking','-hv',hypervisor,'-i',instanceid,'-md',f'{mlmodels}-{modelSize}','-uc',usecasename+'_'+str(ModelVersion),'-e',eval])
    return trainmodel(request)
def trainresult(request):
from appbe.aion_config import settings
usecasetab = settings()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
usecasename = request.session['usecaseid'].replace(" ", "_")
log = logging.getLogger('log_ux')
computeinfrastructure = compute.readComputeConfig()
trainmodel =request.POST.get('trainmodel')
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+", encoding="utf-8")
configSettingsData = f.read()
configSettings = json.loads(configSettingsData)
total_steps = calculate_total_activities(configSettings)
request.session['total_steps'] = total_steps
p = usecasedetails.objects.get(usecaseid=request.session['usecaseid'])
usecaseindex = p.id #bugid:14163
if trainmodel == 'Train Model':
try:
if configSettings['basic']['analysisType']['survivalAnalysis'] != 'True' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True':
configSettings['advance']['testPercentage'] = int(request.POST.get('TrainTestPercentage',0)) #Unnati
configSettings['advance']['categoryBalancingMethod'] = request.POST.get('BalancingMethod','NA')
if configSettings['basic']['analysisType']['llmFineTuning'] == 'True':
configSettings['basic']['vmRunning'] = request.POST.get('vmRunning','KeepRunning')
if configSettings['basic']['analysisType']['similarityIdentification'] == 'True':
dbs = configSettings['basic']['preprocessing']['similarityIdentification']
for dbt in dbs.keys():
configSettings['basic']['preprocessing']['similarityIdentification'][dbt]='False'
configSettings['basic']['preprocessing']['similarityIdentification'][request.POST.get('contentdb')] = 'True'
errorStatus = checkVectorDBPackage(request.POST.get('contentdb'))
if errorStatus.lower() == 'true':
return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
if configSettings['basic']['analysisType']['contextualSearch'] == 'True':
dbs = configSettings['basic']['preprocessing']['contextualSearch']
for dbt in dbs.keys():
configSettings['basic']['preprocessing']['contextualSearch'][dbt]='False'
configSettings['basic']['preprocessing']['contextualSearch'][request.POST.get('contentdb')] = 'True'
errorStatus = checkVectorDBPackage(request.POST.get('contentdb'))
if errorStatus.lower() == 'true':
return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
updatedConfigSettingsJson = json.dumps(configSettings)
f.seek(0)
f.write(updatedConfigSettingsJson)
f.truncate()
f.close()
# output_json = aion_train_model(updatedConfigFile)
request.session['noflines'] = 0
request.session['ModelStatus'] = 'Running'
request.session.save()
changeModelStatus(Existusecases,request.session['modelid'],'Running','NA','NA')
#print(configSettings['basic']['distributedLearning'])
#sys.exit()
import timeit
startTime = timeit.default_timer()
process_killed = False
if computeinfrastructure['computeInfrastructure'].lower() == 'aws' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
#print(scriptPath,updatedConfigFile)
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','awstraining','-c',updatedConfigFile])
elif computeinfrastructure['computeInfrastructure'].lower() in ['aws','gcp']:
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmtuning','-c',updatedConfigFile])
else:
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettings['basic']['analysisType']['multiModalLearning'] == 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_gluon.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath, updatedConfigFile])
elif configSettings['basic']['onlineLearning'] == 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','onlinetraining','-c',updatedConfigFile])
elif configSettings['basic']['distributedLearning'] == 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','distributedtraining','-c',updatedConfigFile])
else:
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
cmd = [sys.executable, scriptPath,'-m','training','-c',updatedConfigFile] # task 4343 abort training
training_proc = subprocess.Popen( cmd)
outputStr = ''
while training_proc.poll() == None:
if getModelStatus(Existusecases,request.session['modelid']) == 'Terminated':
kill_child_proc_rec(training_proc.pid)
training_proc.kill()
process_killed = True
time.sleep(1)
if process_killed:
outputStr = 'aion_learner_status:{"status":"Fail","message":"Terminated by user"}'
else:
outputStr = read_log_file( updatedConfigFile)
usename = request.session['UseCaseName'].replace(" ", "_")
outputfile = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'etc','output.json')
if os.path.isfile(outputfile):
f1 = open(outputfile, "r+", encoding="utf-8")
outputStr = f1.read()
f1.close()
else:
if not isinstance( outputStr, str):
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resultJsonObj = json.loads(outputStr)
#print(resultJsonObj)
odataFile = request.session['outputfilepath']
with open(odataFile, 'w') as json_file:
json.dump(resultJsonObj, json_file)
json_file.close()
model = Existusecases.objects.get(id=request.session['modelid'])
request.session['ModelStatus'] = resultJsonObj['status']
ModelStatus = request.session['ModelStatus']
model.Status = resultJsonObj['status']
training_error = ''
if resultJsonObj['status'] == 'SUCCESS':
model.modelType = resultJsonObj['data']['ModelType']
model.DeployPath = str(resultJsonObj['data']['deployLocation'])
if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection', 'timeSeriesAnomalyDetection']: #task 11997
model.ProblemType = 'unsupervised'
else:
model.ProblemType = 'supervised'
else:
training_error = resultJsonObj['message']
model.save()
problemtypes = configSettings['basic']['analysisType']
#print(problemtypes.keys())
problem_typ = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_typ = k
break
modeltyp = problem_typ
listofmodels = ''
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
if mlmodels != '':
listofmodels += str(mlmodels)
if dlmodels != '':
listofmodels += listofmodels+' '+str(dlmodels)
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Algorithms',listofmodels)
# ----------------------------------------------------------------------------- #
if (problem_type == 'classification' or problem_type == 'regression'):
if len(mlmodels.split(',')) == 1:
trainingTime = timeit.default_timer() - startTime
trainingTime = round(trainingTime/60)
# calculate the size of uploaded dataset
filePath = configSettings['basic']['dataLocation']
sz = os.path.getsize(filePath)
fileSizeMB = sz / (1024 * 1024)
filesize = str(fileSizeMB) + " MB"
featuresCount = str(len(configSettings['basic']['trainingFeatures'].split(',')))
modelname = mlmodels.split(',')[0]
fileSizeMBLimit = 0
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config')
if(os.path.isfile(configfilepath)):
file = open(configfilepath, "r", encoding="utf-8")
read = file.read()
file.close()
for line in read.splitlines():
if 'fileSizeMBLimit=' in line:
fileSizeMBLimit = int(line.split('=',1)[1])
# append the new entry into config only if size of uploaded dataset meets the threshold
if fileSizeMB > fileSizeMBLimit:
_val = updateRunConfig(trainingTime, filesize, featuresCount, modelname, problem_type)
# ----------------------------------------------------------------------------- #
if resultJsonObj['status'] == 'SUCCESS':
#from appbe import telemetry
request.session['deploypath'] = str(resultJsonObj['data']['deployLocation'])
from appbe.trainresult import ParseResults
result, survical_images = ParseResults(outputStr)
model_perf = getPerformanceMatrix(request.session['deploypath'],outputStr)
#telemetry.telemetry_data('Training Successfully Done',selected_use_case+'_'+str(ModelVersion),str(listofmodels))
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Success')
request.session['currentstate'] = 3
request.session['finalstate'] = 4
request.session.save()
file_path = request.session['logfilepath']
my_file = open(file_path, 'r',encoding="utf8")
file_content = my_file.read()
my_file.close()
matched_lines = [line.replace('Status:-', '') for line in file_content.split('\n') if "Status:-" in line]
matched_status_lines = matched_lines[::-1]
matched_status_lines = matched_status_lines[0]
matched_status_lines = matched_status_lines.split('...')
matched_status_lines = matched_status_lines[1]
no_lines = len(matched_lines)
if 'noflines' not in request.session:
request.session['noflines'] = 0
request.session['noflines'] = request.session['noflines'] + 1
if request.session['ModelStatus'] != 'SUCCESS':
numberoflines = request.session['noflines']
if numberoflines > no_lines:
numberoflines = no_lines
request.session['noflines'] = no_lines
matched_lines = matched_lines[0:numberoflines]
shortlogs = getStatusCount(matched_lines,request.session['total_steps'])
temp = {}
temp['modelName'] = request.session['UseCaseName']
temp['modelVersion'] = request.session['ModelVersion']
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature']
config['sequenceFeatures'] = configSettings['basic']['indexFeature']
config['FeaturesList'] = configSettings['basic']['trainingFeatures']
config['unimportantFeatures'] = ''
config['targetFeature'] = configSettings['basic']['targetFeature']
modelCondition = ''
problemtypes = configSettings['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem_type,dproblemType,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
configSettings['basic']['problem_type'] = problem_type
configSettings['basic']['dproblem_type'] = dproblemType
if mlmodels != '':
configSettings['basic']['mllearner'] = 'enable'
if dlmodels != '':
configSettings['basic']['dllearner'] = 'enable'
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True':
configSettings['basic']['selected_ML_Models'] = 'AutoGluon'
configSettings['basic']['mllearner'] = 'enable'
else:
configSettings['basic']['selected_ML_Models'] = mlmodels
configSettings['basic']['selected_DL_Models'] = dlmodels
configSettings['basic']['smodel_size'] = smodelsize
if 'noOfRecords' in configSettings['basic']:
records = configSettings['basic']['noOfRecords']
else:
from appbe.train_output import getDataSetRecordsCount
records = getDataSetRecordsCount(configSettings['basic']['dataLocation'])
filesCount = 0
filesSize = 0
#print(configSettings['basic']['analysisType']['llmFineTuning'].lower())
#print(configSettings['basic']['folderSettings']['fileType'].lower())
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document','llm_code']:
filesCount,filesSize = getDataFileCountAndSize(configSettings['basic'])
noofIteration = calculate_total_interations(configSettings)
features = configSettings['basic']['trainingFeatures'].split(',')
noOfTrainingFeatures = len(features)
configSettings['basic']['problem_type']=problem_type
featuretype = configSettings['advance']['profiler']['featureDict']
if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels):
selectedmodel = 'modelcomparision'
else:
selectedmodel = " "
user_provided_data_type = {}
text_type=''
for feat_conf in featuretype:
colm = feat_conf.get('feature', '')
if feat_conf['type'] == "text":
text_type="text"
break
contentdb = ''
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
contentdb = 'VectorDB'
context = {'tab': 'trainresult','filesCount':filesCount,'filesSize':filesSize, 'result': result, 'selectedmodel': selectedmodel, 'advconfig': configSettings, 'shortlogs':shortlogs,
'selected_use_case': selected_use_case, 'noOfRecords': records,'noOfTrainingFeatures':noOfTrainingFeatures,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336
'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'model_perf': model_perf,'logs':matched_status_lines, 'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename}
context['version'] = AION_VERSION
return render(request, 'training.html', context)
else:
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Error')
request.session['currentstate'] = 3
request.session['finalstate'] = 4
#from appbe import telemetry
if process_killed:
errorMsg = 'Terminated by user'
else:
errorMsg = 'Model Training Error (check log file for more details)'
contentdb = ''
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
contentdb = 'VectorDB'
#telemetry.telemetry_data('Training Error',selected_use_case+'_'+str(ModelVersion),str(listofmodels))
context = {'tab': 'trainresult', 'error': errorMsg,'selected_use_case': selected_use_case,'contentdb':contentdb,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename}
context['version'] = AION_VERSION
return render(request, 'training.html', context)
except Exception as e:
log.info('Training Fail:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Training fail '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(e)
return render(request, 'training.html', {'error': 'Model Training Error','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasename':usecasename,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
else:
modelCondition = ''
problemtypes = configSettings['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
configSettings['basic']['problem_type'] = problem_type
configSettings['basic']['dproblem_type'] = dproblem_type
if mlmodels != '':
configSettings['basic']['mllearner'] = 'enable'
if dlmodels != '':
configSettings['basic']['dllearner'] = 'enable'
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True':
configSettings['basic']['selected_ML_Models'] = 'AutoGluon'
configSettings['basic']['mllearner'] = 'enable'
else:
configSettings['basic']['selected_ML_Models'] = mlmodels
configSettings['basic']['selected_DL_Models'] = dlmodels
if 'noofRecords' in configSettings['basic']:
records = configSettings['basic']['noofRecords']
else:
from appbe.train_output import getDataSetRecordsCount
records = getDataSetRecordsCount(configSettings['basic']['dataLocation'])
filesCount = 0
filesSize = 0
print(configSettings['basic']['analysisType']['llmFineTuning'].lower())
print(configSettings['basic']['folderSettings']['fileType'].lower())
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \
configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document', 'llm_code']:
filesCount, filesSize = getDataFileCountAndSize(configSettings['basic'])
noofIteration = calculate_total_interations(configSettings)
features = configSettings['basic']['trainingFeatures'].split(',')
noOfTrainingFeatures = len(features)
configSettings['basic']['problem_type']=problem_type
context = { 'advconfig': configSettings,'filesCount':filesCount,'filesSize':filesSize,
'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion,'noofIteration':noofIteration,'usecasename':usecasename,
'modelCondition':modelCondition, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures}
context['version'] = AION_VERSION
return render(request, 'training.html',context)
def getTrainingTime(filePath, no_of_features):
    """Estimate the training time (in minutes) for the dataset at *filePath*.

    Compares the dataset size against historical runs recorded in
    ``config/training_runs.json``.  Runs are split into two populations by
    ``featuresThreshold`` (narrow vs. wide datasets) and only runs from the
    same population as *no_of_features* are considered.  If a run's sample
    size is within ``acceptSizeVariance`` percent of the dataset size it is
    a direct match; otherwise the estimate is linearly extrapolated from the
    closest recorded run.

    Args:
        filePath: path to the uploaded dataset file.
        no_of_features: number of selected training features.

    Returns:
        ``'<minutes>_match'`` for a direct historical match,
        ``'<minutes>_extrapolation'`` for an extrapolated estimate, or
        ``'0_0'`` when the file is missing, below the configured size
        threshold, or no usable history exists.
    """
    returnVal = '0_0'
    if not os.path.isfile(filePath):
        return returnVal
    trainingTime = 0
    neartrainingTime = 0        # training time of the closest recorded run
    nearsampleSize = 0          # closest recorded sample size (for extrapolation)
    leastSizeDifference = 0     # smallest |dataset size - sample size| seen so far
    inRange = 0                 # 1 once a run matched within the accepted variance
    fileSizeMBLimit = 0         # minimum dataset size (MB) before estimating at all
    acceptSizeVariance = 10     # accepted sample-size variance (percent)
    featuresThreshold = 50      # boundary between narrow and wide datasets
    # ------------------------------------------------------------------------ #
    # Allow the defaults above to be overridden via config/training.config.
    configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config')
    if os.path.isfile(configfilepath):
        # 'with' guarantees the handle is closed even if parsing raises.
        with open(configfilepath, "r") as conf_file:
            read = conf_file.read()
        for line in read.splitlines():
            if 'fileSizeMBLimit=' in line:
                fileSizeMBLimit = int(line.split('=',1)[1])
            if 'acceptSizeVariance=' in line:
                acceptSizeVariance = int(line.split('=',1)[1])
            if 'featuresThreshold=' in line:
                featuresThreshold = int(line.split('=',1)[1])
    # Size of the uploaded dataset (in MB).
    fileSizeMB = os.path.getsize(filePath) / (1024 * 1024)
    # Only estimate when the dataset is bigger than the configured threshold.
    if fileSizeMB > fileSizeMBLimit:
        runsfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json')
        try:
            if os.path.isfile(runsfilepath):
                with open(runsfilepath) as f:
                    data = json.load(f)
                for run in data['runs']:
                    sampleSize = int(float(run['sampleSize'].replace(" MB","")))
                    features = int(run['features'])
                    sizeDifference = abs(fileSizeMB - sampleSize)
                    # NOTE(review): this resets the running minimum whenever it
                    # is exactly 0 (i.e. after an exact-size match), mirroring
                    # the original behaviour — confirm that is intended.
                    if leastSizeDifference == 0:
                        leastSizeDifference = sizeDifference
                    # ------------------------------------------------------ #
                    if no_of_features <= featuresThreshold:
                        # Direct match within the (progressively narrowed) variance.
                        if (sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features <= featuresThreshold:
                            acceptSizeVariance = (sizeDifference * 100)/fileSizeMB
                            trainingTime = int(run['trainingTime'].replace(" Mins",""))
                            returnVal = str(trainingTime) + '_match'
                            inRange = 1
                        # Track the nearest run for possible extrapolation.
                        if sizeDifference <= leastSizeDifference and features <= featuresThreshold:
                            nearsampleSize = sampleSize
                            leastSizeDifference = sizeDifference
                            neartrainingTime = int(run['trainingTime'].replace(" Mins",""))
                    # ------------------------------------------------------ #
                    if no_of_features > featuresThreshold:
                        if (sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features > featuresThreshold:
                            acceptSizeVariance = (sizeDifference * 100)/fileSizeMB
                            trainingTime = int(run['trainingTime'].replace(" Mins",""))
                            returnVal = str(trainingTime) + '_match'
                            inRange = 1
                        if sizeDifference <= leastSizeDifference and features > featuresThreshold:
                            nearsampleSize = sampleSize
                            leastSizeDifference = sizeDifference
                            neartrainingTime = int(run['trainingTime'].replace(" Mins",""))
                    # ------------------------------------------------------ #
                # No run matched within the variance: extrapolate linearly
                # from the closest recorded run.
                if inRange == 0:
                    sizeDifference = fileSizeMB - nearsampleSize
                    ratio = (sizeDifference * 100)/nearsampleSize
                    trainingTime = int(neartrainingTime + ((ratio * neartrainingTime)/100))
                    returnVal = str(trainingTime) + '_extrapolation'
        except Exception:
            # Best-effort estimate: malformed history/config (or division by
            # zero when no candidate run exists) leaves returnVal unchanged.
            pass
    return returnVal
def getllmmodelscore(usecaseid,model):
    """Return benchmark-evaluation details for a fine-tuned LLM use case.

    Looks up the 'llm_benchmarking' table in the local sqlite config store
    and assembles the fine-tuned score, the published foundation-model score
    and the benchmark dataset name for display on the training page.
    """
    DB_TABLE = 'llm_benchmarking'
    from appbe.sqliteUtility import sqlite_db
    from appbe.dataPath import DATA_DIR
    sqlite_obj = sqlite_db(str(Path(DATA_DIR)/'sqlite'),'config.db')
    usecaseidcond = f'usecaseid="{usecaseid}"'
    helptxt = ''
    msg = ''
    if not sqlite_obj.table_exists(DB_TABLE):
        # No benchmarking has ever been recorded: return an empty shell.
        return {'status':'','scoreType':'','benchmarkdataset':'','finetunedscore':'','foundationscore':'','helptxt':''}
    rows = sqlite_obj.read_data(DB_TABLE,usecaseidcond)
    status = ''
    finetunedscore = 'NA'
    benchmarkdataset = 'NA'
    # Published scores of the foundation (un-tuned) models, for comparison.
    modelfunctionscore = {'CodeLLaMA-2-7B':'33%','CodeLLaMA-2-13B':'36%','LLaMA-2-7B':'16.8%','LLaMA-2-13B':'20.1%','LLaMA-2-70B':'31.0%','LLaMA-2-Chat-7B':'76%','LLaMA-2-Chat-13B':'79.2%','LLaMA-2-Chat-70B':'84.2%','Falcon-7B':'NA','Falcon-40B':'NA'}
    foundationscore = modelfunctionscore.get(model,'NA')
    scoretype = 'NA'
    for row in rows:
        columns = sqlite_obj.column_names(DB_TABLE)
        status = row[columns.index('state')]
        if status.lower() in ['success','finished']:
            result_type = row[columns.index('result_type')]
            # NOTE(review): eval of DB-stored text; assumes the local config
            # store is trusted.
            result = eval(row[columns.index('result')])
            scoretype = list(result.keys())[0]
            if scoretype.lower() == 'hellaswag':
                benchmarkdataset = 'HellaSwag'
                helptxt = 'HellaSwag is a challenge dataset for evaluating commonsense Natural Language Inferencing. It consists of ~70k multiple choice questions with four answer choices about what might happen next. The correct answer is the (real) sentence for the next event; the three incorrect answers are adversarial generated and human verified.'
            else:
                benchmarkdataset = 'HumanEval'
            if result_type == 'dict':
                sub_result = list(result.values())[0]
                scoretype = list(sub_result.keys())[0]
                if scoretype == 'acc':
                    scoretype = 'Accuracy'
                finetunedscore = str(round((float(list(sub_result.values())[0])*100),2))
                finetunedscore = f'{finetunedscore}%'
            else:
                finetunedscore = str(round((float(list(result.values())[0])*100),2))
        elif status.lower() == 'error':
            msg = row[columns.index('result')]
    return {'status':status,'msg':msg,'benchmarkdataset':benchmarkdataset,'scoreType':scoretype,'finetunedscore':str(finetunedscore),'foundationscore':foundationscore,'helptxt':helptxt}
def trainmodel(request):
    """Render the model-training page for the use case held in the session.

    Depending on ``request.session['ModelStatus']`` this either shows the
    finished training result (parsed output, short logs, performance matrix,
    LLM benchmark scores) or the pre-training page with an estimated
    training time.  All state is carried in ``request.session``.
    """
    from appbe.aion_config import settings
    usecasetab = settings()
    selected_use_case = request.session['UseCaseName']
    ModelVersion = request.session['ModelVersion']
    ModelStatus = request.session['ModelStatus']
    usecasename = request.session['usecaseid'].replace(" ", "_")
    try:
        checkModelUnderTraining(request,usecasedetails,Existusecases)
        computeinfrastructure = compute.readComputeConfig()
        updatedConfigFile = request.session['config_json']
        # NOTE(review): this handle is never explicitly closed.
        f = open(updatedConfigFile, "r+", encoding="utf-8")
        configSettingsData = f.read()
        configSettingsJson = json.loads(configSettingsData)
        total_steps = calculate_total_activities(configSettingsJson)
        warning = check_unsupported_col(configSettingsJson)
        time_series_warning = check_granularity(configSettingsJson)
        noofIteration = calculate_total_interations(configSettingsJson)
        request.session['total_steps'] = total_steps
        p = usecasedetails.objects.get(usecaseid=request.session['usecaseid'])
        usecaseid = p.id
        modelCondition = ''
        # Resolve problem type and the selected ML/DL algorithm lists.
        problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
        configSettingsJson['basic']['problem_type'] = problem_type
        configSettingsJson['basic']['dproblem_type'] = dproblem_type
        if mlmodels != '':
            configSettingsJson['basic']['mllearner'] = 'enable'
        if dlmodels != '':
            configSettingsJson['basic']['dllearner'] = 'enable'
        if configSettingsJson['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettingsJson['basic']['analysisType']['multiModalLearning'] == 'True':
            configSettingsJson['basic']['selected_ML_Models'] = 'AutoGluon'
            configSettingsJson['basic']['mllearner'] = 'enable'
        else:
            configSettingsJson['basic']['selected_ML_Models'] = mlmodels
            configSettingsJson['basic']['selected_DL_Models'] = dlmodels
            configSettingsJson['basic']['smodel_size'] = smodelsize
        # ---------------------------------------------------------------------- #
        # Pre-training estimate of the expected training time (only for
        # single-algorithm classification/regression on untrained models).
        cal_trainingTime = 0.
        is_extrapolation = 'No'
        is_DataImbalance = 'No'
        if (request.session['ModelStatus'] == 'Not Trained' and (problem_type == 'classification' or problem_type == 'regression')):
            # <!-- ------------------------------ Data Imbalance Changes ------------------------------ -->
            if ( problem_type == 'classification' ):
                is_DataImbalance = 'Yes'
            # <!-- ------------------------------------------------------------------------------------- -->
            if len(mlmodels.split(',')) == 1:
                filePath = configSettingsJson['basic']['dataLocation']
                no_of_features = len(configSettingsJson['basic']['trainingFeatures'].split(','))
                returnVal = getTrainingTime(filePath, no_of_features)
                cal_trainingTime = int(returnVal.split('_')[0])
                if (returnVal.split('_')[1] == 'extrapolation'):
                    is_extrapolation = 'Yes'
        # ---------------------------------------------------------------------- #
        features = configSettingsJson['basic']['trainingFeatures'].split(',')
        # The target feature must not be counted among the training features.
        if configSettingsJson['basic']['targetFeature'] in features:
            features.remove(configSettingsJson['basic']['targetFeature'])
        noOfTrainingFeatures = len(features)
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        if 'noOfRecords' in configSettingsJson['basic']:
            records = configSettingsJson['basic']['noOfRecords']
        else:
            from appbe.train_output import getDataSetRecordsCount
            records = getDataSetRecordsCount(configSettingsJson['basic']['dataLocation'])
        # File count/size only apply to LLM fine-tuning over document/code folders.
        filesCount = 0
        filesSize = 0
        try:
            if configSettingsJson['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \
                configSettingsJson['basic']['folderSettings']['fileType'].lower() in ['llm_document', 'llm_code']:
                filesCount, filesSize = getDataFileCountAndSize(configSettingsJson['basic'])
        except:
            pass
        if request.session['finalstate'] <= 3:
            request.session['finalstate'] = 3
            request.session['currentstate'] = 3
        # Refresh a possibly stale 'running' status from the database.
        if request.session['ModelStatus'].lower() == 'running':
            model = Existusecases.objects.get(ModelName=request.session['ModelName'],
                                              Version=request.session['ModelVersion'])
            status = checkversionrunningstatus(model.id,usecasedetails,Existusecases)
            request.session['ModelStatus'] = status
            request.session.save()
        if request.session['ModelStatus'] == 'SUCCESS':
            # ---- training finished: build the result view from saved output ---- #
            model = Existusecases.objects.get(ModelName=request.session['ModelName'],
                                              Version=request.session['ModelVersion'])
            output_train_json_filename = str(model.TrainOuputLocation)
            f = open(output_train_json_filename, "r+")
            training_output = f.read()
            f.close()
            model_perf = getPerformanceMatrix(request.session['deploypath'],training_output)
            from appbe.trainresult import ParseResults
            result, survical_images = ParseResults(training_output)
            # Read the training log and keep the 'Status:-' lines for display.
            file_path = request.session['logfilepath']
            my_file = open(file_path, 'r',encoding="utf-8")
            file_content = my_file.read()
            my_file.close()
            matched_lines = [line.replace('Status:-', '') for line in file_content.split('\n') if "Status:-" in line]
            matched_status_lines = matched_lines[::-1]
            matched_status_lines = matched_status_lines[0]
            matched_status_lines = matched_status_lines.split('...')
            matched_status_lines = matched_status_lines[1]
            no_lines = len(matched_lines)
            # Track how many log lines have already been shown in this session.
            if 'noflines' not in request.session:
                request.session['noflines'] = 0
            request.session['noflines'] = request.session['noflines'] + 1
            if request.session['ModelStatus'] != 'SUCCESS':
                numberoflines = request.session['noflines']
                if numberoflines > no_lines:
                    numberoflines = no_lines
                    request.session['noflines'] = no_lines
                matched_lines = matched_lines[0:numberoflines]
            shortlogs = getStatusCount(matched_lines,request.session['total_steps'])
            featuretype = configSettingsJson['advance']['profiler']['featureDict']
            user_provided_data_type = {}
            text_type=''
            # Detect whether any feature is configured as free text.
            for feat_conf in featuretype:
                colm = feat_conf.get('feature', '')
                if feat_conf['type'] == "text":
                    text_type="text"
                    break
            configSettingsJson['basic']['problem_type']= problem_type
            configSettingsJson['basic']['selected_ML_Models']= mlmodels
            # NOTE(review): 'or' makes this true unless BOTH models are
            # selected — possibly intended to be 'and'; confirm.
            if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels):
                selectedmodel = 'modelcomparision'
            else:
                selectedmodel = " "
            contentdb = ''
            finetunedeval = {}
            # Content store used by similarity/contextual-search problem types.
            if problem_type.lower() in ['similarityidentification','contextualsearch']:
                if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
                    contentdb = 'CSV'
                elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
                    contentdb = 'VectorDB'
            if problem_type.lower() == 'llmfinetuning':
                # Attach benchmark scores for the fine-tuned LLM.
                modelSize = getModelSize(configSettingsJson,mlmodels)
                usecasename = request.session['usecaseid'].replace(" ", "_")
                finetunedeval = getllmmodelscore(f'{usecasename}_{ModelVersion}',f'{mlmodels}-{modelSize}')
            context = {'result': result,'filesCount':filesCount,'filesSize':filesSize, 'text_type':text_type,'selectedmodel':selectedmodel, 'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename,
                       'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'warning':warning,'time_series_warning':time_series_warning,
                       'modelCondition':modelCondition,'ModelVersion': ModelVersion,'shortlogs':shortlogs,'logs':matched_status_lines,'currentstate': request.session['currentstate'],'finalstate': request.session['finalstate'], 'model_perf': model_perf,'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb,'finetunedeval':finetunedeval}
        else:
            # ---- not yet trained (or failed): show the pre-training page ---- #
            contentdb = ''
            if problem_type.lower() in ['similarityidentification','contextualsearch']:
                if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
                    contentdb = 'CSV'
                elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
                    # Fall back to CSV when chromadb is not installed.
                    status = importlib.util.find_spec('chromadb')
                    if not status:
                        contentdb = 'CSV'
                    else:
                        contentdb = 'VectorDB'
                else:
                    status = importlib.util.find_spec('chromadb')
                    if not status:
                        contentdb = 'CSV'
                    else:
                        contentdb = 'VectorDB'
            configSettingsJson['basic']['problem_type']=problem_type
            context = {'cal_trainingTime':cal_trainingTime,'filesCount':filesCount,'filesSize':filesSize, 'is_extrapolation': is_extrapolation,'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename,
                       'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus, 'warning':warning,'time_series_warning':time_series_warning,'is_DataImbalance' : is_DataImbalance,
                       'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'],
                       'modelCondition':modelCondition,'finalstate': request.session['finalstate'], 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'contentdb':contentdb}
        return render(request, 'training.html', context)
    except Exception as e:
        print(e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        context = { 'error': 'Model Training Error','selected_use_case': selected_use_case,'contentdb':'','usecasename':usecasename,
                    'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
        return render(request, 'training.html', context) |
apps.py | from django.apps import AppConfig
class ModelTrainingConfig(AppConfig):
    """Django AppConfig registering the 'appfe.modelTraining' application."""
    name = 'appfe.modelTraining'
|
tests.py | from django.test import TestCase
# Create your tests here.
|
landing_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe import installPackage
import json
from appbe import compute
from appbe.training import checkModelUnderTraining
import logging
def opentraininglogs(request, id,currentVersion):
    """Load a previously trained use-case version into the session and open it.

    Resolves the Existusecases record for (*id*, *currentVersion*), restores
    all session keys (paths, status, data settings) from its saved
    configuration file and hands over to the data-upload view so the user
    lands on the trained model's pages.

    Args:
        request: Django request; its session is (re)populated here.
        id: use case identifier.
        currentVersion: trained model version to open.

    Returns:
        The rendered response from ``uploaddata`` on success, the use-case
        list on missing artefacts, or the 'usecases.html' error page on any
        unexpected failure.
    """
    from appbe.pages import usecases_page
    try:
        from appbe import installPackage
        modelID = installPackage.getMIDFromUseCaseVersion(id,currentVersion,usecasedetails,Existusecases)
        p = Existusecases.objects.get(id=modelID)
        configpath = str(p.ConfigPath)
        if not os.path.exists(configpath):
            # Saved artefacts are gone; send the user back to the use-case list.
            request.session['IsRetraining'] = 'No'
            status,context,action = usecases_page(request,usecasedetails,Existusecases)
            context['errorMsg'] = 'Error in model launching: Some of the files are missing'
            return render(request,action,context)
        usecasename = p.ModelName.UsecaseName
        request.session['ModelName'] = p.ModelName.id
        request.session['UseCaseName'] = usecasename
        request.session['usecaseid'] = p.ModelName.usecaseid
        request.session['ModelVersion'] = p.Version
        request.session['deploypath'] = str(p.DeployPath)
        request.session['config_json'] = configpath
        usename = request.session['usecaseid'].replace(" ", "_")
        request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
        request.session['finalstate'] = 3
        request.session['ModelStatus'] = p.Status
        # 'with' ensures the config handle is closed even if parsing fails.
        with open(updatedConfigFile := request.session['config_json'], "r") as f:
            configSettings = json.loads(f.read())
        # Pick the single analysis type flagged 'True' in the configuration.
        problemtypes = configSettings['basic']['analysisType']
        problem_type = ""
        for k in problemtypes.keys():
            if problemtypes[k] == 'True':
                problem_type = k
                break
        if problem_type.lower() in ['videoforecasting','imageclassification','objectdetection','document','llmfinetuning']:
            # Folder-based data sources carry their own file settings.
            request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
            request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
            request.session['datalocation'] = configSettings['basic']['dataLocation']
            if problem_type.lower() == 'llmfinetuning':
                request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension']
        else:
            request.session['datalocation'] = str(p.DataFilePath)
            request.session['datatype'] = 'Normal'
            # Restore CSV parsing options; default to comma / double quote.
            if 'fileSettings' in configSettings['basic'].keys():
                fileSettings = configSettings['basic']['fileSettings']
                if 'delimiters' in fileSettings.keys():
                    request.session['delimiter'] = fileSettings['delimiters']
                    request.session['textqualifier'] = fileSettings['textqualifier']
            else:
                request.session['delimiter'] = ','
                request.session['textqualifier'] = '"'
        from appfe.modelTraining.views import uploaddata
        return uploaddata(request)
    except Exception as e:
        print(e)
        return render(request, 'usecases.html',{'error': 'Failed to launch model. Please train the model first before launching.','selected': 'prediction','version':AION_VERSION})
def retrain(request, id,currentVersion):
    """Create the next version of usecase *id* and preload the training data
    of *currentVersion* so the user can retrain the model.

    Merges newly published input rows (if any) into the training file,
    registers a new 'Not Trained' Existusecases row, and renders the upload
    page with a data preview.  On failure the usecases page is rendered with
    an error message.
    """
    from appbe.aion_config import eda_setting
    from appbe.pages import usecases_page
    from appbe.aion_config import settings
    usecasetab = settings()
    try:
        p = usecasedetails.objects.get(id=id)
        # Next version = highest existing version + 1, or 1 if none exist.
        s1 = Existusecases.objects.filter(ModelName=id).annotate(maxver=Max('ModelName__existusecases__Version'))
        config_list = s1.filter(Version=F('maxver'))
        if config_list.count() > 0:
            Version = config_list[0].Version + 1
        else:
            Version = 1
        usecasename = p.UsecaseName
        request.session['ModelName'] = p.id
        request.session['UseCaseName'] = usecasename
        request.session['usecaseid'] = p.usecaseid
        request.session['ModelVersion'] = Version
        request.session['ModelStatus'] = 'Not Trained'
        request.session['finalstate'] = 0
        usecase = usecasedetails.objects.all().order_by('-id')
        # Retraining settings changes
        # -------- S T A R T --------
        model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
        samplePercentage = 100
        samplePercentval = 0
        showRecommended = False
        # Initialized so the no-previous-model path below does not raise
        # NameError when creating the new Existusecases row.
        configfile = ''
        if(model.count() > 0):
            indexVal = 0
            configfile = str(model[indexVal].ConfigPath)
            with open(configfile, "r") as f:
                configSettings = f.read()
            configSettings = json.loads(configSettings)
            dataFile = configSettings['basic']['dataLocation']
            # folderSettings only exists for folder-based data; .get avoids a
            # KeyError for plain tabular usecases.
            if configSettings['basic'].get('folderSettings', {}).get('fileType') == 'Object':
                request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
                request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile']
                request.session['datalocation'] = configSettings['basic']['dataLocation']
                return objectlabeldone(request)
            request.session['datalocation'] = str(configSettings['basic']['dataLocation'])
            request.session['datatype'] = 'Normal'
            if 'fileSettings' in configSettings['basic'].keys():
                fileSettings = configSettings['basic']['fileSettings']
                if 'delimiters' in fileSettings.keys():
                    request.session['delimiter'] = fileSettings['delimiters']
                    request.session['textqualifier'] = fileSettings['textqualifier']
            else:
                request.session['delimiter'] = ','
                request.session['textqualifier'] = '"'
            df = pd.read_csv(dataFile, encoding='utf8',nrows=10,encoding_errors= 'replace')
            records = df.shape[0]
            # Merge freshly published input rows so retraining sees them.
            df1 = check_input_data(usecasename)
            if df1.shape[0] > 0:
                df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
                # DataFrame.append was removed in pandas 2.x; concat is the
                # supported equivalent.
                df = pd.concat([df, df1], ignore_index=True)
                df = df.reset_index(drop=True)
                filetimestamp = str(int(time.time()))
                dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
                df.to_csv(dataFile, index=False)
                print(df.shape[0])
                request.session['datalocation'] = str(dataFile)
            request.session['NoOfRecords'] = records
            request.session['IsRetraining'] = 'Yes'
            df_top = df.head(10)
            df_json = df_top.to_json(orient="records")
            df_json = json.loads(df_json)
            featuresList = df.columns.tolist()
            numberoffeatures = len(featuresList)
            from appfe.modelTraining.views import getimpfeatures
            imp_features = getimpfeatures(dataFile,numberoffeatures)
            check_df = pd.read_csv(dataFile,encoding='utf8',encoding_errors= 'replace')
            # EDA performance: recommend sampling when the dataset exceeds
            # the configured EDA sample size.
            sample_size = int(eda_setting())
            dflength = len(check_df)
            if dflength > sample_size:
                samplePercentage = int((sample_size/dflength) * 100)
                samplePercentval = samplePercentage / 100
                showRecommended = True
        statusmsg = 'Data loaded Successfully for Retraining.'
        computeinfrastructure = compute.readComputeConfig()
        selected_use_case = request.session['UseCaseName']
        ModelVersion = Version
        ModelStatus = 'Not Trained'
        if len(usecase) > 0:
            nouc = usecasedetails.objects.latest('id')
            nouc = (nouc.id)+1
        else:
            nouc = 1
        # Register the new, not-yet-trained version.
        ps = Existusecases(DataFilePath=request.session['datalocation'], DeployPath='', Status='Not Trained',ConfigPath=configfile, Version=Version, ModelName=p,TrainOuputLocation='')
        ps.save()
        if(model.count() > 0):
            context = {'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'tab': 'tabconfigure','data': df_json,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','exploratory':False, 'status_msg': statusmsg,'computeinfrastructure':computeinfrastructure,'IsRetrainingModel':True,'imp_features':imp_features,'numberoffeatures':numberoffeatures, 'dataSetPath': dataFile,'usecasetab':usecasetab,'finalstate':request.session['finalstate'],'version':AION_VERSION}
        else:
            context = {'tab': 'tabconfigure','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'Modelretrain':request.session['ModelVersion'],'finalstate':request.session['finalstate'],'version':AION_VERSION}
        context['version'] = AION_VERSION
        return render(request, 'upload.html', context)
    except Exception as e:
        print(e)
        checkModelUnderTraining(request,usecasedetails,Existusecases)
        request.session['IsRetraining'] = 'No'
        status,context,action = usecases_page(request,usecasedetails,Existusecases)
        context['version'] = AION_VERSION
        context['Status'] = 'Error'
        context['Msg'] = 'Error in retraining usecase. Check log file for more details'
        return render(request,action,context)
def launchmodel(request, id,version):
    """Load a trained model's configuration into the session and open the
    prediction page for it.

    Renders the prediction page with an error message when the model or its
    config file is missing or cannot be loaded.
    """
    from appbe.pages import usecases_page
    try:
        modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
        p = Existusecases.objects.get(id=modelID)
        configpath = str(p.ConfigPath)
        if not os.path.exists(configpath):
            request.session['IsRetraining'] = 'No'
            status,context,action = usecases_page(request,usecasedetails,Existusecases)
            context['errorMsg'] = 'Error in model launching: Some of the files are missing'
            return render(request,action,context)
        usecasename = p.ModelName.UsecaseName
        Version = p.Version
        request.session['ModelName'] = p.ModelName.id
        request.session['UseCaseName'] = usecasename
        request.session['usecaseid'] = p.ModelName.usecaseid
        request.session['ModelVersion'] = p.Version
        request.session['deploypath'] = str(p.DeployPath)
        request.session['config_json'] = configpath
        usename = request.session['usecaseid'].replace(" ", "_")
        request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
        request.session['finalstate'] = 3
        request.session['ModelStatus'] = p.Status
        with open(configpath, "r") as f:
            configSettings = json.loads(f.read())
        # Resolve the single enabled analysis type.
        problem_type = ""
        for k in configSettings['basic']['analysisType'].keys():
            if configSettings['basic']['analysisType'][k] == 'True':
                problem_type = k
                break
        # folderSettings only exists for folder-based data; use .get so plain
        # tabular usecases do not raise KeyError on the 'Document' check
        # (the original code did).
        folderSettings = configSettings['basic'].get('folderSettings', {})
        if problem_type in ('videoForecasting', 'imageClassification', 'objectDetection') or folderSettings.get('fileType') == 'Document':
            request.session['datatype'] = folderSettings['fileType']
            request.session['csvfullpath'] = folderSettings['labelDataFile']
            request.session['datalocation'] = configSettings['basic']['dataLocation']
        else:
            request.session['datalocation'] = str(p.DataFilePath)
            request.session['datatype'] = 'Normal'
            if 'fileSettings' in configSettings['basic'].keys():
                fileSettings = configSettings['basic']['fileSettings']
                if 'delimiters' in fileSettings.keys():
                    request.session['delimiter'] = fileSettings['delimiters']
                    request.session['textqualifier'] = fileSettings['textqualifier']
            else:
                request.session['delimiter'] = ','
                request.session['textqualifier'] = '"'
        from appfe.modelTraining.prediction_views import Prediction
        return Prediction(request)
    except Exception as e:
        print(e)
        return render(request, 'prediction.html',{'error': 'Failed to launch model. Please train the model first before launching.','selected': 'prediction','version':AION_VERSION})
def modxplain(request, id,version):
    """Load a trained model into the session and open the explainability view.

    The original implementation duplicated the whole session-setup block and,
    in the missing-config error path, logged two names that were never defined
    (NameError); both defects are fixed here.
    """
    from appbe.pages import usecases_page
    log = logging.getLogger('log_ux')
    modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
    p = Existusecases.objects.get(id=modelID)
    configpath = str(p.ConfigPath)
    usecasename = p.ModelName.UsecaseName
    Version = p.Version
    request.session['ModelName'] = p.ModelName.id
    request.session['UseCaseName'] = usecasename
    request.session['usecaseid'] = p.ModelName.usecaseid
    request.session['ModelVersion'] = p.Version
    request.session['deploypath'] = str(p.DeployPath)
    request.session['config_json'] = configpath
    usename = request.session['usecaseid'].replace(" ", "_")
    request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
    request.session['finalstate'] = 3
    request.session['ModelStatus'] = p.Status
    if not os.path.exists(configpath):
        request.session['IsRetraining'] = 'No'
        status,context,action = usecases_page(request,usecasedetails,Existusecases)
        context['errorMsg'] = 'Error in model launching: Some of the files are missing'
        # Log with values that are guaranteed to be bound here.
        log.info('modxplain:' + str(usecasename) + ':' + str(Version) + ':' + '0 ' + 'sec' + ':' + 'Error:Error in model launching: Some of the files are missing')
        return render(request,action,context)
    from appfe.modelTraining import visualizer_views as v
    return v.xplain(request)
def moddrift(request, id,version):
    """Prepare session state for drift monitoring of a trained model, then
    delegate to inputdrift().

    Renders the usecases page with an error message when the model's config
    file is missing.
    """
    from appbe.pages import usecases_page
    modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
    p = Existusecases.objects.get(id=modelID)
    configpath = str(p.ConfigPath)
    if not os.path.exists(configpath):
        request.session['IsRetraining'] = 'No'
        status,context,action = usecases_page(request,usecasedetails,Existusecases)
        context['errorMsg'] = 'Error in model launching: Some of the files are missing'
        return render(request,action,context)
    usecasename = p.ModelName.UsecaseName
    Version = p.Version
    request.session['ModelName'] = p.ModelName.id
    request.session['UseCaseName'] = usecasename
    request.session['usecaseid'] = p.ModelName.usecaseid
    request.session['ModelVersion'] = p.Version
    request.session['deploypath'] = str(p.DeployPath)
    request.session['config_json'] = configpath
    usename = request.session['usecaseid'].replace(" ", "_")
    request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
    request.session['finalstate'] = 3
    request.session['ModelStatus'] = p.Status
    # Point drift analysis at the original training data recorded in the
    # config; 'with' guarantees the file handle is closed.
    with open(configpath, "r") as f:
        configSettingsJson = json.loads(f.read())
    request.session['datalocation'] = configSettingsJson['basic']['dataLocation']
    return inputdrift(request)
def inputdrift(request):
    """Render the input-drift monitoring page for the active usecase.

    Drift analysis is only offered for classification/regression models that
    trained successfully; other states get an explanatory message.
    """
    log = logging.getLogger('log_ux')
    from appbe.aion_config import settings
    usecasetab = settings()
    from appbe import service_url
    # Defaults so the except-branch render never hits undefined names (the
    # original referenced these before assignment on early failures).
    selected_use_case, ModelVersion, ModelStatus = '', '', ''
    computeinfrastructure = []
    try:
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        computeinfrastructure = compute.readComputeConfig()
        if ModelStatus != 'SUCCESS':
            context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
            log.info('Error Please train the model first or launch an existing trained model')
        else:
            updatedConfigFile = request.session['config_json']
            with open(updatedConfigFile, "r") as f:
                configSettingsJson = json.loads(f.read())
            # Resolve the single enabled analysis type.
            problemtypes = configSettingsJson['basic']['analysisType']
            problem_type = ""
            for k in problemtypes.keys():
                if configSettingsJson['basic']['analysisType'][k] == 'True':
                    problem_type = k
                    break
            problem = problem_type
            ser_url = service_url.read_monitoring_service_url_params(request)
            iterName = request.session['usecaseid'].replace(" ", "_")
            ModelVersion = request.session['ModelVersion']
            ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
            pser_url = service_url.read_performance_service_url_params(request)
            pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
            if problem.lower() not in ['classification','regression']:
                context = {'error': 'Input drift only available for classification and regression problems', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'version':AION_VERSION,
                           'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
            else:
                context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'version':AION_VERSION,
                           'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
        return render(request, 'inputdrif.html', context)
    except Exception as e:
        print(e)
        log.info('inputdrift; Error: Failed to perform drift analysis'+str(e))
        return render(request, 'inputdrif.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'error':'Fail to do inputdrift analysis','usecasetab':usecasetab})
|
AirflowLib.py | #
# AirflowLib.py
#
# It contains methods to consume rest API of Apache airflow instance
# Apache Airflow exposed experimental API
# One can achieve the API output just by using the methods implemented within this python file by importing the same
#
import requests
import pandas as pd
# base_url = 'http://localhost:8080/api/experimental'
# It defines the API error which actually raised when error occured during API consumption
from modelTraining.airflow_config import base_url
class ApiError(Exception):
    """Raised when an Airflow REST API call returns a non-200 status."""

    def __init__(self, status):
        self.status = status

    def __str__(self):
        return f"APIError: status={self.status}"
def GetDagRunList(dagId):
    """Return every recorded run of the given DAG as a pandas DataFrame."""
    response = requests.get(f"{base_url}/dags/{dagId}/dag_runs")
    if response.status_code != 200:
        raise ApiError(f"GetDagRunList {response}")
    return ConvertJSONtoDF(response.json())
# It is responsible to create/trigger a dag run on the Airflow instance.
def TriggerDag(dagId, paramJson=None):
    """Trigger a new run of *dagId*, optionally passing a conf payload.

    Bug fix: the original unconditionally overwrote the caller's paramJson
    with a hard-coded payload; that payload is now only the default used
    when the caller passes nothing.
    """
    if paramJson is None:
        paramJson = {"conf": "{\"key\":\"value\"}"}
    resp = requests.post(base_url + '/dags/' + dagId + '/dag_runs', json=paramJson)
    print(resp)
    if resp.status_code != 200:
        raise ApiError('TriggerDag {}'.format(resp))
    return resp.json()
def PauseDagRun(dagId):
    """Pause (toggle off) the given DAG on the Airflow instance."""
    response = requests.get(f"{base_url}/dags/{dagId}/paused/true")
    if response.status_code != 200:
        raise ApiError(f"PauseDagRun {response}")
    return response.json()
def UnPauseDagRun(dagId):
    """Unpause (toggle on) the given DAG on the Airflow instance."""
    response = requests.get(f"{base_url}/dags/{dagId}/paused/false")
    if response.status_code != 200:
        raise ApiError(f"UnPauseDagRun {response}")
    return response.json()
def TestAPI():
    """Health check: verify the Airflow experimental API is reachable."""
    response = requests.get(f"{base_url}/test")
    if response.status_code != 200:
        raise ApiError(f"TestAPI {response}")
    return response.json()
def GetLatestDagRun():
    """Return the most recent run of every DAG as a pandas DataFrame."""
    response = requests.get(f"{base_url}/latest_runs")
    if response.status_code != 200:
        raise ApiError(f"GetLatestDagRun {response}")
    return ConvertJSONtoDF(response.json()['items'])
def GetPoolsList():
    """Return the list of worker pools configured on the Airflow instance."""
    response = requests.get(f"{base_url}/pools")
    if response.status_code != 200:
        raise ApiError(f"GetPoolsList {response}")
    return response.json()
def GetPoolInfo(poolName):
    """Return details of a single worker pool identified by name."""
    response = requests.get(f"{base_url}/pools/{poolName}")
    if response.status_code != 200:
        raise ApiError(f"GetPoolInfo {response}")
    return response.json()
def GetDagTaskInfo(dagId, taskId):
    """Return details of one task defined inside the given DAG."""
    response = requests.get(f"{base_url}/dags/{dagId}/tasks/{taskId}")
    if response.status_code != 200:
        raise ApiError(f"GetDagTaskInfo {response}")
    return response.json()
def GetDagPausedState(dagId):
    """Return the paused state of the given DAG."""
    response = requests.get(f"{base_url}/dags/{dagId}/paused")
    if response.status_code != 200:
        raise ApiError(f"GetDagPausedState {response}")
    return response.json()
def CreatePool(name, description, slots):
    """Create a worker pool with the given name, description and slot count."""
    payload = {"description": description, "name": name, "slots": slots}
    response = requests.post(f"{base_url}/pools", json=payload)
    if response.status_code != 200:
        raise ApiError(f"CreatePool {response}")
    return response.json()
def DeletePool(name):
    """Delete the worker pool identified by name."""
    response = requests.delete(f"{base_url}/pools/{name}")
    if response.status_code != 200:
        raise ApiError(f"DeletePool {response}")
    return response.json()
def ConvertJSONtoDF(jsonData):
    """Flatten JSON (a dict or list of dicts) into a tabular pandas DataFrame."""
    # json_normalize flattens nested keys into dotted column names.
    df = pd.json_normalize(jsonData)
    return df |
settings_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from appbe.aion_config import getrunningstatus
import time
def computetoGCPLLaMA13B(request):
    """Switch the compute infrastructure to GCP and refresh the usecases page."""
    from appbe import compute
    from appbe.pages import get_usecase_page
    try:
        compute.updateToComputeSettings('GCP')
        time.sleep(2)  # give the settings change a moment to take effect
        request.session['IsRetraining'] = 'No'
        status, context, action = get_usecase_page(request, usecasedetails, Existusecases)
        context['version'] = AION_VERSION
        return render(request, action, context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html', {'error': 'Fail to update ComputeSettings', 'version': AION_VERSION})
def computetoLLaMMA7b(request):
    """Switch the compute infrastructure for LLaMA-7B tuning and refresh the page.

    NOTE(review): this writes 'AWS', making it identical to computetoAWS —
    confirm AWS (not GCP) is the intended target for the 7B flow.
    """
    from appbe import compute
    from appbe.pages import get_usecase_page
    try:
        compute.updateToComputeSettings('AWS')
        time.sleep(2)  # give the settings change a moment to take effect
        request.session['IsRetraining'] = 'No'
        status, context, action = get_usecase_page(request, usecasedetails, Existusecases)
        context['version'] = AION_VERSION
        return render(request, action, context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html', {'error': 'Fail to update ComputeSettings', 'version': AION_VERSION})
def computetoAWS(request):
    """Switch the compute infrastructure to AWS and refresh the usecases page."""
    from appbe import compute
    from appbe.pages import get_usecase_page
    try:
        compute.updateToComputeSettings('AWS')
        time.sleep(2)  # give the settings change a moment to take effect
        request.session['IsRetraining'] = 'No'
        status, context, action = get_usecase_page(request, usecasedetails, Existusecases)
        context['version'] = AION_VERSION
        return render(request, action, context)
    except Exception as exc:
        print(exc)
        return render(request, 'usecases.html', {'error': 'Fail to update ComputeSettings', 'version': AION_VERSION})
def setting_context(request):
    """Assemble the context dict shared by all settings-page views.

    Gathers current configuration (graviton, EDA feature limit, telemetry
    opt-out, LLM credentials, runtime settings, compute infrastructure and
    the registered S3/GCS/Azure buckets) plus the session's usecase status.
    On failure a minimal context carrying an 'error' marker is returned
    instead of raising, so callers can still render the page.
    """
    from appbe.aion_config import get_graviton_data
    from appbe.aion_config import get_edafeatures
    from appbe.aion_config import get_telemetryoptout
    from appbe.aion_config import get_llm_data
    from appbe.aion_config import running_setting
    from appbe import compute
    from appbe.s3bucketsDB import get_s3_bucket
    from appbe.gcsbucketsDB import get_gcs_bucket
    from appbe.azureStorageDB import get_azureStorage
    from appbe.aion_config import settings
    usecasetab = settings()
    selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
    graviton_url, graviton_userid = get_graviton_data()
    No_of_Permissible_Features_EDA = get_edafeatures()
    telemetryoptout = get_telemetryoptout()
    llm_key,llm_url,api_type,api_version =get_llm_data()
    ruuningSetting = running_setting()
    computeinfrastructure = compute.readComputeConfig()
    try:
        context = {'computeinfrastructure':computeinfrastructure,'graviton_url':graviton_url,'graviton_userid':graviton_userid,'FeaturesEDA':No_of_Permissible_Features_EDA,'llm_key':llm_key,'llm_url':llm_url,'ruuningSetting':ruuningSetting,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'api_type':api_type,'api_version':api_version,'telemetryoptout':telemetryoptout,
                'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion':ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage()}
        context['version'] = AION_VERSION
        return context
    except Exception as e:
        print(e)
        # Fall back to a minimal context so the settings page still renders.
        context = {'computeinfrastructure':computeinfrastructure,'error':'Error in Settings'}
        context['version'] = AION_VERSION
        return context
def startKafka(request):
    """Start the AION Kafka consumer (Windows batch script) if not running.

    Bug fixes: this module never imported os (NameError on every call), and
    the original called the undefined name settings(request) instead of
    setting_context(request); the bare except has been narrowed.
    """
    import os
    try:
        nooftasks = getrunningstatus('AION_Consumer')
        if len(nooftasks):
            status = 'AION Kafka Consumer Already Running'
        else:
            kafkapath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','sbin','AION_Consumer.bat'))
            # Launch the consumer in a detached console window (Windows only).
            os.system('start cmd /c "'+kafkapath+'"')
            status = 'Kafka Consumer Initiated Successfully'
        context = setting_context(request)
        context['status'] = status
        return render(request, 'settings_page.html', context)
    except Exception:
        return render(request, 'settings_page.html', {'error':'Fail to start Kafka'})
def startPublishServices(request):
    """Start the model publish services and re-render the settings page."""
    from appbe.models import startServices
    startServices(request, usecasedetails, Existusecases)
    context = setting_context(request)
    context['status'] = 'Publish services start successfully'
    return render(request, 'settings_page.html', context)
def saveopenaiconfig(request):
    """Persist the OpenAI/LLM settings submitted from the settings page."""
    from appbe.aion_config import saveopenaisettings
    try:
        saveopenaisettings(request)
        context = setting_context(request)
        context['version'] = AION_VERSION
        context['success'] = True
        return render(request, 'settings_page.html', context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        context = {'error': 'error', 'runtimeerror': 'runtimeerror'}
        return render(request, 'settings_page.html', context)
def savegravitonconfig(request):
    """Persist the graviton settings submitted from the settings page."""
    from appbe.aion_config import savegravitonconfig
    try:
        savegravitonconfig(request)
        context = setting_context(request)
        context['version'] = AION_VERSION
        context['success'] = True
        return render(request, 'settings_page.html', context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        context = {'error':'error','runtimeerror':'runtimeerror'}
        return render(request, 'settings_page.html',context)
def saveaionconfig(request):
    """Persist the general AION configuration submitted from the settings page."""
    from appbe.aion_config import saveconfigfile
    try:
        saveconfigfile(request)
        context = setting_context(request)
        context['version'] = AION_VERSION
        context['success'] = True
        return render(request, 'settings_page.html', context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        context = {'error':'error','runtimeerror':'runtimeerror'}
        return render(request, 'settings_page.html',context)
def settings_page(request):
    """Render the main settings page."""
    try:
        context = setting_context(request)
        context['version'] = AION_VERSION
        context['selected'] = 'Settings'
        return render(request, 'settings_page.html', context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html', {'error':'Please enter valid inputs','version':AION_VERSION})
def adds3bucket(request):
    """Register a new S3 bucket from the settings form and re-render the page."""
    try:
        if request.method == 'POST':
            from appbe.s3bucketsDB import add_new_s3bucket
            status = add_new_s3bucket(request)
            context = setting_context(request)
            context['version'] = AION_VERSION
            # 'error' = missing values, 'error1' = duplicate bucket name.
            if status in ('error', 'error1'):
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                msg = 'Some values are missing' if status == 'error' else 'Bucket with same name already exist'
                context = {'error':msg,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
            return render(request,'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Fail to Add S3bucket'})
def GCSbucketAdd(request):
    """Register a new GCS bucket from the settings form and re-render the page."""
    try:
        if request.method == 'POST':
            from appbe.gcsbucketsDB import add_new_GCSBucket
            status = add_new_GCSBucket(request)
            context = setting_context(request)
            context['version'] = AION_VERSION
            # 'error' = missing values, 'error1' = duplicate bucket name.
            if status in ('error', 'error1'):
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                message = 'Some values are missing' if status == 'error' else 'Bucket with same name already exist'
                context = {'error': message, 'gcsbuckets': get_gcs_bucket(), 's3buckets': get_s3_bucket(), 'azurestorage': get_azureStorage(), 'version': AION_VERSION}
            return render(request, 'settings_page.html', context)
    except Exception as e:
        print(e)
        return render(request, 'settings_page.html', {'error': 'Fail to Add GCSbucket', 'version': AION_VERSION})
def azurestorageAdd(request):
    """Register a new Azure storage container and re-render the settings page."""
    try:
        if request.method == 'POST':
            from appbe.azureStorageDB import add_new_azureStorage
            status = add_new_azureStorage(request)
            context = setting_context(request)
            context['version'] = AION_VERSION
            # 'error' = missing values, 'error1' = duplicate container name.
            if status in ('error', 'error1'):
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                msg = 'Some values are missing' if status == 'error' else 'Bucket with same name already exist'
                context = {'error':msg,'gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
            return render(request,'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Fail to Add Azure Container'})
def removeazurebucket(request,name):
    """Delete the named Azure storage entry and re-render the settings page."""
    try:
        if request.method == 'GET':
            from appbe.azureStorageDB import remove_azure_bucket
            status = remove_azure_bucket(name)
            context = setting_context(request)
            context['version'] = AION_VERSION
            if status == 'error':
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                context = {'error':'Failed to delete Azure Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
            return render(request,'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Failed to delete Azure Bucket'})
def removes3bucket(request,name):
    """Delete the named S3 bucket entry and re-render the settings page."""
    try:
        if request.method == 'GET':
            from appbe.s3bucketsDB import remove_s3_bucket
            status = remove_s3_bucket(name)
            context = setting_context(request)
            context['version'] = AION_VERSION
            if status == 'error':
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                context = {'error':'Failed to delete S3bucket','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
            return render(request,'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Failed to delete S3bucket'})
def removegcsbucket(request,name):
    """Delete the named GCS bucket entry and re-render the settings page."""
    try:
        if request.method == 'GET':
            from appbe.gcsbucketsDB import remove_gcs_bucket
            status = remove_gcs_bucket(name)
            context = setting_context(request)
            context['version'] = AION_VERSION
            if status == 'error':
                from appbe.s3bucketsDB import get_s3_bucket
                from appbe.gcsbucketsDB import get_gcs_bucket
                from appbe.azureStorageDB import get_azureStorage
                context = {'error':'Failed to delete GCS Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
            return render(request,'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Failed to delete GCS Bucket'})
def gcpcomputesettings(request):
    """Save the GCP compute configuration and re-render the settings page."""
    try:
        from appbe import compute
        status = compute.updateGCPConfig(request)
        context = setting_context(request)
        context['version'] = AION_VERSION
        if status == 'error':
            context['ErrorMsg'] = 'Some values are missing'
        else:
            # Bug fix: the original set success=True even when status=='error'.
            context['success'] = True
        return render(request, 'settings_page.html',context)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap SystemExit.
        return render(request, 'settings_page.html',{'error': 'Fail to Save GCP Settings','version':AION_VERSION})
def amazonec2settings(request):
try:
from appbe import compute
status = compute.updateComputeConfig(request)
context = setting_context(request)
if status == 'error':
context['ErrorMsg'] = 'Some values are missing'
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Fail to Save AWS Settings','version':AION_VERSION}) |
dg_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from appbe.pages import getversion
AION_VERSION = getversion()
def datagenrate(request):
    """Render the synthetic data-generation page."""
    from appbe.aion_config import settings
    context = {
        'selected': 'DataOperations',
        'usecasetab': settings(),
        'version': AION_VERSION,
    }
    return render(request, "datagenrate.html", context)
def generateconfig(request):
from appbe import generate_json_config as gjc
try:
gjc.generate_json_config(request)
return render(request, "datagenrate.html",context={'success':'success','selected':'DataOperations'})
except Exception as e:
print(e)
return render(request, "datagenrate.html",context={'error':str(e),'selected':'DataOperations'}) |
admin.py | from django.contrib import admin
# Register your models here.
|
prediction_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
import json
from appbe import compute
import logging
def get_instance_id(modelID):
    """Look up the cloud instance label for *modelID* in the local LLMTuning table.

    Returns a human-readable string built from the stored row, or a placeholder
    when the table or the record does not exist.
    """
    from appbe.sqliteUtility import sqlite_db
    from appbe.dataPath import DATA_DIR
    db_dir = os.path.join(DATA_DIR, 'sqlite')
    sqlite_obj = sqlite_db(db_dir, 'config.db')
    # Guard clauses: no table or no matching row -> placeholder text.
    if not sqlite_obj.table_exists("LLMTuning"):
        return 'Instance ID not available'
    data = sqlite_obj.get_data('LLMTuning', 'usecaseid', modelID)
    print(data)
    if len(data) == 0:
        return 'Instance ID not available'
    # Indices 3 and 2 presumably hold the hypervisor name and the instance id —
    # TODO confirm against the LLMTuning schema.
    return data[3] + ' instance ' + data[2]
def PredictForSingleInstance(request):
    """Handle the single-instance prediction form.

    Runs the prediction via appbe.prediction.singleInstancePredict. When the
    submit button is 'predict', re-renders the prediction page enriched with
    the stored training results; otherwise returns the raw context dict to the
    caller (NOTE(review): a dict, not an HttpResponse — caller must render).
    """
    from appbe.trainresult import ParseResults
    submittype = request.POST.get('predictsubmit')
    from appbe.prediction import singleInstancePredict
    context = singleInstancePredict(request, Existusecases, usecasedetails)
    if submittype.lower() == 'predict':
        from appbe.train_output import get_train_model_details
        trainingStatus, modelType, bestmodel = get_train_model_details(DEPLOY_LOCATION, request)
        model = Existusecases.objects.get(ModelName=request.session['ModelName'],
                                          Version=request.session['ModelVersion'])
        output_train_json_filename = str(model.TrainOuputLocation)
        # Read-only access is enough here; the original 'r+' mode needlessly
        # required write permission and leaked the handle on exceptions.
        with open(output_train_json_filename, "r", encoding="utf-8") as f:
            training_output = f.read()
        # ParseResults also returns survival images; only the result table is used here.
        result, _ = ParseResults(training_output)
        context.update({'result': result})
        context['version'] = AION_VERSION
        context['modelType'] = modelType
        context['bestmodel'] = bestmodel
        return render(request, 'prediction.html', context)
    else:
        context['version'] = AION_VERSION
        return context
def getTrainingStatus(request):
    """Return the features-used summary for the session's trained model.

    Loads the stored training-output JSON for the use case/version held in the
    session and delegates formatting to appbe.trainresult.FeaturesUsedForTraining.
    """
    model = Existusecases.objects.get(ModelName=request.session['ModelName'], Version=request.session['ModelVersion'])
    output_train_json_filename = str(model.TrainOuputLocation)
    # Read-only access is enough; the original 'r+' mode needlessly required
    # write permission on the results file and leaked the handle on exceptions.
    with open(output_train_json_filename, "r", encoding="utf-8") as f:
        training_output = f.read()
    from appbe.trainresult import FeaturesUsedForTraining
    return FeaturesUsedForTraining(training_output)
def Prediction(request):
    """Render the prediction page for the session's current use case.

    Validates that a trained model exists, loads the training output and the
    saved configuration, derives the problem type and its input fields (from a
    sample of the training data where possible), builds the prediction-service
    URL for the deployed model, and renders 'prediction.html'. Any failure
    falls through to a generic error render.
    """
    log = logging.getLogger('log_ux')
    from appbe.trainresult import ParseResults
    from appbe.dataIngestion import delimitedsetting
    from appbe import service_url
    from appbe.aion_config import settings
    usecasetab = settings()
    try:
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        computeinfrastructure = compute.readComputeConfig()
        #print(computeinfrastructure)
        # A model must have finished training before prediction is offered.
        if ModelStatus != 'SUCCESS':
            log.info('Prediction:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first or launch an existing trained model')
            return render(request, 'prediction.html', {
                'error': 'Please train the model first or launch an existing trained model',
                'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION})
        else:
            if 'ModelVersion' not in request.session:
                log.info('Prediction:' + str(selected_use_case) + ':' + str(
                    ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first')
                return render(request, 'prediction.html',
                              {'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION})
            elif request.session['ModelVersion'] == 0:
                log.info('Prediction:' + str(selected_use_case) + ':' + str(
                    ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first')
                return render(request,'prediction.html',{'usecasetab':usecasetab,'error':'Please train the model first','selected':'prediction','version':AION_VERSION})
            else:
                from appbe.train_output import get_train_model_details
                trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request)
                imagedf = ''
                model_count = Existusecases.objects.filter(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'],Status='SUCCESS').count()
                model = Existusecases.objects.get(ModelName=request.session['ModelName'],
                                                  Version=request.session['ModelVersion'])
                output_train_json_filename = str(model.TrainOuputLocation)
                f = open(output_train_json_filename, "r+")
                training_output = f.read()
                f.close()
                result,survical_images = ParseResults(training_output)
                if model_count >= 1:
                    # Reload the saved training configuration to derive the
                    # problem type and the expected prediction input fields.
                    updatedConfigFile = request.session['config_json']
                    #print(updatedConfigFile)
                    f = open(updatedConfigFile, "r")
                    configSettings = f.read()
                    f.close()
                    configSettingsJson = json.loads(configSettings)
                    analysisType = configSettingsJson['basic']['analysisType']
                    problem_type = ""
                    # The analysisType section is a dict of flags; the key set
                    # to 'True' names the trained problem type.
                    for k in analysisType.keys():
                        if configSettingsJson['basic']['analysisType'][k] == 'True':
                            problem_type = k
                            break
                    if problem_type.lower() == 'recommendersystem':
                        modelName = ""
                        recommender_models = configSettingsJson['basic']['algorithms']['recommenderSystem']
                        for k in recommender_models.keys():
                            if configSettingsJson['basic']['algorithms']['recommenderSystem'][k] == 'True':
                                modelName = k
                                break
                        # Apriori produces association rules, not per-row predictions.
                        if modelName.lower() == 'associationrules-apriori':
                            return render(request, 'prediction.html', {
                                'error': 'Prediction not supported for Association Rules (Apriori)',
                                'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION})
                    delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier'])
                    #problemtypes = configSettingsJson['basic']['analysisType']
                    #print(problemtypes.keys())
                    from appfe.modelTraining.train_views import getMLModels
                    problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
                    iterName = request.session['usecaseid'].replace(" ", "_")
                    selected_use_case = request.session['UseCaseName']
                    ModelVersion = request.session['ModelVersion']
                    ModelStatus = request.session['ModelStatus']
                    # Build the default input-field dict shown on the form,
                    # varying by problem type.
                    if problem_type == 'timeSeriesForecasting': #task 11997
                        inputFieldsDict = {'noofforecasts': 10}
                    elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating':
                        inputFieldsDict = {"uid": 1, "numberOfRecommendation":10} #Task 11190
                    elif problem_type == 'stateTransition':
                        inputFeatures = configSettingsJson['basic']['trainingFeatures']
                        targetFeature = configSettingsJson['basic']['targetFeature']
                        if inputFeatures != '':
                            inputFeaturesList = inputFeatures.split(',')
                        else:
                            inputFeaturesList = []
                        inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
                    else:
                        inputFeatures = configSettingsJson['basic']['trainingFeatures']
                        targetFeature = configSettingsJson['basic']['targetFeature']
                        if inputFeatures != '':
                            inputFeaturesList = inputFeatures.split(',')
                        else:
                            inputFeaturesList = []
                        if targetFeature in inputFeaturesList:
                            inputFeaturesList.remove(targetFeature)
                        if configSettingsJson['basic']['contextFeature'] != '':
                            inputFeaturesList.append(configSettingsJson['basic']['contextFeature'])
                        if problem_type == 'llmFineTuning':
                            # LLM prediction takes generation controls as extra inputs.
                            inputFeaturesList.append('Temperature')
                            inputFeaturesList.append('Max Tokens')
                        if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997
                            if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na':
                                inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature'])
                        dataFilePath = str(configSettingsJson['basic']['dataLocation'])
                        if problem_type != 'llmFineTuning':
                            if os.path.isfile(dataFilePath):
                                # Sample the first data row to pre-populate the form fields.
                                df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace')
                                try:
                                    inputFieldsDict = df.to_dict(orient='index')[0]
                                except:
                                    # Fall back to zero-filled fields when the sample is unusable.
                                    inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict()
                            else:
                                inputFieldsDict = {"File":"EnterFileContent"}
                        else:
                            inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict()
                            inputFieldsDict['Temperature'] = '0.1'
                            from appbe.prediction import get_instance
                            hypervisor,instanceid,region,image = get_instance(iterName+'_'+str(ModelVersion))
                            # Default token budget differs per cloud provider.
                            if hypervisor.lower() == 'aws':
                                inputFieldsDict['Max Tokens'] = '1024'
                            else:
                                inputFieldsDict['Max Tokens'] = '4096'
                    inputFields = []
                    inputFields.append(inputFieldsDict)
                    settings_url = ''
                    # Resolve the service endpoint the form will POST to.
                    if problem_type == 'llmFineTuning':
                        ser_url = get_instance_id(iterName+'_'+str(ModelVersion))
                        settings_url = ''
                        modelSize = ''
                        if 'modelSize' in configSettingsJson['basic']:
                            selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels]
                            for k in selectedModelSize.keys():
                                if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True':
                                    modelSize = k
                                    break
                            mlmodels = mlmodels+'-'+modelSize
                    elif problem_type == 'stateTransition':
                        ser_url = service_url.read_service_url_params(request)
                        settings_url = service_url.read_service_url_params(request)
                        ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+iterName+'&version='+str(ModelVersion)
                        settings_url = settings_url+'pattern_anomaly_settings?usecaseid='+iterName+'&version='+str(ModelVersion)
                    else:
                        ser_url = service_url.read_service_url_params(request)
                        ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion)
                    onnx_runtime = False
                    analyticsTypes = problem_type
                    usecasename = request.session['usecaseid'].replace(" ", "_")
                    return render(request, 'prediction.html',
                                  {'inputFields': inputFields,'usecasename':usecasename,'mlmodels':mlmodels,'configSettingsJson':configSettingsJson,'result':result,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url,
                                   'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'modelType':modelType,'bestmodel':bestmodel,'usecasetab':usecasetab})
                else:
                    log.info('Prediction; Error: Please train the model first')
                    return render(request, 'prediction.html',
                                  {'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION})
    except Exception as e:
        print(e)
        log.info('Prediction:' + str(selected_use_case) + ':' + str(
            ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error:'+str(e))
        return render(request, 'prediction.html',{'usecasetab':usecasetab,'error': 'Failed to perform prediction', 'selected': 'prediction','version':AION_VERSION})
drift_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe import installPackage
import json
from appbe import service_url
from appbe import compute
import sys
import csv
import time
from appbe.training import checkModelUnderTraining
import logging
def Distribution(request):
    """Render input-drift distribution plots for the session's use case.

    Either plots the training data alone ('trainingdatadrift' submit) or
    ingests new data (from a URL or an uploaded file), plots training vs. new
    vs. combined distributions, and calls the monitoring service via Drift()
    for a drift report. Renders 'inputdrif.html' (or 'upload.html' on a bad
    URL).
    """
    from appbe import exploratory_Analysis as ea
    log = logging.getLogger('log_ux')
    from appbe.aion_config import settings
    usecasetab = settings()
    computeinfrastructure = compute.readComputeConfig()
    try:
        from appbe.telemetry import UpdateTelemetry
        UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes')
        t1 = time.time()
        model = Existusecases.objects.get(ModelName=request.session['ModelName'],
                                          Version=request.session['ModelVersion'])
        output_train_json_filename = str(model.TrainOuputLocation)
        f = open(output_train_json_filename, "r+")
        training_output = f.read()
        f.close()
        training_output = json.loads(training_output)
        featuresused = training_output['data']['featuresused']
        # featuresused is stored as a stringified Python list; eval turns it
        # back into a list (trusted, locally-written training output).
        feature = eval(featuresused)
        dataFilePath = request.session['datalocation']
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        ser_url = service_url.read_monitoring_service_url_params(request)
        iterName = request.session['usecaseid'].replace(" ", "_")
        ModelVersion = request.session['ModelVersion']
        ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
        pser_url = service_url.read_performance_service_url_params(request)
        pser_url = pser_url+'performanceusecaseid='+iterName+'&version='+str(ModelVersion)
        if request.POST.get('inputdriftsubmit') == 'trainingdatadrift':
            # Training-data-only view: no new data, no service call.
            historicadata = request.session['datalocation']
            trainingdf = pd.read_csv(historicadata)
            trainingDrift = ea.getDriftDistribution(feature, trainingdf)
            newDataDrift = ''
            concatDataDrift = ''
            drift_msg = ''
            driftdata = 'NA'
        else:
            historicadata = request.session['datalocation']
            trainingdf = pd.read_csv(historicadata)
            trainingDrift = ''
            type = request.POST.get("optradio")
            if type == "url":
                # New data fetched from a URL and cached as a timestamped CSV.
                try:
                    url = request.POST.get('urlpathinput')
                    newdatadf = pd.read_csv(url)
                    filetimestamp = str(int(time.time()))
                    dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv')
                    newdatadf.to_csv(dataFile, index=False)
                    request.session['drift_datalocations']= dataFile
                    driftdata = request.session['drift_datalocations']
                except Exception as e:
                    request.session['currentstate'] = 0
                    # Map common fetch failures to user-facing messages.
                    e = str(e)
                    if e.find("tokenizing")!=-1:
                        error = "This is not an open source URL to access data"
                    elif e.find("connection")!=-1:
                        error = "Can not access the URL through HCL network, please try with other network"
                    else:
                        error = 'Please provide a correct URL'
                    context = {'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','s3buckets': get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
                               'kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab}
                    log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', ' + e)
                    return render(request, 'upload.html', context)
            else:
                # New data uploaded as a file; stream it to a timestamped copy.
                if request.FILES:
                    Datapath = request.FILES['DataFilePath']
                    from io import StringIO
                    content = StringIO(Datapath.read().decode('utf-8'))
                    reader = csv.reader(content)
                    df = pd.DataFrame(reader)
                    df.columns = df.iloc[0]
                    df = df[1:]
                    ext = str(Datapath).split('.')[-1]
                    filetimestamp = str(int(time.time()))
                    if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
                        dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
                    else:
                        dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
                    with open(dataFile, 'wb+') as destination:
                        for chunk in Datapath.chunks():
                            destination.write(chunk)
                        destination.close()
                if(os.path.isfile(dataFile) == False):
                    context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
                               ' ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
                    log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
                        ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist')
                    return render(request, 'inputdrif.html', context)
                request.session['drift_datalocations'] = dataFile
                driftdata = request.session['drift_datalocations']
            # Common path for both URL and upload: plot distributions and ask
            # the monitoring service for a drift report.
            newdatadf = pd.read_csv(driftdata)
            newDataDrift = ea.getDriftDistribution(feature, trainingdf, newdatadf)
            condf = pd.concat([trainingdf, newdatadf], ignore_index=True, sort=True)
            concatDataDrift = ea.getDriftDistribution(feature,trainingdf,condf)
            drift_msg,htmlPath = Drift(request,historicadata, dataFile, feature)
            if htmlPath != 'NA':
                file = open(htmlPath, "r",errors='ignore')
                driftdata = file.read()
                file.close()
            else:
                driftdata = 'NA'
        t2 = time.time()
        log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
        return render(request, 'inputdrif.html',
                      {'trainingDrift': trainingDrift, 'newDataDrift': newDataDrift, 'concatDataDrift': concatDataDrift,'usecasetab':usecasetab,
                       'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version' :AION_VERSION,
                       'selected': 'monitoring', 'drift_msg': drift_msg,'htmlPath':driftdata,'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure})
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        selected_use_case = request.session['UseCaseName']
        ModelVersion = request.session['ModelVersion']
        ModelStatus = request.session['ModelStatus']
        ser_url = service_url.read_monitoring_service_url_params(request)
        iterName = request.session['usecaseid'].replace(" ", "_")
        ModelVersion = request.session['ModelVersion']
        ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
        pser_url = service_url.read_performance_service_url_params(request)
        pser_url = pser_url+'performanceusecaseid='+iterName+'&version='+str(ModelVersion)
        context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Failed to perform drift analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version' : AION_VERSION}
        log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to do drift analysis'+', '+str(inst))
        log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        return render(request, 'inputdrif.html', context)
def Drift(request, trainingdatalocation, newdatalocation, features):
    """POST the training/current data locations to the monitoring service and parse the reply.

    Returns a (message, html_path) tuple. *message* is either a list with the
    service message, the list of affected columns, or an error string;
    *html_path* is the path of the generated drift report, or 'NA' when none
    is available. Every path — including all error paths — now returns a
    2-tuple, because the caller unpacks ``drift_msg, htmlPath = Drift(...)``;
    previously the error paths returned a single value (or fell through to
    None), which raised TypeError at the call site.
    """
    log = logging.getLogger('log_ux')
    selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
    try:
        inputFieldsJson = {"trainingDataLocation": trainingdatalocation, "currentDataLocation": newdatalocation}
        inputFieldsJson = json.dumps(inputFieldsJson)
        iterName = request.session['usecaseid'].replace(" ", "_")
        ModelVersion = request.session['ModelVersion']
        ser_url = service_url.read_monitoring_service_url_params(request)
        ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
        import requests
        try:
            response = requests.post(ser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
            if response.status_code != 200:
                outputStr = response.content
                # Fixed: return a tuple so the caller's unpack works on HTTP errors.
                return outputStr, 'NA'
        except Exception as inst:
            print(inst)
            if 'Failed to establish a new connection' in str(inst):
                Msg = 'AION Service needs to be started'
            else:
                Msg = 'Error during Drift Analysis'
            log.info('Drift : ' + str(selected_use_case) + ' : ' + str(
                ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + Msg + ', ' + str(inst))
            # Fixed: tuple return (was a bare message).
            return Msg, 'NA'
        outputStr = response.content
        outputStr = outputStr.decode('utf-8')
        outputStr = outputStr.strip()
        decoded_data = json.loads(outputStr)
        htmlPath = 'NA'
        if decoded_data['status'] == 'SUCCESS':
            data = decoded_data['data']
            htmlPath = decoded_data['htmlPath']
            if 'Message' in data:
                Msg = []
                Msg.append(data['Message'])
            else:
                Msg = data['Affected Columns']
            log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Success')
        else:
            Msg = 'Error during Drift Analysis'
            htmlPath = 'NA'
            log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(Msg))
        return Msg, htmlPath
    except Exception as e:
        print(e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
        log.info('Drift : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + str(e))
        log.info('Details : ' +str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        # Fixed: previously fell through returning None, crashing callers that unpack.
        return 'Error during Drift Analysis', 'NA'
def Evaluate(request):
    """Run performance-drift evaluation for the session's use case.

    Ingests current data from a URL or an uploaded file, POSTs the training
    and current data locations to the performance service, and renders the
    returned drift report in 'inputdrif.html' ('upload.html' on a bad URL).
    NOTE(review): on HTTP/connection errors this returns a raw string instead
    of an HttpResponse — confirm how callers handle that.
    """
    from appbe.aion_config import settings
    usecasetab = settings()
    log = logging.getLogger('log_ux')
    try:
        from appbe.telemetry import UpdateTelemetry
        UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes')
        t1 = time.time()
        selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
        computeinfrastructure = compute.readComputeConfig()
        type = request.POST.get("optradio")
        ser_url = service_url.read_monitoring_service_url_params(request)
        iterName = request.session['usecaseid'].replace(" ", "_")
        ModelVersion = request.session['ModelVersion']
        ser_url = ser_url+'monitoring?usecaseid='+iterName+'_'+str(ModelVersion)
        pser_url = service_url.read_performance_service_url_params(request)
        pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
        if type == "url":
            # Current data fetched from a URL and cached as a timestamped CSV.
            try:
                url = request.POST.get('urlpathinput')
                newdatadf = pd.read_csv(url)
                filetimestamp = str(int(time.time()))
                dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv')
                newdatadf.to_csv(dataFile, index=False)
            except Exception as e:
                request.session['currentstate'] = 0
                # Map common fetch failures to user-facing messages.
                e = str(e)
                if e.find("tokenizing")!=-1:
                    error = "This is not an open source URL to access data"
                    log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', '+str(e))
                elif e.find("connection")!=-1:
                    error = "Can not access the URL through HCL network, please try with other network"
                    log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
                        ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + error +', '+e)
                else:
                    error = 'Please provide a correct URL'
                    log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
                        ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:' + error+', '+e)
                context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab,'version':AION_VERSION}
                return render(request, 'upload.html', context)
        else:
            # Current data uploaded as a file; stream it to a timestamped copy.
            if request.FILES:
                Datapath = request.FILES['DataFilePath']
                from io import StringIO
                content = StringIO(Datapath.read().decode('utf-8'))
                reader = csv.reader(content)
                df = pd.DataFrame(reader)
                df.columns = df.iloc[0]
                df = df[1:]
                ext = str(Datapath).split('.')[-1]
                filetimestamp = str(int(time.time()))
                if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
                    dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
                else:
                    dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
                with open(dataFile, 'wb+') as destination:
                    for chunk in Datapath.chunks():
                        destination.write(chunk)
                    destination.close()
            if(os.path.isfile(dataFile) == False):
                context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
                           'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version':AION_VERSION}
                log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
                    ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + ' Error : Data file does not exist')
                return render(request, 'inputdrif.html', context)
        # Common path: send both data locations to the performance service.
        trainingdatalocation = request.session['datalocation']
        inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":dataFile}
        inputFieldsJson = json.dumps(inputFieldsJson)
        import requests
        try:
            #response = requests.post(pser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={"Content-Type":"application/json",})
            response = requests.post(pser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
            if response.status_code != 200:
                outputStr=response.content
                log.info('Performance Drift:' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error: Status code != 200')
                return outputStr
        except Exception as inst:
            if 'Failed to establish a new connection' in str(inst):
                Msg = 'AION Service needs to be started'
            else:
                Msg = 'Error during Drift Analysis'
            log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0 ' + 'sec' + ' : ' + 'Error : '+Msg+', ' + str(inst))
            return Msg
        outputStr=response.content
        outputStr = outputStr.decode('utf-8')
        outputStr = outputStr.strip()
        decoded_data = json.loads(outputStr)
        #print(decoded_data)
        if decoded_data['status'] == 'SUCCESS':
            # Inline the generated drift-report HTML when the service provides one.
            htmlPath = decoded_data['htmlPath']
            #print(htmlPath)
            if htmlPath != 'NA':
                file = open(htmlPath, "r",errors='ignore')
                driftdata = file.read()
                file.close()
            else:
                driftdata = 'NA'
            print(htmlPath)
            context = {'status':'SUCCESS','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
            t2 = time.time()
            log.info('Performance Drift:' + str(selected_use_case) + ' : ' + str(
                ModelVersion) + ' : ' + str(round(t2-t1)) + 'sec' + ' : ' + 'Success')
            return render(request, 'inputdrif.html', context=context)
        else:
            driftdata = 'Error'
            context = {'status':'ERROR','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab,
                       'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
            log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
                ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : driftdata = Error')
            return render(request, 'inputdrif.html', context=context)
    except Exception as e:
        print(e)
        context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Fail to perform Drift Analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
                   'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
        log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
            ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Fail to perform Drift Analysis' + ', ' + str(e))
        return render(request, 'inputdrif.html', context=context)
llm_views.py | from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import time
from django.template import loader
from django import template
from appbe.aion_config import get_llm_data
from django.views.decorators.csrf import csrf_exempt
import os
import json
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.pages import getversion
AION_VERSION = getversion()
def QueryToOpenAI(text,tempPrompt):
    """Send *text* (prefixed by *tempPrompt*) to an Azure OpenAI completion deployment.

    The input is split on '.' and accumulated into chunks of at most ~2800
    tokens (counted with tiktoken) so each request fits the model context;
    each chunk is summarized separately and the partial summaries are joined.
    Returns the joined summary string, or an "exception : ..." string on
    failure (callers must check for that prefix — no exception is raised).
    """
    FragmentationAllowed="yes" #yes or no
    try:
        import openai
        key,url,api_type,api_version=get_llm_data()
        # Without credentials there is nothing to call.
        if (key == "") and (url == "") :
            print("No API Key")
            return("API Key and URL not provided")
        openai.api_key = key
        openai.api_base = url
        openai.api_type = 'azure'
        openai.api_version = '2023-05-15'
        deployment_name="Text-Datvinci-03"
        import tiktoken
        encoding = tiktoken.encoding_for_model("text-davinci-003")
        maxTokens=1024 #4096-1024 == 3072
        lgt=0
        if FragmentationAllowed=="yes" :
            # Build chunks sentence-by-sentence; flush a chunk once it crosses
            # 2800 tokens. partialData/multipleChunk track whether a trailing
            # partial chunk still needs to be appended after the loop.
            words = text.split(".")
            chunk=""
            chunks=[]
            multipleChunk="no"
            partialData="no"
            for i in range(len(words)):
                chunk=chunk+words[i]+"."
                chunk_token_count = encoding.encode(chunk)
                length=len(chunk_token_count)
                partialData="yes"
                if length > 2800 :
                    chunks.append(chunk)
                    chunk=""
                    #print("\n\n\n")
                    partialData="no"
                    multipleChunk="yes"
            # Whole text fit in one chunk: append it.
            if (multipleChunk =="no" ):
                chunks.append(chunk)
                chunk=""
            # Leftover partial chunk after at least one flush: append it too.
            if ((partialData =="yes") and (multipleChunk =="yes")):
                chunks.append(chunk)
                chunk=""
            summaries = []
            # Summarize each chunk independently and concatenate the results.
            for chunk in chunks:
                response = openai.Completion.create(engine=deployment_name, prompt=f"{tempPrompt}: {chunk}",temperature=0.2, max_tokens=maxTokens,frequency_penalty=0,presence_penalty=0)
                summary = response['choices'][0]['text'].replace('\n', '').replace(' .', '.').strip()
                summaries.append(summary)
                wordsInSum = summary.split()
            summaries=' '.join(summaries)
            wordsInSum = summaries.split()
            return summaries
        else :
            return "ok"
    except openai.error.Timeout as e:
        return "exception : Timeout Error due to Network Connection"
    except Exception as e:
        return "exception : "+str(e)
def azureOpenAiDavinciSumarization(request):
inputDataType = str(request.GET.get('FileType'))
import time
t1=time.time()
documentType=""
if inputDataType == 'file':
dataPath = str(request.GET.get('dataPath'))
#print("Datapath--",dataPath)
if dataPath.endswith(".pdf"):
from appbe.dataIngestion import pdf2text
originalText=pdf2text(dataPath)
if dataPath.endswith(".txt"):
data=[]
with open(dataPath, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
originalText=str1
if dataPath.endswith(".docx"):
import docx
doc = docx.Document(dataPath)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
fullText= '\n'.join(fullText)
originalText=fullText
if inputDataType == 'rawText':
originalText = str(request.GET.get('textDataProcessing'))
dataPath=""
if originalText== "None" or originalText== "":
context = {'originalText': originalText,'returnedText': "No Input given"}
print("returned due to None")
return render(request, "textsummarization.html",context)
KeyWords=str(request.GET.get('userUpdatedKeyword'))
contextOfText=str(request.GET.get('userUpdatedContext'))
doctype = str(request.GET.get('doctypeUserProvided'))
docDomainType = ["medical","other"]
Prompts = [
"Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;",
"Summarize the following article with minimum 500 words so that summarization include all main points from topics like: "
]
for i in range (len(docDomainType)) :
if docDomainType[i] in doctype.lower() :
docDomainPrompts=Prompts[i]
if docDomainType[i]=="medical" :
print("medical doc")
documentType="medical"
docDomainFinalPrompts=docDomainPrompts
tempPrompt1="Summarize the following article so that summarization must include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"
tempPrompt2="Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"
else :
print("other doc-a-")
docDomainFinalPrompts=docDomainPrompts+" "+contextOfText
tempPrompt1="Summarize the following article with minimum 500 words so that summarization include all main points from topics like: "+contextOfText
tempPrompt2=tempPrompt1
break
if (i== len(docDomainType)-1) :
print("other doc-b-")
docDomainPrompts=Prompts[i]
docDomainFinalPrompts=docDomainPrompts+" "+contextOfText
tempPrompt1="Summarize the following article so that summarization include all main points from topics like: "+contextOfText
tempPrompt2=tempPrompt1
try:
pattern =['Summary','Study Objective','Study Design', 'Demographics of Patients', 'Devices Used in Study','Duration of Exposure to Device','Study Outcomes','Complications','Adverse Events','Confounding Factors','Study Limitations and Weakness','Usability of the Device','Misuse and Off-Label Use of the Device','Conflict of Interest','Statistical Analysis','Conclusions']
import tiktoken
encoding = tiktoken.encoding_for_model("text-davinci-003")
encodedData = encoding.encode(originalText)
totalToken=len(encodedData)
while totalToken > 2800:
originalText=QueryToOpenAI(originalText,tempPrompt1)
encodedData = encoding.encode(originalText)
totalToken=len(encodedData)
retText=QueryToOpenAI(originalText,tempPrompt2)
import re
summary1=retText
summary2=retText
if documentType=="medical" :
for i in range(len(pattern)):
summary1=summary1.replace(pattern[i]+':','<br>'+'<u>'+pattern[i]+'</u>'+'<br>')
for i in range(len(pattern)):
summary1=summary1.replace(pattern[i],'<br>'+'<u>'+pattern[i]+'</u>'+'<br>')
for i in range(len(pattern)):
summary2=summary2.replace(pattern[i]+':','')
for i in range(len(pattern)):
summary2=summary2.replace(pattern[i],'')
#retText2=""
#tempPrompt="Find some most highlighting points in the following article"
#retText2=QueryToOpenAI(originalText,tempPrompt)
#retText3=""
#tempPrompt="Find only one or two risk factors that are mentioned in the following article"
#retText3=QueryToOpenAI(originalText,tempPrompt)
#retText4=""
#tempPrompt="Find statistical informtation that are mentioned in the following article"
#retText4=QueryToOpenAI(originalText,tempPrompt)
#retText5=""
#tempPrompt="Find name of the author only one time that are mentioned in the following article"
#retText5=QueryToOpenAI(originalText,tempPrompt)
#retText6=""
#tempPrompt="Suggest the name of the title for the following article"
#retText6=QueryToOpenAI(originalText,tempPrompt)
t2=time.time()
#print("\n time taken-->", t2-t1 ,"length of sum",str(length))
print("\n time taken-->", t2-t1 )
#print("\n summary from LLM-->\n",returnedText)
#context = {'title': retText6, 'summary': summary1, 'summary2': summary2, 'AuthorName': "Author names :"+retText5,'BulletPoints': retText2,'Riskfactor': retText3,'StatInfo': retText4}
context = {'title': "", 'summary': summary1, 'summary2': summary2, 'AuthorName': "",'BulletPoints': "",'Riskfactor': "",'StatInfo': ""}
return HttpResponse(json.dumps(context), content_type="application/json")
except:
context = {'returnedText': "exception"}
return HttpResponse(json.dumps(context), content_type="application/json")
def azureOpenAiDavinci(request):
    """Django view: extract text from an uploaded file (pdf/txt/docx) or from
    raw POSTed text, then query the LLM for keywords and context.

    Expects POST fields: 'FileType' ('file' or 'rawText'),
    'doctypeUserProvided', and either an uploaded 'file' or
    'textDataProcessing'. Returns a JSON HttpResponse on success/failure of
    the LLM calls, or renders textsummarization.html when no input is given.
    """
    key, url, api_type, api_version = get_llm_data()
    inputDataType = str(request.POST.get('FileType'))
    # Initialise all result locals up front so no code path below can hit an
    # unbound local (the original raised NameError for unsupported file
    # extensions and UnboundLocalError inside the except handler).
    originalText = ""
    dataPath = ""
    KeyWords = ""
    contextOfText = ""
    if inputDataType == 'file':
        Datapath = request.FILES['file']
        ext = str(Datapath).split('.')[-1]
        temp1 = str(Datapath).split('.')
        filetimestamp = str(int(time.time()))
        if ext.lower() in ['pdf', 'txt', 'docx']:
            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + temp1[0] + '_' + filetimestamp + '.' + ext)
            # Persist the uploaded chunks to a timestamped file.
            with open(dataFile, 'wb+') as destination:
                for chunk in Datapath.chunks():
                    destination.write(chunk)
            dataPath = dataFile
        if dataPath.endswith(".pdf"):
            from appbe.dataIngestion import pdf2text
            originalText = pdf2text(dataPath)
        if dataPath.endswith(".txt"):
            with open(dataPath, "r", encoding="utf-8") as f:
                originalText = f.read()
        if dataPath.endswith(".docx"):
            import docx
            doc = docx.Document(dataPath)
            originalText = '\n'.join(para.text for para in doc.paragraphs)
    if inputDataType == 'rawText':
        originalText = str(request.POST.get('textDataProcessing'))
        dataPath = ""
    doctype = str(request.POST.get('doctypeUserProvided'))
    if originalText == "None" or originalText == "":
        context = {'originalText': originalText, 'returnedText': "No Input given"}
        print("returned due to None")
        return render(request, "textsummarization.html", context)
    inputTextPromptForKeyWords = "Suggest only ten most important keywords from the following document."
    inputTextPromptForContext = "Suggest ten most important context in the following article. "
    try:
        KeyWords = QueryToOpenAI(originalText, inputTextPromptForKeyWords)
        contextOfText = QueryToOpenAI(originalText, inputTextPromptForContext)
        context = {'originalText': originalText, 'KeyWords': KeyWords, 'contextOfText': contextOfText, 'doctype': doctype, 'dataPath': dataPath}
        return HttpResponse(json.dumps(context), content_type="application/json")
    except Exception as e:
        print(e)
        # Return whatever partial results were produced (possibly empty strings).
        context = {'originalText': originalText, 'KeyWords': KeyWords, 'contextOfText': contextOfText, 'doctype': doctype, 'dataPath': dataPath}
        return HttpResponse(json.dumps(context), content_type="application/json")
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
def uploadedTextData(request):
    """Ingest the uploaded text data and render the text-labelling page."""
    from appbe.dataIngestion import ingestTextData
    page_ctx = ingestTextData(request, DATA_FILE_PATH)
    page_ctx['version'] = AION_VERSION
    return render(request, 'textdatalabelling.html', page_ctx)
def getTextLabel(request):
    """Generate LLM-based labels for the text data and render the page."""
    from appbe.llm_textdatalabelling import generateTextLabel
    page_ctx = generateTextLabel(request, DATA_FILE_PATH)
    page_ctx['version'] = AION_VERSION
    return render(request, 'textdatalabelling.html', page_ctx)
def downloadTextLabelReport(request):
    """Stream the generated topic/label spreadsheet back to the browser."""
    file_path = request.session['texttopicdatapath']
    if not os.path.exists(file_path):
        raise Http404
    with open(file_path, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
        return response
# QnA Generator using LLM related changes
# --------------------------------------------------------
def genearateQA(request):
    """Render the QnA-generation page under the LLM-features section."""
    from appbe.llm_generateQnA import ingestDataForQA
    page_ctx = ingestDataForQA(request, DATA_FILE_PATH)
    page_ctx.update({'version': AION_VERSION, 'selected': "llm_features"})
    return render(request, 'QnA.html', page_ctx)
def downloadQnAReport(request):
    """Send the generated QnA spreadsheet as a file download."""
    file_path = request.session['QnAfilepath']
    if not os.path.exists(file_path):
        raise Http404
    with open(file_path, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
        return response
# -------------------------------------------------------- |
0002_auto_20200803_1820.py | # Generated by Django 3.0.8 on 2020-08-03 12:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Existusecases.ModelName a FK to usecasedetails, give
    Existusecases an explicit AutoField pk, and widen the usecasedetails
    Description (200) and UsecaseName (50) columns."""

    dependencies = [
        ('modelTraining', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='existusecases',
            name='ModelName',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modelTraining.usecasedetails'),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='usecasedetails',
            name='Description',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='usecasedetails',
            name='UsecaseName',
            field=models.CharField(max_length=50),
        ),
    ]
|
0011_existusecases_trainingpid_and_more.py | # Generated by Django 4.1.7 on 2023-05-17 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Existusecases.trainingPID and relax several Existusecases
    columns to accept blank/null (ProblemType, TrainOuputLocation,
    driftStatus, modelType, portNo, publishPID)."""

    dependencies = [
        ('modelTraining', '0010_existusecases_modeltype'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='trainingPID',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='ProblemType',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='TrainOuputLocation',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='driftStatus',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='modelType',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='portNo',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='publishPID',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
0008_existusecases_publishtask.py | # Generated by Django 3.2.8 on 2023-03-28 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Existusecases.publishtask char column (removed again in
    migration 0009)."""

    dependencies = [
        ('modelTraining', '0007_auto_20230328_1823'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='publishtask',
            field=models.CharField(default='', max_length=500),
        ),
    ]
|
0009_auto_20230329_0541.py | # Generated by Django 3.2.8 on 2023-03-29 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Existusecases.publishtask with an integer publishPID, and
    convert Version and portNo to integers defaulting to 0."""

    dependencies = [
        ('modelTraining', '0008_existusecases_publishtask'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='existusecases',
            name='publishtask',
        ),
        migrations.AddField(
            model_name='existusecases',
            name='publishPID',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='Version',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='portNo',
            field=models.IntegerField(default=0),
        ),
    ]
|
0005_usecasedetails_userdefinedname.py | # Generated by Django 3.2.8 on 2023-02-06 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add usecasedetails.UserDefinedName (replaced by usecaseid in
    migration 0006)."""

    dependencies = [
        ('modelTraining', '0004_existusecases_problemtype'),
    ]
    operations = [
        migrations.AddField(
            model_name='usecasedetails',
            name='UserDefinedName',
            # NOTE(review): default=models.CharField(...) passes a *field
            # instance* as the default value, which looks unintended; kept
            # as-is because altering an applied migration desyncs history.
            field=models.CharField(default=models.CharField(max_length=50), max_length=50),
        ),
    ]
|
0001_initial.py | # Generated by Django 3.0.8 on 2020-08-01 17:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the modelTraining app: the Existusecases and
    usecasedetails tables."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Existusecases',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ModelName', models.CharField(max_length=200)),
                ('Version', models.IntegerField()),
                ('DataFilePath', models.FileField(upload_to=None)),
                ('ConfigPath', models.FileField(upload_to=None)),
                ('DeployPath', models.FileField(upload_to=None)),
                ('Status', models.CharField(max_length=200)),
            ],
            options={
                'db_table': 'Existusecases',
            },
        ),
        migrations.CreateModel(
            name='usecasedetails',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('UsecaseName', models.CharField(max_length=20)),
                ('Description', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'usecasedetails',
            },
        ),
    ]
|
0003_existusecases_trainouputlocation.py | # Generated by Django 3.0.8 on 2020-09-18 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Existusecases.TrainOuputLocation (spelling is part of the
    schema; do not correct)."""

    dependencies = [
        ('modelTraining', '0002_auto_20200803_1820'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='TrainOuputLocation',
            field=models.CharField(default='', max_length=200),
        ),
    ]
|
0010_existusecases_modeltype.py | # Generated by Django 3.2.8 on 2023-03-29 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Existusecases.modelType char column."""

    dependencies = [
        ('modelTraining', '0009_auto_20230329_0541'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='modelType',
            field=models.CharField(default='', max_length=40),
        ),
    ]
|
0006_auto_20230206_1759.py | # Generated by Django 3.2.8 on 2023-02-06 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace usecasedetails.UserDefinedName (added in 0005) with the
    shorter usecaseid column."""

    dependencies = [
        ('modelTraining', '0005_usecasedetails_userdefinedname'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='usecasedetails',
            name='UserDefinedName',
        ),
        migrations.AddField(
            model_name='usecasedetails',
            name='usecaseid',
            # NOTE(review): default=models.CharField(...) passes a field
            # instance as the default (same oddity as migration 0005); kept
            # as-is because the migration has already been applied.
            field=models.CharField(default=models.CharField(max_length=50), max_length=10),
        ),
    ]
|
0007_auto_20230328_1823.py | # Generated by Django 3.2.8 on 2023-03-28 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add driftStatus, portNo and publishStatus to Existusecases and
    shrink ProblemType to 20 chars."""

    dependencies = [
        ('modelTraining', '0006_auto_20230206_1759'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='driftStatus',
            field=models.CharField(default='', max_length=20),
        ),
        migrations.AddField(
            model_name='existusecases',
            name='portNo',
            field=models.CharField(default='', max_length=5),
        ),
        migrations.AddField(
            model_name='existusecases',
            name='publishStatus',
            field=models.CharField(default='', max_length=20),
        ),
        migrations.AlterField(
            model_name='existusecases',
            name='ProblemType',
            field=models.CharField(default='', max_length=20),
        ),
    ]
|
0004_existusecases_problemtype.py | # Generated by Django 3.2.8 on 2022-10-28 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Existusecases.ProblemType char column."""

    dependencies = [
        ('modelTraining', '0003_existusecases_trainouputlocation'),
    ]
    operations = [
        migrations.AddField(
            model_name='existusecases',
            name='ProblemType',
            field=models.CharField(default='', max_length=100),
        ),
    ]
|
runaion.py | from django.contrib.staticfiles.management.commands.runserver import Command as RunServer
class Command(RunServer):
    """Variant of the staticfiles runserver command that skips Django's
    system-check and migration-check phases, printing a warning instead."""
    def check(self, *args, **kwargs):
        # Override BaseCommand.check: run no system checks at all.
        self.stdout.write(self.style.WARNING("SKIPPING SYSTEM CHECKS!\n"))
    def check_migrations(self, *args, **kwargs):
        # Override runserver's migration check: unapplied migrations are not reported.
        self.stdout.write(self.style.WARNING("SKIPPING MIGRATION CHECKS!\n"))
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
""" |
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
|
input_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class input_drift():
    """Code generator that emits the source of a standalone input-drift
    detection script. The generated script loads the production model via
    mlflow, compares current data against the training data with a
    two-sample KS test, and fits candidate distributions per feature.
    NOTE(review): source indentation (including inside the generated-code
    string literals) was reconstructed from syntax — verify against the
    canonical file before relying on exact bytes."""
    def __init__(self, tab_size=4):
        # tab_size: spaces per indent level used by addStatement().
        self.tab = ' ' * tab_size
        self.codeText = ''  # accumulated generated source text
    def addInputDriftClass(self):
        """Return the generated `inputdrift` class text: artifact/model
        loading from mlflow, KS-test drift check, and per-feature
        distribution fitting (discrete via pmf SSE, continuous via pdf SSE)."""
        text = "\
\nclass inputdrift():\
\n\
\n    def __init__(self,base_config):\
\n        self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\
\n        self.currentDataLocation = base_config['currentDataLocation']\
\n        home = Path.home()\
\n        if platform.system() == 'Windows':\
\n            from pathlib import WindowsPath\
\n            output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\
\n            output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\
\n        else:\
\n            from pathlib import PosixPath\
\n            output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\
\n            output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\
\n        if not output_model_dir.exists():\
\n            raise ValueError(f'Configuration file not found at {output_model_dir}')\
\n\
\n        tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\
\n        registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'):\
\n            artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\
\n        else:\
\n            artifact_path = Path(run.info.artifact_uri)\
\n        self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\
\n\
\n    def get_input_drift(self,current_data, historical_data):\
\n        curr_num_feat = current_data.select_dtypes(include='number')\
\n        hist_num_feat = historical_data.select_dtypes(include='number')\
\n        num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\
\n        alert_count = 0\
\n        data = {\
\n            'current':{'data':current_data},\
\n            'hist': {'data': historical_data}\
\n        }\
\n        dist_changed_columns = []\
\n        dist_change_message = []\
\n        for feature in num_features:\
\n            curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\
\n            if (curr_static_value < 0.05):\
\n                distribution = {}\
\n                distribution['hist'] = self.DistributionFinder( historical_data[feature])\
\n                distribution['curr'] = self.DistributionFinder( current_data[feature])\
\n                if(distribution['hist']['name'] == distribution['curr']['name']):\
\n                    pass\
\n                else:\
\n                    alert_count = alert_count + 1\
\n                    dist_changed_columns.append(feature)\
\n                    changed_column = {}\
\n                    changed_column['Feature'] = feature\
\n                    changed_column['KS_Training'] = curr_static_value\
\n                    changed_column['Training_Distribution'] = distribution['hist']['name']\
\n                    changed_column['New_Distribution'] = distribution['curr']['name']\
\n                    dist_change_message.append(changed_column)\
\n        if alert_count:\
\n            resultStatus = dist_change_message\
\n        else :\
\n            resultStatus='Model is working as expected'\
\n        return(alert_count, resultStatus)\
\n\
\n    def DistributionFinder(self,data):\
\n        best_distribution =''\
\n        best_sse =0.0\
\n        if(data.dtype in ['int','int64']):\
\n            distributions= {'bernoulli':{'algo':st.bernoulli},\
\n                            'binom':{'algo':st.binom},\
\n                            'geom':{'algo':st.geom},\
\n                            'nbinom':{'algo':st.nbinom},\
\n                            'poisson':{'algo':st.poisson}\
\n                            }\
\n            index, counts = np.unique(data.astype(int),return_counts=True)\
\n            if(len(index)>=2):\
\n                best_sse = np.inf\
\n                y1=[]\
\n                total=sum(counts)\
\n                mean=float(sum(index*counts))/total\
\n                variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\
\n                dispersion=mean/float(variance)\
\n                theta=1/float(dispersion)\
\n                r=mean*(float(theta)/1-theta)\
\n\
\n                for j in counts:\
\n                    y1.append(float(j)/total)\
\n                distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\
\n                distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\
\n                distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\
\n                distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\
\n                distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\
\n\
\n                sselist = []\
\n                for dist in distributions.keys():\
\n                    distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\
\n                    if np.isnan(distributions[dist]['sess']):\
\n                        distributions[dist]['sess'] = float('inf')\
\n                best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\
\n                best_distribution = best_dist\
\n                best_sse = distributions[best_dist]['sess']\
\n\
\n            elif (len(index) == 1):\
\n                best_distribution = 'Constant Data-No Distribution'\
\n                best_sse = 0.0\
\n        elif(data.dtype in ['float64','float32']):\
\n            distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\
\n            best_distribution = st.norm.name\
\n            best_sse = np.inf\
\n            nrange = data.max() - data.min()\
\n\
\n            y, x = np.histogram(data.astype(float), bins='auto', density=True)\
\n            x = (x + np.roll(x, -1))[:-1] / 2.0\
\n\
\n            for distribution in distributions:\
\n                with warnings.catch_warnings():\
\n                    warnings.filterwarnings('ignore')\
\n                    params = distribution.fit(data.astype(float))\
\n                    arg = params[:-2]\
\n                    loc = params[-2]\
\n                    scale = params[-1]\
\n                    pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\
\n                    sse = np.sum(np.power(y - pdf, 2.0))\
\n                    if( sse < best_sse):\
\n                        best_distribution = distribution.name\
\n                        best_sse = sse\
\n\
\n        return {'name':best_distribution, 'sse': best_sse}\
\n\
"
        return text
    def addSuffixCode(self, indent=1):
        """Return the generated `check_drift` entry point and `__main__`
        driver (accepts a JSON config file path or an inline JSON string).
        NOTE(review): the `indent` parameter is currently unused."""
        text ="\n\
\ndef check_drift( config):\
\n    inputdriftObj = inputdrift(config)\
\n    historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\
\n    currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\
\n    dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\
\n    if message == 'Model is working as expected':\
\n        output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\
\n    else:\
\n        output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\
\n    return(output_json)\
\n\
\nif __name__ == '__main__':\
\n    try:\
\n        if len(sys.argv) < 2:\
\n            raise ValueError('config file not present')\
\n        config = sys.argv[1]\
\n        if Path(config).is_file() and Path(config).suffix == '.json':\
\n            with open(config, 'r') as f:\
\n                config = json.load(f)\
\n        else:\
\n            config = json.loads(config)\
\n        output = check_drift(config)\
\n        status = {'Status':'Success','Message':output}\
\n        print('input_drift:'+json.dumps(status))\
\n    except Exception as e:\
\n        status = {'Status':'Failure','Message':str(e)}\
\n        print('input_drift:'+json.dumps(status))"
        return text
    def addStatement(self, statement, indent=1):
        # Append one line of generated code at the given indent level.
        self.codeText += '\n' + self.tab * indent + statement
    def generateCode(self):
        """Assemble the complete generated drift-check script into self.codeText."""
        self.codeText += self.addInputDriftClass()
        self.codeText += self.addSuffixCode()
    def getCode(self):
        # Return the generated source accumulated so far.
        return self.codeText
|
output_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class output_drift():
def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4):
    """Configure which preprocessing steps the generated prediction script
    must replay; each flag mirrors an artefact saved at training time and
    controls what addPredictClass() emits."""
    self.tab = ' ' * tab_size
    self.codeText = ''  # accumulated generated source text
    self.missing = missing                      # emit fillna replay from deploy.json
    self.word2num_features = word2num_features  # features needing word->number conversion
    self.cat_encoder = cat_encoder              # 'labelencoding'/'targetencoding'/'onehotencoding' or False
    self.target_encoder = target_encoder        # emit target-encoder loading (classification outputs)
    self.normalizer = normalizer                # emit normalizer loading/applying
    self.text_profiler = text_profiler          # emit text vectorization step
    self.feature_reducer = feature_reducer      # emit feature-reduction transform
    # NOTE(review): the two fields below are stored but not referenced by the
    # methods visible in this file — confirm usage elsewhere before removing.
    self.score_smaller_is_better = score_smaller_is_better
    self.problem_type = problem_type
def addDatabaseClass(self, indent=0):
    """Return generated source for a `database` helper class that reads
    prediction telemetry from, and writes rows to, InfluxDB.
    NOTE(review): indentation inside the generated string was reconstructed
    from syntax — verify against the canonical file."""
    text = "\
\nclass database():\
\n    def __init__(self, config):\
\n        self.host = config['host']\
\n        self.port = config['port']\
\n        self.user = config['user']\
\n        self.password = config['password']\
\n        self.database = config['database']\
\n        self.measurement = config['measurement']\
\n        self.tags = config['tags']\
\n        self.client = self.get_client()\
\n\
\n    def read_data(self, query)->pd.DataFrame:\
\n        cursor = self.client.query(query)\
\n        points = cursor.get_points()\
\n        my_list=list(points)\
\n        df=pd.DataFrame(my_list)\
\n        return df\
\n\
\n    def get_client(self):\
\n        client = InfluxDBClient(self.host,self.port,self.user,self.password)\
\n        databases = client.get_list_database()\
\n        databases = [x['name'] for x in databases]\
\n        if self.database not in databases:\
\n            client.create_database(self.database)\
\n        return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\
\n\
\n    def write_data(self,data):\
\n        if isinstance(data, pd.DataFrame):\
\n            sorted_col = data.columns.tolist()\
\n            sorted_col.sort()\
\n            data = data[sorted_col]\
\n            data = data.to_dict(orient='records')\
\n        for row in data:\
\n            if 'time' in row.keys():\
\n                p = '%Y-%m-%dT%H:%M:%S.%fZ'\
\n                time_str = datetime.strptime(row['time'], p)\
\n                del row['time']\
\n            else:\
\n                time_str = None\
\n            if 'model_ver' in row.keys():\
\n                self.tags['model_ver']= row['model_ver']\
\n                del row['model_ver']\
\n            json_body = [{\
\n                'measurement': self.measurement,\
\n                'time': time_str,\
\n                'tags': self.tags,\
\n                'fields': row\
\n            }]\
\n            self.client.write_points(json_body)\
\n\
\n    def close(self):\
\n        self.client.close()\
\n"
    if indent:
        # NOTE(review): this inserts the padding *before* each newline (i.e.
        # as trailing spaces on the preceding line) rather than after it —
        # confirm this produces the indentation actually intended.
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
def addPredictClass(self, indent=0):
    """Return generated source for a `predict` class that loads the
    production model from mlflow, replays the configured preprocessing
    steps, and appends a 'prediction' column to the input data. The body
    is assembled conditionally from the flags set in __init__.
    NOTE(review): indentation inside the generated strings was reconstructed
    from syntax — verify against the canonical file."""
    text = "\
\nclass predict():\
\n\
\n    def __init__(self, base_config):\
\n        self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\
\n        self.dataLocation = base_config['dataLocation']\
\n        self.db_enabled = base_config.get('db_enabled', False)\
\n        if self.db_enabled:\
\n            self.db_config = base_config['db_config']\
\n        home = Path.home()\
\n        if platform.system() == 'Windows':\
\n            from pathlib import WindowsPath\
\n            output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\
\n            output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\
\n        else:\
\n            from pathlib import PosixPath\
\n            output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\
\n            output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\
\n        if not output_model_dir.exists():\
\n            raise ValueError(f'Configuration file not found at {output_model_dir}')\
\n\
\n        tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\
\n        registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        self.model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(self.model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'): #remove file:///\
\n            self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\
\n        else:\
\n            self.artifact_path = Path(run.info.artifact_uri)\
\n        with open(self.artifact_path/'deploy.json', 'r') as f:\
\n            deployment_dict = json.load(f)\
\n        with open(self.artifact_path/'features.txt', 'r') as f:\
\n            self.train_features = f.readline().rstrip().split(',')\
\n\
\n        self.dataLocation = base_config['dataLocation']\
\n        self.selected_features = deployment_dict['load_data']['selected_features']\
\n        self.target_feature = deployment_dict['load_data']['target_feature']\
\n        self.output_model_dir = output_model_dir"
    # Conditionally emit loading of each saved preprocessing artefact.
    if self.missing:
        text += "\n        self.missing_values = deployment_dict['transformation']['fillna']"
    if self.word2num_features:
        text += "\n        self.word2num_features = deployment_dict['transformation']['word2num_features']"
    if self.cat_encoder == 'labelencoding':
        text += "\n        self.cat_encoder = deployment_dict['transformation']['cat_encoder']"
    elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'):
        text += "\n        self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"
        text += "\n        self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"
    if self.target_encoder:
        text += "\n        self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"
    if self.normalizer:
        text += "\n        self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\
\n        self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"
    if self.text_profiler:
        text += "\n        self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\
\n        self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"
    if self.feature_reducer:
        text += "\n        self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\
\n        self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"
    # Emit a helper to pull rows (with ground truth) for this model version
    # back out of InfluxDB.
    text += """
    def read_data_from_db(self):
        if self.db_enabled:
            try:
                db = database(self.db_config)
                query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature)
                if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
                    query += f" time > now() - {self.db_config['read_time']}"
                data = db.read_data(query)
            except:
                raise ValueError('Unable to read from the database')
            finally:
                if db:
                    db.close()
            return data
        return None"""
    # Emit the predict() method: load data (tsv/csv/json/url/inline json),
    # validate features, replay preprocessing, then predict.
    text += "\
\n    def predict(self, data):\
\n        df = pd.DataFrame()\
\n        if Path(data).exists():\
\n            if Path(data).suffix == '.tsv':\
\n                df=read_data(data,encoding='utf-8',sep='\t')\
\n            elif Path(data).suffix == '.csv':\
\n                df=read_data(data,encoding='utf-8')\
\n        else:\
\n            if Path(data).suffix == '.json':\
\n                jsonData = read_json(data)\
\n                df = pd.json_normalize(jsonData)\
\n            elif is_file_name_url(data):\
\n                df = read_data(data,encoding='utf-8')\
\n            else:\
\n                jsonData = json.loads(data)\
\n                df = pd.json_normalize(jsonData)\
\n        if len(df) == 0:\
\n            raise ValueError('No data record found')\
\n        missing_features = [x for x in self.selected_features if x not in df.columns]\
\n        if missing_features:\
\n            raise ValueError(f'some feature/s is/are missing: {missing_features}')\
\n        if self.target_feature not in df.columns:\
\n            raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\
\n        df_copy = df.copy()\
\n        df = df[self.selected_features]"
    if self.word2num_features:
        text += "\n        for feat in self.word2num_features:"
        text += "\n            df[ feat ] = df[feat].apply(lambda x: s2n(x))"
    if self.missing:
        text += "\n        df.fillna(self.missing_values, inplace=True)"
    if self.cat_encoder == 'labelencoding':
        text += "\n        df.replace(self.cat_encoder, inplace=True)"
    elif self.cat_encoder == 'targetencoding':
        text += "\n        cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
        text += "\n        df = cat_enc.transform(df)"
    elif self.cat_encoder == 'onehotencoding':
        text += "\n        cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
        text += "\n        transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"
        text += "\n        df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"
    if self.normalizer:
        text += "\n        df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"
    if self.text_profiler:
        text += "\n        text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\
\n        df_vect=self.text_profiler.transform(text_corpus)\
\n        if isinstance(df_vect, np.ndarray):\
\n            df1 = pd.DataFrame(df_vect)\
\n        else:\
\n            df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\
\n        df1 = df1.add_suffix('_vect')\
\n        df = pd.concat([df, df1],axis=1)"
    if self.feature_reducer:
        text += "\n        df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
    else:
        text += "\n        df = df[self.train_features]"
    if self.target_encoder:
        # Classification with a saved target encoder: use class probabilities.
        text += "\n        output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\
\n        df_copy['prediction'] = output.idxmax(axis=1)"
    else:
        text += "\n        output = self.model.predict(df).reshape(1, -1)[0].round(2)\
\n        df_copy['prediction'] = output"
    text += "\n        return df_copy"
    if indent:
        # NOTE(review): padding is inserted before each newline (trailing
        # spaces), not after it — confirm this is the intended indentation.
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
    def getClassificationMatrixCode(self, indent=0):
        """Return the source text of a get_classification_metrices() helper.

        The emitted function computes accuracy plus macro-averaged
        precision/recall/f1 with sklearn and returns them in a dict.

        :param indent: number of tab units to prefix each emitted line with.
        :return: generated source code as a single string.
        """
        # NOTE(review): emitted code relies on `sklearn` being imported by the
        # surrounding generated module -- confirm the import section includes it.
        text = "\
\ndef get_classification_metrices(actual_values, predicted_values):\
\n    result = {}\
\n    accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\
\n    avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\
\n                        average='macro')\
\n    avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\
\n                        average='macro')\
\n    avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\
\n                        average='macro')\
\n\
\n    result['accuracy'] = accuracy_score\
\n    result['precision'] = avg_precision\
\n    result['recall'] = avg_recall\
\n    result['f1'] = avg_f1\
\n    return result\
\n\
        "
        if indent:
            text = text.replace('\n', (self.tab * indent) + '\n')
        return text
    def getRegrssionMatrixCode(self, indent=0):
        """Return the source text of a get_regression_metrices() helper.

        The emitted function computes mean error, mean absolute error and mean
        absolute percentage error, each with its standard deviation, and
        returns them in a dict.

        NOTE(review): method name misspells 'Regression'; kept as-is because
        generateCode() (and possibly external callers) use this exact name.

        :param indent: number of tab units to prefix each emitted line with.
        :return: generated source code as a single string.
        """
        text = "\
\ndef get_regression_metrices( actual_values, predicted_values):\
\n    result = {}\
\n\
\n    me = np.mean(predicted_values - actual_values)\
\n    sde = np.std(predicted_values - actual_values, ddof = 1)\
\n\
\n    abs_err = np.abs(predicted_values - actual_values)\
\n    mae = np.mean(abs_err)\
\n    sdae = np.std(abs_err, ddof = 1)\
\n\
\n    abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\
\n    mape = np.mean(abs_perc_err)\
\n    sdape = np.std(abs_perc_err, ddof = 1)\
\n\
\n    result['mean_error'] = me\
\n    result['mean_abs_error'] = mae\
\n    result['mean_abs_perc_error'] = mape\
\n    result['error_std'] = sde\
\n    result['abs_error_std'] = sdae\
\n    result['abs_perc_error_std'] = sdape\
\n    return result\
\n\
        "
        if indent:
            text = text.replace('\n', (self.tab * indent) + '\n')
        return text
def addSuffixCode(self, indent=1):
text ="\n\
\ndef check_drift( config):\
\n prediction = predict(config)\
\n usecase = config['modelName'] + '_' + config['modelVersion']\
\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\
\n if not train_data_path.exists():\
\n raise ValueError(f'Training data not found at {train_data_path}')\
\n curr_with_pred = prediction.read_data_from_db()\
\n if prediction.target_feature not in curr_with_pred.columns:\
\n raise ValueError('Ground truth not updated for corresponding data in database')\
\n train_with_pred = prediction.predict(train_data_path)\
\n performance = {}"
if self.problem_type == 'classification':
text += "\n\
\n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\
\n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
else:
text += "\n\
\n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\
\n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
text += "\n return performance"
text += "\n\
\nif __name__ == '__main__':\
\n try:\
\n if len(sys.argv) < 2:\
\n raise ValueError('config file not present')\
\n config = sys.argv[1]\
\n if Path(config).is_file() and Path(config).suffix == '.json':\
\n with open(config, 'r') as f:\
\n config = json.load(f)\
\n else:\
\n config = json.loads(config)\
\n output = check_drift(config)\
\n status = {'Status':'Success','Message':json.loads(output)}\
\n print('output_drift:'+json.dumps(status))\
\n except Exception as e:\
\n status = {'Status':'Failure','Message':str(e)}\
\n print('output_drift:'+json.dumps(status))"
if indent:
text = text.replace('\n', (self.tab * indent) + '\n')
return text
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addDatabaseClass()
self.codeText += self.addPredictClass()
if self.problem_type == 'classification':
self.codeText += self.getClassificationMatrixCode()
elif self.problem_type == 'regression':
self.codeText += self.getRegrssionMatrixCode()
else:
raise ValueError(f"Unsupported problem type: {self.problem_type}")
self.codeText += self.addSuffixCode()
def getCode(self):
return self.codeText
|
deploy.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class deploy():
    """Generates the source code of a `deploy` prediction class.

    The emitted class loads the production model from mlflow, applies the
    saved preprocessor (optionally a target encoder and feature reducer),
    predicts, and writes predictions to the production database.
    """
    def __init__(self, target_encoder=False, feature_reducer=False, score_smaller_is_better=True, tab_size=4):
        """Assemble the generated deploy-class source into self.codeText.

        :param target_encoder: if truthy, emit target-encoder loading and the
            predict_proba/classes_ prediction path (classification).
        :param feature_reducer: if truthy, emit feature-reducer loading and
            its transform step; otherwise select self.train_features.
        :param score_smaller_is_better: not used in this method -- presumably
            consumed elsewhere by callers of this generator; confirm before
            removing.
        :param tab_size: spaces per indentation level for emitted helpers.
        """
        self.tab = ' ' * tab_size
        self.codeText = "\n\n\
\nclass deploy():\
\n\
\n    def __init__(self, base_config, log=None):\
\n        self.targetPath = (Path('aion')/base_config['targetPath']).resolve()\
\n        if log:\
\n            self.logger = log\
\n        else:\
\n            log_file = self.targetPath/IOFiles['log']\
\n            self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\
\n        try:\
\n            self.initialize(base_config)\
\n        except Exception as e:\
\n            self.logger.error(e, exc_info=True)\
\n\
\n    def initialize(self, base_config):\
\n        self.usecase = base_config['targetPath']\
\n        monitoring_data = read_json(self.targetPath/IOFiles['monitor'])\
\n        self.prod_db_type = monitoring_data['prod_db_type']\
\n        self.db_config = monitoring_data['db_config']\
\n        mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\
\n        tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(monitoring_data.get('mlflow_config',mlflow_default_config), self.targetPath)\
\n        mlflow.tracking.set_tracking_uri(tracking_uri)\
\n        mlflow.tracking.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient()\
\n        self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        self.model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(self.model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'): #remove file:///\
\n            skip_name = 'file:'\
\n            if run.info.artifact_uri.startswith('file:///'):\
\n                skip_name = 'file:///'\
\n            self.artifact_path = Path(run.info.artifact_uri[len(skip_name) : ])\
\n            self.artifact_path_type = 'file'\
\n            meta_data = read_json(self.artifact_path/IOFiles['metaData'])\
\n        else:\
\n            self.artifact_path = run.info.artifact_uri\
\n            self.artifact_path_type = 'url'\
\n            meta_data_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+IOFiles['metaData'])\
\n            meta_data = read_json(meta_data_file)\
\n        self.selected_features = meta_data['load_data']['selected_features']\
\n        self.train_features = meta_data['training']['features']"
        # Branch: with a target encoder, also download/load the encoder file.
        if target_encoder:
            self.codeText += "\
\n        if self.artifact_path_type == 'url':\
\n            preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\
\n            target_encoder_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['target_encoder'])\
\n        else:\
\n            preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']\
\n            target_encoder_file = self.artifact_path/meta_data['transformation']['target_encoder']\
\n        self.target_encoder = joblib.load(target_encoder_file)"
        else:
            self.codeText += "\
\n        if self.artifact_path_type == 'url':\
\n            preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\
\n        else:\
\n            preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']"
        self.codeText += "\
\n        self.preprocessor = joblib.load(preprocessor_file)\
\n        self.preprocess_out_columns = meta_data['transformation']['preprocess_out_columns']\
        "
        # Optional feature-reducer loading in the generated initialize().
        if feature_reducer:
            self.codeText += "\
\n        if self.artifact_path_type == 'url':\
\n            feature_reducer_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['featureengineering']['feature_reducer']['file'])\
\n        else:\
\n            feature_reducer_file = self.artifact_path/meta_data['featureengineering']['feature_reducer']['file']\
\n        self.feature_reducer = joblib.load(feature_reducer_file)\
\n        self.feature_reducer_cols = meta_data['featureengineering']['feature_reducer']['features']"
        # Generated write_to_db / predict / __predict methods.
        self.codeText +="\n\
\n    def write_to_db(self, data):\
\n        prod_file = IOFiles['prodData']\
\n        writer = dataReader(reader_type=self.prod_db_type,target_path=self.targetPath, config=self.db_config )\
\n        writer.write(data, prod_file)\
\n        writer.close()\
\n\
\n    def predict(self, data=None):\
\n        try:\
\n            return self.__predict(data)\
\n        except Exception as e:\
\n            if self.logger:\
\n                self.logger.error(e, exc_info=True)\
\n            raise ValueError(json.dumps({'Status':'Failure', 'Message': str(e)}))\
\n\
\n    def __predict(self, data=None):\
\n        df = pd.DataFrame()\
\n        jsonData = json.loads(data)\
\n        df = pd.json_normalize(jsonData)\
\n        if len(df) == 0:\
\n            raise ValueError('No data record found')\
\n        missing_features = [x for x in self.selected_features if x not in df.columns]\
\n        if missing_features:\
\n            raise ValueError(f'some feature/s is/are missing: {missing_features}')\
\n        df_copy = df.copy()\
\n        df = df[self.selected_features]\
\n        df = self.preprocessor.transform(df)\
\n        if isinstance(df, scipy.sparse.spmatrix):\
\n            df = df.toarray()\
\n        df = pd.DataFrame(df, columns=self.preprocess_out_columns)"
        if feature_reducer:
            self.codeText += "\n        df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
        else:
            self.codeText += "\n        df = df[self.train_features]"
        # Prediction path: probabilities + class labels vs plain regression output.
        if target_encoder:
            self.codeText += "\n        df = df.astype(np.float32)\
\n        output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\
\n        df_copy['prediction'] = output.idxmax(axis=1)\
\n        self.write_to_db(df_copy)\
\n        df_copy['probability'] = output.max(axis=1).round(2)\
\n        df_copy['remarks'] = output.apply(lambda x: x.to_json(), axis=1)\
\n        output = df_copy.to_json(orient='records')"
        else:
            self.codeText += "\n        output = self.model._model_impl.predict(df).reshape(1, -1)[0].round(2)\
\n        df_copy['prediction'] = output\
\n        self.write_to_db(df_copy)\
\n        output = df_copy.to_json(orient='records')"
        self.codeText += "\n        return output"
        # Logical-name -> filename registries used by getInputOutputFiles().
        self.input_files = {}
        self.output_files = {}
        self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json', 'performance' : 'performance.json','monitor':'monitoring.json','log':'predict.log','prodData':'prodData'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\n'
text += self.getInputFiles()
text += '\n'
text += self.getOutputFiles()
if indent:
text = text.replace('\n', self.tab * indent + '\n')
return text
    def addStatement(self, statement, indent=1):
        # Intentionally a no-op: the deploy script is fully assembled in
        # __init__. Kept so this class exposes the same API as the other
        # generator classes in this module.
        # NOTE(review): presumably deliberate -- confirm no caller expects the
        # statement to be appended to codeText.
        pass
def getCode(self):
return self.codeText
    def getGroundtruthCode(self):
        """Return the complete source of the generated groundtruth.py script.

        The emitted groundtruth class reads monitoring.json for database
        settings and writes posted ground-truth records to the prodDataGT
        store via dataReader.
        """
        return """
import sys
import math
import json
import sqlite3
import pandas as pd
from datetime import datetime
from pathlib import Path
import platform
from utility import *
from data_reader import dataReader
IOFiles = {
    "monitoring":"monitoring.json",
    "prodDataGT":"prodDataGT"
}

class groundtruth():

    def __init__(self, base_config):
        self.targetPath = Path('aion')/base_config['targetPath']
        data = read_json(self.targetPath/IOFiles['monitoring'])
        self.prod_db_type = data['prod_db_type']
        self.db_config = data['db_config']

    def actual(self, data=None):
        df = pd.DataFrame()
        jsonData = json.loads(data)
        df = pd.json_normalize(jsonData)
        if len(df) == 0:
            raise ValueError('No data record found')
        self.write_to_db(df)
        status = {'Status':'Success','Message':'uploaded'}
        return json.dumps(status)

    def write_to_db(self, data):
        prod_file = IOFiles['prodDataGT']
        writer = dataReader(reader_type=self.prod_db_type, target_path=self.targetPath, config=self.db_config )
        writer.write(data, prod_file)
        writer.close()
        """
    def getServiceCode(self):
        """Return the complete source of the generated model-serving script.

        The emitted script runs a threaded HTTP server exposing
        /AION/<usecase>/predict, /groundtruth, /delete (POST) and
        /metrices, /logs, help (GET), plus a background thread that reloads
        the deployed model when production.json changes. It also embeds
        distribution-drift plotting helpers (DistributionFinder,
        getDriftDistribution) used by the /metrices page.
        """
        return """
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import os
from os.path import expanduser
import platform
import threading
import subprocess
import argparse
import re
import cgi
import json
import shutil
import logging
import sys
import time
import seaborn as sns
from pathlib import Path
from predict import deploy
from groundtruth import groundtruth
import pandas as pd
import scipy.stats as st
import numpy as np
import warnings
from utility import *
from data_reader import dataReader
warnings.filterwarnings("ignore")
config_input = None
IOFiles = {
    "inputData": "rawData.dat",
    "metaData": "modelMetaData.json",
    "production": "production.json",
    "log": "aion.log",
    "monitoring":"monitoring.json",
    "prodData": "prodData",
    "prodDataGT":"prodDataGT"
}

def DistributionFinder(data):
    try:
        distributionName = ""
        sse = 0.0
        KStestStatic = 0.0
        dataType = ""
        if (data.dtype == "float64" or data.dtype == "float32"):
            dataType = "Continuous"
        elif (data.dtype == "int"):
            dataType = "Discrete"
        elif (data.dtype == "int64"):
            dataType = "Discrete"
        if (dataType == "Discrete"):
            distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
            index, counts = np.unique(data.astype(int), return_counts=True)
            if (len(index) >= 2):
                best_sse = np.inf
                y1 = []
                total = sum(counts)
                mean = float(sum(index * counts)) / total
                variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
                dispersion = mean / float(variance)
                theta = 1 / float(dispersion)
                r = mean * (float(theta) / 1 - theta)
                for j in counts:
                    y1.append(float(j) / total)
                pmf1 = st.bernoulli.pmf(index, mean)
                pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
                pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
                pmf4 = st.nbinom.pmf(index, mean, r)
                pmf5 = st.poisson.pmf(index, mean)
                sse1 = np.sum(np.power(y1 - pmf1, 2.0))
                sse2 = np.sum(np.power(y1 - pmf2, 2.0))
                sse3 = np.sum(np.power(y1 - pmf3, 2.0))
                sse4 = np.sum(np.power(y1 - pmf4, 2.0))
                sse5 = np.sum(np.power(y1 - pmf5, 2.0))
                sselist = [sse1, sse2, sse3, sse4, sse5]
                best_distribution = 'NA'
                for i in range(0, len(sselist)):
                    if best_sse > sselist[i] > 0:
                        best_distribution = distributions[i].name
                        best_sse = sselist[i]
            elif (len(index) == 1):
                best_distribution = "Constant Data-No Distribution"
                best_sse = 0.0
            distributionName = best_distribution
            sse = best_sse
        elif (dataType == "Continuous"):
            distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
                             st.gamma, st.beta]
            best_distribution = st.norm.name
            best_sse = np.inf
            datamin = data.min()
            datamax = data.max()
            nrange = datamax - datamin
            y, x = np.histogram(data.astype(float), bins='auto', density=True)
            x = (x + np.roll(x, -1))[:-1] / 2.0
            for distribution in distributions:
                params = distribution.fit(data.astype(float))
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                sse = np.sum(np.power(y - pdf, 2.0))
                if (best_sse > sse > 0):
                    best_distribution = distribution.name
                    best_sse = sse
            distributionName = best_distribution
            sse = best_sse
    except:
        response = str(sys.exc_info()[0])
        message = 'Job has Failed' + response
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        print(message)
    return distributionName, sse

def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
    import matplotlib.pyplot as plt
    import math
    import io, base64, urllib
    np.seterr(divide='ignore', invalid='ignore')
    try:
        plt.clf()
    except:
        pass
    plt.rcParams.update({'figure.max_open_warning': 0})
    sns.set(color_codes=True)
    pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    if len(feature) > 4:
        numneroffeatures = len(feature)
        plt.figure(figsize=(10, numneroffeatures*2))
    else:
        plt.figure(figsize=(10,5))
    for i in enumerate(feature):
        dataType = dataframe[i[1]].dtypes
        if dataType not in pandasNumericDtypes:
            dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
            dataframe[i[1]] = dataframe[i[1]].cat.codes
            dataframe[i[1]] = dataframe[i[1]].astype(int)
            dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0])
        else:
            dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean())
        plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
        plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1)
        distname, sse = DistributionFinder(dataframe[i[1]])
        print(distname)
        ax = sns.distplot(dataframe[i[1]], label=distname)
        ax.legend(loc='best')
        if newdataframe.empty == False:
            dataType = newdataframe[i[1]].dtypes
            if dataType not in pandasNumericDtypes:
                newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]])
                newdataframe[i[1]] = newdataframe[i[1]].cat.codes
                newdataframe[i[1]] = newdataframe[i[1]].astype(int)
                newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0])
            else:
                newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean())
            distname, sse = DistributionFinder(newdataframe[i[1]])
            print(distname)
            ax = sns.distplot(newdataframe[i[1]],label=distname)
            ax.legend(loc='best')
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    string = base64.b64encode(buf.read())
    uri = urllib.parse.quote(string)
    return uri

def read_json(file_path):
    data = None
    with open(file_path,'r') as f:
        data = json.load(f)
    return data

class HTTPRequestHandler(BaseHTTPRequestHandler):

    def do_POST(self):
        print('PYTHON ######## REQUEST ####### STARTED')
        if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                length = int(self.headers.get('content-length'))
                data = self.rfile.read(length)
                usecase = self.path.split('/')[-2]
                if usecase.lower() == config_input['targetPath'].lower():
                    operation = self.path.split('/')[-1]
                    data = json.loads(data)
                    dataStr = json.dumps(data)
                    if operation.lower() == 'predict':
                        output=deployobj.predict(dataStr)
                        resp = output
                    elif operation.lower() == 'groundtruth':
                        gtObj = groundtruth(config_input)
                        output = gtObj.actual(dataStr)
                        resp = output
                    elif operation.lower() == 'delete':
                        targetPath = Path('aion')/config_input['targetPath']
                        for file in data:
                            x = targetPath/file
                            if x.exists():
                                os.remove(x)
                        resp = json.dumps({'Status':'Success'})
                    else:
                        outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'})
                        resp = outputStr
                else:
                    outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'})
                    resp = outputStr
            else:
                outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'})
                resp = outputStr
            resp=resp+'\\n'
            resp=resp.encode()
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write(resp)
        else:
            print('python ==> else1')
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        print('PYTHON ######## REQUEST ####### ENDED')
        return

    def do_GET(self):
        print('PYTHON ######## REQUEST ####### STARTED')
        if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
            usecase = self.path.split('/')[-2]
            self.send_response(200)
            self.targetPath = Path('aion')/config_input['targetPath']
            meta_data_file = self.targetPath/IOFiles['metaData']
            if meta_data_file.exists():
                meta_data = read_json(meta_data_file)
            else:
                raise ValueError(f'Configuration file not found: {meta_data_file}')
            production_file = self.targetPath/IOFiles['production']
            if production_file.exists():
                production_data = read_json(production_file)
            else:
                raise ValueError(f'Production Details not found: {production_file}')
            operation = self.path.split('/')[-1]
            if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'):
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                ModelString = production_data['Model']
                ModelPerformance = ModelString+'_performance.json'
                performance_file = self.targetPath/ModelPerformance
                if performance_file.exists():
                    performance_data = read_json(performance_file)
                else:
                    raise ValueError(f'Production Details not found: {performance_data}')
                Scoring_Creteria = performance_data['scoring_criteria']
                train_score = round(performance_data['metrices']['train_score'],2)
                test_score = round(performance_data['metrices']['test_score'],2)
                current_score = 'NA'
                monitoring = read_json(self.targetPath/IOFiles['monitoring'])
                reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config'])
                inputDatafile = self.targetPath/IOFiles['inputData']
                NoOfPrediction = 0
                NoOfGroundTruth = 0
                inputdistribution = ''
                if reader.file_exists(IOFiles['prodData']):
                    dfPredict = reader.read(IOFiles['prodData'])
                    dfinput = pd.read_csv(inputDatafile)
                    features = meta_data['training']['features']
                    inputdistribution = getDriftDistribution(features,dfinput,dfPredict)
                    NoOfPrediction = len(dfPredict)
                    if reader.file_exists(IOFiles['prodDataGT']):
                        dfGroundTruth = reader.read(IOFiles['prodDataGT'])
                        NoOfGroundTruth = len(dfGroundTruth)
                        common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()]
                        proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner')
                        if Scoring_Creteria.lower() == 'accuracy':
                            from sklearn.metrics import accuracy_score
                            current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction'])
                            current_score = round((current_score*100),2)
                        elif Scoring_Creteria.lower() == 'recall':
                            from sklearn.metrics import accuracy_score
                            current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro')
                            current_score = round((current_score*100),2)
                msg = \"""<html>
                <head>
                    <title>Performance Details</title>
                </head>
                <style>
                table, th, td {border}
                </style>
                <body>
                    <h2><b>Deployed Model:</b>{ModelString}</h2>
                    <br/>
                    <table style="width:50%">
                        <tr>
                            <td>No of Prediction</td>
                            <td>{NoOfPrediction}</td>
                        </tr>
                        <tr>
                            <td>No of GroundTruth</td>
                            <td>{NoOfGroundTruth}</td>
                        </tr>
                    </table>
                    <br/>
                    <table style="width:100%">
                        <tr>
                            <th>Score Type</th>
                            <th>Train Score</th>
                            <th>Test Score</th>
                            <th>Production Score</th>
                        </tr>
                        <tr>
                            <td>{Scoring_Creteria}</td>
                            <td>{train_score}</td>
                            <td>{test_score}</td>
                            <td>{current_score}</td>
                        </tr>
                    </table>
                    <br/>
                    <br/>
                    <img src="data:image/png;base64,{newDataDrift}" alt="" >
                </body>
                </html>
                \""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution)
            elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'):
                self.send_header('Content-Type', 'text/plain')
                self.end_headers()
                log_file = self.targetPath/IOFiles['log']
                if log_file.exists():
                    with open(log_file) as f:
                        msg = f.read()
                        f.close()
                else:
                    raise ValueError(f'Log Details not found: {log_file}')
            else:
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                features = meta_data['load_data']['selected_features']
                bodydes='['
                for x in features:
                    if bodydes != '[':
                        bodydes = bodydes+','
                    bodydes = bodydes+'{"'+x+'":"value"}'
                bodydes+=']'
                urltext = '/AION/'+config_input['targetPath']+'/predict'
                urltextgth='/AION/'+config_input['targetPath']+'/groundtruth'
                urltextproduction='/AION/'+config_input['targetPath']+'/metrices'
                msg=\"""
Version:{modelversion}
RunNo: {runNo}

URL for Prediction
==================
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.

URL for GroundTruth
===================
URL:{urltextgth}
RequestType: POST
Content-Type=application/json
Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. Otherwise outputdrift will not work

URL for Model In Production Analysis
====================================
URL:{urltextproduction}
RequestType: GET
Content-Type=application/json
                \""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes)
            self.wfile.write(msg.encode())
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    allow_reuse_address = True

    def shutdown(self):
        self.socket.close()
        HTTPServer.shutdown(self)

class file_status():

    def __init__(self, reload_function, params, file, logger):
        self.files_status = {}
        self.initializeFileStatus(file)
        self.reload_function = reload_function
        self.params = params
        self.logger = logger

    def initializeFileStatus(self, file):
        self.files_status = {'path': file, 'time':file.stat().st_mtime}

    def is_file_changed(self):
        if self.files_status['path'].stat().st_mtime > self.files_status['time']:
            self.files_status['time'] = self.files_status['path'].stat().st_mtime
            return True
        return False

    def run(self):
        global config_input
        while( True):
            time.sleep(30)
            if self.is_file_changed():
                production_details = targetPath/IOFiles['production']
                if not production_details.exists():
                    raise ValueError(f'Model in production details does not exist')
                productionmodel = read_json(production_details)
                config_file = Path(__file__).parent/'config.json'
                if not Path(config_file).exists():
                    raise ValueError(f'Config file is missing: {config_file}')
                config_input = read_json(config_file)
                config_input['deployedModel'] = productionmodel['Model']
                config_input['deployedRunNo'] = productionmodel['runNo']
                self.logger.info('Model changed Reloading.....')
                self.logger.info(f'Model: {config_input["deployedModel"]}')
                self.logger.info(f'Version: {str(config_input["modelVersion"])}')
                self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}')
                self.reload_function(config_input)

class SimpleHttpServer():

    def __init__(self, ip, port, model_file_path,reload_function,params, logger):
        self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
        self.status_checker = file_status( reload_function, params, model_file_path, logger)

    def start(self):
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
        self.status_thread = threading.Thread(target=self.status_checker.run)
        self.status_thread.start()

    def waitForThread(self):
        self.server_thread.join()
        self.status_thread.join()

    def stop(self):
        self.server.shutdown()
        self.waitForThread()

if __name__=='__main__':
    parser = argparse.ArgumentParser(description='HTTP Server')
    parser.add_argument('-ip','--ipAddress', help='HTTP Server IP')
    parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server')
    args = parser.parse_args()
    config_file = Path(__file__).parent/'config.json'
    if not Path(config_file).exists():
        raise ValueError(f'Config file is missing: {config_file}')
    config = read_json(config_file)
    if args.ipAddress:
        config['ipAddress'] = args.ipAddress
    if args.portNo:
        config['portNo'] = args.portNo
    targetPath = Path('aion')/config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    production_details = targetPath/IOFiles['production']
    if not production_details.exists():
        raise ValueError(f'Model in production details does not exist')
    productionmodel = read_json(production_details)
    config['deployedModel'] = productionmodel['Model']
    config['deployedRunNo'] = productionmodel['runNo']
    #server = SimpleHttpServer(config['ipAddress'],int(config['portNo']))
    config_input = config
    logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger(Path(__file__).parent.name)
    deployobj = deploy(config_input, logger)
    server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger)
    logger.info('HTTP Server Running...........')
    logger.info(f"IP Address: {config['ipAddress']}")
    logger.info(f"Port No.: {config['portNo']}")
    print('HTTP Server Running...........')
    print('For Prediction')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/predict')
    print('\\nFor GroundTruth')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/groundtruth')
    print('\\nFor Help')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/help')
    print('\\nFor Model In Production Analysis')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/metrices')
    server.start()
    server.waitForThread()
        """
trainer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class learner():
    """Generates the source code of the training step's train() function.

    Collects input/output file registrations and code fragments (prefix,
    suffix, local helpers) that callers assemble into a training script.
    """
    def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4):
        """Initialize generator state and register default input files.

        :param problem_type: 'classification' or 'regression'.
        :param target_feature: name of the target column in the training data.
        :param sample_method: unused here -- presumably consumed by other
            methods of this generator; confirm before removing.
        :param indent: unused here; kept for interface compatibility.
        :param tab_size: spaces per indentation level in emitted code.
        """
        self.tab = " "*tab_size
        self.df_name = 'df'
        self.problem_type = problem_type
        self.target_feature = target_feature
        self.search_space = []
        # Fix: was a needless f-string with no placeholders.
        self.codeText = "\ndef train(log):"
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        self.addInputFiles({'inputData' : 'featureEngineeredData.dat','testData' : 'test.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json','log' : 'aion.log'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\n'
text += self.getInputFiles()
if indent:
text = text.replace('\n', self.tab * indent + '\n')
return text
    def __addValidateConfigCode(self):
        """Return source for validateConfig(): loads config.json located next
        to the generated script and raises ValueError when it is missing."""
        text = "\n\
\ndef validateConfig():\
\n    config_file = Path(__file__).parent/'config.json'\
\n    if not Path(config_file).exists():\
\n        raise ValueError(f'Config file is missing: {config_file}')\
\n    config = read_json(config_file)\
\n    return config"
        return text
    def __addSaveModelCode(self):
        """Return source for save_model(): opens an mlflow run and delegates
        logging of model, metrics and parameters to logMlflow()."""
        text = "\n\
\ndef save_model( experiment_id, estimator, features, metrices, params,tags, scoring):\
\n    # mlflow log model, metrices and parameters\
\n    with mlflow.start_run(experiment_id = experiment_id, run_name = model_name):\
\n        return logMlflow(params, metrices, estimator, tags, model_name.split('_')[0])"
        return text
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\n' + self.codeText
def addLocalFunctionsCode(self):
self.function_code += self.__addValidateConfigCode()
self.function_code += self.__addSaveModelCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += "\
\n config = validateConfig()\
\n targetPath = Path('aion')/config['targetPath']\
\n if not targetPath.exists():\
\n raise ValueError(f'targetPath does not exist')\
\n meta_data_file = targetPath/IOFiles['metaData']\
\n if meta_data_file.exists():\
\n meta_data = read_json(meta_data_file)\
\n else:\
\n raise ValueError(f'Configuration file not found: {meta_data_file}')\
\n log_file = targetPath/IOFiles['log']\
\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\
\n dataLoc = targetPath/IOFiles['inputData']\
\n if not dataLoc.exists():\
\n return {'Status':'Failure','Message':'Data location does not exists.'}\
\n\
\n status = dict()\
\n usecase = config['targetPath']\
\n df = pd.read_csv(dataLoc)\
\n prev_step_output = meta_data['featureengineering']['Status']"
def getSuffixModules(self):
modules = [{'module':'platform'}
,{'module':'time'}
,{'module':'mlflow'}
]
return modules
def add_100_trainsize_code(self):
self.codeText +="\n\
\n else:\
\n test_score = train_score\
\n metrices = {}"
def addSuffixCode(self, indent=1):
self.codeText += "\n\
\n meta_data['training'] = {}\
\n meta_data['training']['features'] = features\
\n scoring = config['scoring_criteria']\
\n tags = {'estimator_name': model_name}\
\n monitoring_data = read_json(targetPath/IOFiles['monitor'])\
\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\
\n mlflow_client, experiment_id = mlflow_create_experiment(monitoring_data.get('mlflow_config',mlflow_default_config), targetPath, usecase)\
\n run_id = save_model(experiment_id, estimator,features, metrices,best_params,tags,scoring)\
\n write_json(meta_data, targetPath/IOFiles['metaDataOutput'])\
\n write_json({'scoring_criteria': scoring, 'metrices':metrices, 'param':best_params}, targetPath/IOFiles['performance'])\
\n\
\n # return status\
\n status = {'Status':'Success','mlflow_run_id':run_id,'FeaturesUsed':features,'test_score':metrices['test_score'],'train_score':metrices['train_score']}\
\n log.info(f'Test score: {test_score}')\
\n log.info(f'Train score: {train_score}')\
\n log.info(f'MLflow run id: {run_id}')\
\n log.info(f'output: {status}')\
\n return json.dumps(status)"
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'json'}
,{'module':'logging'}
]
return modules
def addMainCode(self, indent=1):
self.codeText += "\n\
\nif __name__ == '__main__':\
\n log = None\
\n try:\
\n print(train(log))\
\n except Exception as e:\
\n if log:\
\n log.error(e, exc_info=True)\
\n status = {'Status':'Failure','Message':str(e)}\
\n print(json.dumps(status))\
"
def add_variable(self, name, value, indent=1):
if isinstance(value, str):
self.codeText += f"\n{self.tab * indent}{name} = '{value}'"
else:
self.codeText += f"\n{self.tab * indent}{name} = {value}"
def addStatement(self, statement, indent=1):
self.codeText += f"\n{self.tab * indent}{statement}"
def add_search_space_w(self, algoritms):
for model, params in algoritms.items():
d = {'clf': f"[{model}()]"}
for k,v in params.items():
if isinstance(v, str):
d[f'clf__{k}']=f"'{v}'"
else:
d[f'clf__{k}']= f"{v}"
self.search_space.append(d)
def add_search_space(self, indent=1):
self.codeText += f"\n{self.tab}search_space = config['search_space']"
def add_train_test_split(self, train_feature, target_feature,test_ratio, indent=1):
self.codeText += "\n\n # split the data for training\
\n selected_features = prev_step_output['selected_features']\
\n target_feature = config['target_feature']\
\n train_features = prev_step_output['total_features'].copy()\
\n train_features.remove(target_feature)\
\n X_train = df[train_features]\
\n y_train = df[target_feature]\
\n if config['test_ratio'] > 0.0:\
\n test_data = read_data(targetPath/IOFiles['testData'])\
\n X_test = test_data[train_features]\
\n y_test = test_data[target_feature]\
\n else:\
\n X_test = pd.DataFrame()\
\n y_test = pd.DataFrame()"
def add_model_fit(self, estimator, optimizer, selector_method, importer, indent=1):
# need to adjust the indent
importer.addModule('importlib')
importer.addModule('operator')
text = f"\n features = selected_features['{selector_method}']\
\n estimator = {estimator}()\
\n param = config['algorithms']['{estimator}']"
if optimizer == 'GridSearchCV':
text += "\n grid = GridSearchCV(estimator, param,cv=config['optimization_param']['trainTestCVSplit'])\
\n grid.fit(X_train[features], y_train)\
\n train_score = grid.best_score_ * 100\
\n best_params = grid.best_params_\
\n estimator = grid.best_estimator_"
elif optimizer == 'GeneticSelectionCV':
text += "\n grid = GeneticSelectionCV(estimator, scoring=scorer, n_generations=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'],n_population=config['optimization_param']['geneticparams']['n_population'],crossover_proba=config['optimization_param']['geneticparams']['crossover_proba'],mutation_proba=config['optimization_param']['geneticparams']['mutation_proba'],crossover_independent_proba=config['optimization_param']['geneticparams']['crossover_independent_proba'],mutation_independent_proba=config['optimization_param']['geneticparams']['mutation_independent_proba'],tournament_size=config['optimization_param']['geneticparams']['tournament_size'],n_gen_no_change=config['optimization_param']['geneticparams']['n_gen_no_change'])\
\n grid.fit(X_train[features], y_train)\
\n train_score = grid.score(X_train[features], y_train)\
\n best_params = grid.estimator_.get_params()\
\n estimator = grid.estimator_"
else:
text += f"\n grid = {optimizer}(estimator, param, scoring=scorer, n_iter=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'])\
\n grid.fit(X_train[features], y_train)\
\n train_score = grid.best_score_ * 100\
\n best_params = grid.best_params_\
\n estimator = grid.best_estimator_"
self.codeText += text
def addLearner(self, model_name, params, importer, indent=1):
importer.addModule('Pipeline', mod_from='sklearn.pipeline')
importer.addModule('ColumnTransformer', mod_from='sklearn.compose')
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
model_params = []
for k,v in params.items():
if isinstance(v, str):
model_params.append(f"{k}='{v}'")
else:
model_params.append(f"{k}={v}")
model_params = ",".join(model_params)
self.codeText += self.getTransformer()
text = f"\n{self.tab * indent}pipeline = Pipeline(steps = [('preprocessor', preprocessor),('learner',{model_name}({model_params}))])"
self.codeText += text
self.codeText += self.splitTargetFeature(importer)
if self.balancing:
self.codeText += self.balancingCode(importer)
self.codeText += self.fitModelCode(importer)
def splitTargetFeature(self, importer, indent=1):
importer.addModule('train_test_split', mod_from='sklearn.model_selection')
return f"\n{self.tab * indent}target = df['{self.target_feature}']\
\n{self.tab * indent}df = df.drop(['{self.target_feature}'], axis=1)\
\n{self.tab * indent}X_train, X_test, y_train, y_test = train_test_split(df,target, train_size = percentage/100.0)"
def getCode_remove(self, model_name=None, indent=1):
return self.codeText
def getDFName(self):
return self.df_name
def copyCode(self, learner):
self.codeText = learner.getCode()
|
selector.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class selector():
    """Source-code generator for the feature-selection stage of an MLAC pipeline.

    Generated statements accumulate in ``self.codeText`` under a
    ``def featureSelector(log):`` entry point; standalone helpers accumulate
    in ``self.function_code``.
    """
    def __init__(self, indent=0, tab_size=4):
        self.tab = " " * tab_size
        self.codeText = f"\n\ndef featureSelector(log):"
        self.pipe = 'pipe'
        self.code_generated = False
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'})

    def addInputFiles(self, files):
        """Merge *files* (logical name -> file name) into the stage inputs.

        Raises:
            TypeError: if *files* is not a dict.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        self.input_files.update(files)

    def addOutputFiles(self, files):
        """Merge *files* (logical name -> file name) into the stage outputs.

        BUG FIX: this method previously updated ``self.input_files``, so
        registered outputs were recorded as inputs and ``output_files``
        always stayed empty.

        Raises:
            TypeError: if *files* is not a dict.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        self.output_files.update(files)

    def getInputFiles(self):
        """Return a generated ``IOFiles = {...}`` assignment for the inputs."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text

    def getOutputFiles(self):
        """Return a generated ``output_file = {...}`` assignment for the outputs."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text

    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles assignment, optionally shifted by *indent* tab stops."""
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text

    def __addValidateConfigCode(self):
        # Source for a validateConfig() helper embedded in the generated module.
        text = "\n\
            \ndef validateConfig():\
            \n    config_file = Path(__file__).parent/'config.json'\
            \n    if not Path(config_file).exists():\
            \n        raise ValueError(f'Config file is missing: {config_file}')\
            \n    config = read_json(config_file)\
            \n    return config"
        return text

    def addMainCode(self):
        """Emit the generated module's __main__ guard."""
        self.codeText += "\n\
            \nif __name__ == '__main__':\
            \n    log = None\
            \n    try:\
            \n        print(featureSelector(log))\
            \n    except Exception as e:\
            \n        if log:\
            \n            log.error(e, exc_info=True)\
            \n        status = {'Status':'Failure','Message':str(e)}\
            \n        print(json.dumps(status))\
            "

    def addValidateConfigCode(self, indent=1):
        """Append the config-validation helper to the standalone functions."""
        self.function_code += self.__addValidateConfigCode()

    def addStatement(self, statement, indent=1):
        """Append one generated statement at *indent* tab stops.

        The class previously defined this method twice with equivalent
        bodies; the duplicate definition has been removed.
        """
        self.codeText += f"\n{self.tab * indent}{statement}"

    def getCode(self):
        """Return helper functions followed by the accumulated featureSelector() body."""
        return self.function_code + '\n' + self.codeText

    def addLocalFunctionsCode(self):
        """Append all standalone helper functions."""
        self.addValidateConfigCode()

    def getPrefixModules(self):
        """Modules required by the code emitted in addPrefixCode()."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
                   ,{'module':'pandas', 'mod_as':'pd'}
                   ]
        return modules

    def addPrefixCode(self, indent=1):
        """Emit the featureSelector() prologue: config/meta-data loading and data read."""
        self.codeText += "\
            \n    config = validateConfig()\
            \n    targetPath = Path('aion')/config['targetPath']\
            \n    if not targetPath.exists():\
            \n        raise ValueError(f'targetPath does not exist')\
            \n    meta_data_file = targetPath/IOFiles['metaData']\
            \n    if meta_data_file.exists():\
            \n        meta_data = read_json(meta_data_file)\
            \n    else:\
            \n        raise ValueError(f'Configuration file not found: {meta_data_file}')\
            \n    log_file = targetPath/IOFiles['log']\
            \n    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\
            \n    dataLoc = targetPath/IOFiles['inputData']\
            \n    if not dataLoc.exists():\
            \n        return {'Status':'Failure','Message':'Data location does not exists.'}\
            \n\
            \n    status = dict()\
            \n    df = pd.read_csv(dataLoc)\
            \n    prev_step_output = meta_data['transformation']"

    def getSuffixModules(self):
        """Modules required by the code emitted in addSuffixCode()."""
        modules = [{'module':'platform'}
                   ,{'module':'time'}
                   ]
        return modules

    def addSuffixCode(self, indent=1):
        """Emit the featureSelector() epilogue: persist data, meta data and status."""
        self.codeText += "\n\
            \n    csv_path = str(targetPath/IOFiles['outputData'])\
            \n    write_data(df, csv_path,index=False)\
            \n    status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'total_features':total_features, 'selected_features':selected_features}\
            \n    log.info(f'Selected data saved at {csv_path}')\
            \n    meta_data['featureengineering']['Status'] = status\
            \n    write_json(meta_data, str(targetPath/IOFiles['metaData']))\
            \n    log.info(f'output: {status}')\
            \n    return json.dumps(status)"

    def getMainCodeModules(self):
        """Modules required by the __main__ guard emitted in addMainCode()."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
                   ,{'module':'sys'}
                   ,{'module':'json'}
                   ,{'module':'logging'}
                   ,{'module':'argparse'}
                   ]
        return modules

    def add_variable(self, name, value, indent=1):
        """Emit an assignment; string values are quoted, others repr'd verbatim."""
        if isinstance(value, str):
            self.codeText += f"\n{self.tab * indent}{name} = '{value}'"
        else:
            self.codeText += f"\n{self.tab * indent}{name} = {value}"

    def modelBased(self, problem_type, indent=1):
        """Emit model-based feature selection for the given *problem_type*.

        NOTE(review): the generated snippets look inconsistent -- the
        classification branch emits ``selector()`` (calling a
        SelectFromModel instance is a TypeError) and the regression branch
        builds ``pipe`` but then fits a ``selector`` name it never defines.
        Kept unchanged pending confirmation of the intended generated code.
        """
        if problem_type == 'classification':
            self.codeText += f"\n{self.tab * indent}selector = SelectFromModel(ExtraTreesClassifier())"
            self.codeText += f"\n{self.tab * indent}selector()"
        if problem_type == 'regression':
            self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', SelectFromModel(Lasso()))])"
            self.codeText += f"\n{self.tab * indent}selector.fit(df[train_features],df[target_feature])"
            self.codeText += f"\n{self.tab * indent}selected_features = [x for x,y in zip(train_features, selector.get_support()) if y]"
            self.codeText += f"\n{self.tab * indent}df = df[selected_features + [target_feature]]"

    def featureReductionBased(self, reducer, n_components, indent=1):
        """Emit dimensionality-reduction code (currently PCA only).

        ``n_components`` semantics: 0 -> 'mle' auto selection; (0, 1) ->
        keep that fraction of variance; >= 1 -> keep that many components.
        """
        if reducer == 'pca':
            if n_components == 0:
                self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components='mle',svd_solver = 'full'))])"
            elif n_components < 1:
                self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components={n_components},svd_solver = 'full'))])"
            else:
                self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components=int({n_components})))])"
        # BUG FIX: the fit_transform call was appended without a newline or
        # indent, gluing it onto the previous generated line and producing
        # syntactically invalid generated code.
        self.codeText += f"\n{self.tab * indent}pipe.fit_transform(df)"

    def getPipe(self):
        """Name of the pipeline variable used in generated code."""
        return self.pipe
|
utility.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
utility_functions = {
'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
}
#TODO convert read and write functions in to class functions
functions_code = {
'read_json':{'imports':[{'mod':'json'}],'code':"\n\
\ndef read_json(file_path):\
\n data = None\
\n with open(file_path,'r') as f:\
\n data = json.load(f)\
\n return data\
\n"},
'write_json':{'imports':[{'mod':'json'}],'code':"\n\
\ndef write_json(data, file_path):\
\n with open(file_path,'w') as f:\
\n json.dump(data, f)\
\n"},
'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef read_data(file_path, encoding='utf-8', sep=','):\
\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\
\n"},
'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef write_data(data, file_path, index=False):\
\n return data.to_csv(file_path, index=index)\
\n\
\n#Uncomment and change below code for google storage\
\n#from google.cloud import storage\
\n#def write_data(data, file_path, index=False):\
\n# file_name= file_path.name\
\n# data.to_csv('output_data.csv')\
\n# storage_client = storage.Client()\
\n# bucket = storage_client.bucket('aion_data')\
\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\
\n# return data\
\n"},
'is_file_name_url':{'imports':[],'code':"\n\
\ndef is_file_name_url(file_name):\
\n supported_urls_starts_with = ('gs://','https://','http://')\
\n return file_name.startswith(supported_urls_starts_with)\
\n"},
'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\n\
\nclass logger():\
\n #setup the logger\
\n def __init__(self, log_file, mode='w', logger_name=None):\
\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\
\n self.log = logging.getLogger(logger_name)\
\n\
\n #get logger\
\n def getLogger(self):\
\n return self.log\
\n\
\n def info(self, msg):\
\n self.log.info(msg)\
\n\
\n def error(self, msg, exc_info=False):\
\n self.log.error(msg,exc_info)\
\n\
\n # format and log dataframe\
\n def log_dataframe(self, df, rows=2, msg=None):\
\n buffer = io.StringIO()\
\n df.info(buf=buffer)\
\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\
\n log_text += '\\n\\t'+str(df.head(rows)).replace('\\n','\\n\\t')\
\n log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\
\n self.log.info(log_text)\
\n"},
}
class utility_function():
    """Collects the shared utility-function source needed by one pipeline module.

    *module* must be a key of ``utility_functions``; unknown modules are
    accepted but produce empty code.
    """
    def __init__(self, module):
        # None marks an unsupported module: get_code() then returns ''
        if module in utility_functions:
            self.module_name = module
        else:
            self.module_name = None
        self.importer = importModule()
        self.codeText = ""

    def get_code(self):
        """Return import lines plus the utility-function bodies for this module.

        BUG FIX: the original appended into ``self.codeText`` on every call,
        so calling get_code() twice duplicated every generated function.
        The text is now rebuilt from scratch each call.
        """
        code = ""
        if self.module_name:
            self.codeText = "".join(self.get_function_code(name)
                                    for name in utility_functions[self.module_name])
            code = self.importer.getCode()
            code += self.codeText
        return code

    def get_function_code(self, name):
        """Return the source of one named utility function, registering its imports."""
        code = ""
        if name in functions_code:
            code += functions_code[name]['code']
            if self.importer:
                for module in functions_code[name].get('imports', []):
                    self.importer.addModule(module['mod'],
                                            mod_from=module.get('mod_from', None),
                                            mod_as=module.get('mod_as', None))
        return code

    def get_importer(self):
        """Expose the import collector for the caller to merge or render."""
        return self.importer
if __name__ == '__main__':
    # Smoke test: print the utility code generated for the data-loading module.
    obj = utility_function('load_data')
    # BUG FIX: was obj.get_utility_code(), a method that does not exist
    # (the class defines get_code), so running this file always raised
    # AttributeError.
    p = obj.get_code()
print(p) |
drift_analysis.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class drift():
    """Source-code generator for the drift-monitoring stage of an MLAC pipeline.

    Unlike the statement-by-statement generators in this package, this class
    mostly returns large fixed source-text templates: an input-drift
    detection class and the ``monitoring()`` entry point of the generated
    module.
    """
    def __init__(self, tab_size=4):
        self.tab = ' ' * tab_size
        self.codeText = ''

    def getInputFiles(self):
        """Return a generated ``IOFiles = {...}`` assignment for the drift stage."""
        IOFiles = {
            "log": "aion.log",
            "trainingData":"rawData.dat",
            "production": "production.json",
            "monitoring":"monitoring.json",
            "prodData": "prodData",
            "prodDataGT":"prodDataGT"
        }
        text = 'IOFiles = '
        if not IOFiles:
            text += '{ }'
        else:
            text += json.dumps(IOFiles, indent=4)
        return text

    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles assignment, optionally shifted by *indent* tab stops."""
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text

    def addStatement(self, statement, indent=1):
        """Append one generated statement at *indent* tab stops."""
        self.codeText += '\n' + self.tab * indent + statement

    def getCode(self):
        """Return the accumulated generated code."""
        return self.codeText

    # temporary code
    def get_input_drift_import_modules(self):
        """Modules required by the input-drift template returned below."""
        return [
            {'module': 'sys', 'mod_from': None, 'mod_as': None},
            {'module': 'json', 'mod_from': None, 'mod_as': None},
            {'module': 'mlflow', 'mod_from': None, 'mod_as': None},
            {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'},
            {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
            {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
            {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
            {'module': 'warnings', 'mod_from': None, 'mod_as': None},
            {'module': 'platform', 'mod_from': None, 'mod_as': None }
        ]

    def get_input_drift_code(self):
        """Return the fixed input-drift detection template (an ``inputdrift``
        class using a KS test plus distribution fitting, and a module-level
        ``check_drift`` helper)."""
        return """
class inputdrift():
    def __init__(self,base_config):
        if 'mlflowURL' in base_config:
            self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']
            self.currentDataLocation = base_config['currentDataLocation']
            home = Path.home()
            if platform.system() == 'Windows':
                from pathlib import WindowsPath
                output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'
                output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase
            else:
                from pathlib import PosixPath
                output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'
                output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase
            if not output_model_dir.exists():
                raise ValueError(f'Configuration file not found at {output_model_dir}')
            tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')
            registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')
            mlflow.set_tracking_uri(tracking_uri)
            mlflow.set_registry_uri(registry_uri)
            client = mlflow.tracking.MlflowClient(
                tracking_uri=tracking_uri,
                registry_uri=registry_uri,
            )
            model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)
            model = mlflow.pyfunc.load_model(model_version_uri)
            run = client.get_run(model.metadata.run_id)
            if run.info.artifact_uri.startswith('file:'):
                artifact_path = Path(run.info.artifact_uri[len('file:///') : ])
            else:
                artifact_path = Path(run.info.artifact_uri)
            self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')

    def get_input_drift(self,current_data, historical_data):
        curr_num_feat = current_data.select_dtypes(include='number')
        hist_num_feat = historical_data.select_dtypes(include='number')
        num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]
        alert_count = 0
        data = {
            'current':{'data':current_data},
            'hist': {'data': historical_data}
        }
        dist_changed_columns = []
        dist_change_message = []
        for feature in num_features:
            curr_static_value = round(st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue,3)
            if (curr_static_value < 0.05):
                try:
                    distribution = {}
                    distribution['hist'] = self.DistributionFinder( historical_data[feature])
                    distribution['curr'] = self.DistributionFinder( current_data[feature])
                    if(distribution['hist']['name'] == distribution['curr']['name']):
                        pass
                    else:
                        alert_count = alert_count + 1
                        dist_changed_columns.append(feature)
                        changed_column = {}
                        changed_column['Feature'] = feature
                        changed_column['KS_Training'] = curr_static_value
                        changed_column['Training_Distribution'] = distribution['hist']['name']
                        changed_column['New_Distribution'] = distribution['curr']['name']
                        dist_change_message.append(changed_column)
                except:
                    pass
        if alert_count:
            resultStatus = dist_change_message
        else :
            resultStatus='Model is working as expected'
        return(alert_count, resultStatus)

    def DistributionFinder(self,data):
        best_distribution =''
        best_sse =0.0
        if(data.dtype in ['int','int64']):
            distributions= {'bernoulli':{'algo':st.bernoulli},
                            'binom':{'algo':st.binom},
                            'geom':{'algo':st.geom},
                            'nbinom':{'algo':st.nbinom},
                            'poisson':{'algo':st.poisson}
                            }
            index, counts = np.unique(data.astype(int),return_counts=True)
            if(len(index)>=2):
                best_sse = np.inf
                y1=[]
                total=sum(counts)
                mean=float(sum(index*counts))/total
                variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
                dispersion=mean/float(variance)
                theta=1/float(dispersion)
                r=mean*(float(theta)/1-theta)
                for j in counts:
                    y1.append(float(j)/total)
                distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)
                distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))
                distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))
                distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)
                distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)
                sselist = []
                for dist in distributions.keys():
                    distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))
                    if np.isnan(distributions[dist]['sess']):
                        distributions[dist]['sess'] = float('inf')
                best_dist = min(distributions, key=lambda v: distributions[v]['sess'])
                best_distribution = best_dist
                best_sse = distributions[best_dist]['sess']
            elif (len(index) == 1):
                best_distribution = 'Constant Data-No Distribution'
                best_sse = 0.0
        elif(data.dtype in ['float64','float32']):
            distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
            best_distribution = st.norm.name
            best_sse = np.inf
            nrange = data.max() - data.min()
            y, x = np.histogram(data.astype(float), bins='auto', density=True)
            x = (x + np.roll(x, -1))[:-1] / 2.0
            for distribution in distributions:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore')
                    params = distribution.fit(data.astype(float))
                    arg = params[:-2]
                    loc = params[-2]
                    scale = params[-1]
                    pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                    sse = np.sum(np.power(y - pdf, 2.0))
                    if( sse < best_sse):
                        best_distribution = distribution.name
                        best_sse = sse
        return {'name':best_distribution, 'sse': best_sse}

def check_drift( config):
    inputdriftObj = inputdrift(config)
    historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath,skipinitialspace = True,na_values=['-','?'])
    currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation,skipinitialspace = True,na_values=['-','?'])
    historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
    currentdataFrame.columns = currentdataFrame.columns.str.strip()
    dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)
    if message == 'Model is working as expected':
        output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}
    else:
        output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}
    return(output_json)
"""

    def get_main_drift_code(self, problem_type, smaller_is_better=True):
        """Return the generated monitoring entry point for *problem_type*.

        The classification template always treats higher scores as better;
        the regression template honours *smaller_is_better*.

        NOTE(review): the generated ``monitoring()`` references
        ``actual_data_path`` and ``predict_data_path`` in one elif branch,
        but those names are never defined in the template -- confirm
        whether that branch is reachable/intended.
        """
        text = ''
        if problem_type == 'classification':
            text += """
def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5):
    testscore = production['score']
    current_score = current_matrices[scoring_criteria]
    threshold_value = testscore * threshold / 100.0
    if current_score > (testscore - threshold_value) :
        return True
    else:
        return False

def get_metrices(actual_values, predicted_values):
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import precision_score
    from sklearn.metrics import recall_score
    from sklearn.metrics import f1_score
    result = {}
    accuracy_score = accuracy_score(actual_values, predicted_values)
    avg_precision = precision_score(actual_values, predicted_values,
                                    average='macro')
    avg_recall = recall_score(actual_values, predicted_values,
                              average='macro')
    avg_f1 = f1_score(actual_values, predicted_values,
                      average='macro')
    result['accuracy'] = round((accuracy_score*100),2)
    result['precision'] = round((avg_precision*100),2)
    result['recall'] = round((avg_recall*100),2)
    result['f1'] = round((avg_f1*100),2)
    return result
"""
        else:
            text += """
def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5):
    testscore = production['score']
    current_score = current_matrices[scoring_criteria]
    threshold_value = testscore * threshold / 100.0
"""
            # direction of the score comparison depends on the metric
            if smaller_is_better:
                text += """
    if current_score < (testscore + threshold_value) :"""
            else:
                text += """
    if current_score > (testscore - threshold_value) :"""
            text += """
        return True
    else:
        return False

def get_metrices(actual_values, predicted_values):
    import numpy as np
    result = {}
    me = np.mean(predicted_values - actual_values)
    sde = np.std(predicted_values - actual_values, ddof = 1)
    abs_err = np.abs(predicted_values - actual_values)
    mae = np.mean(abs_err)
    sdae = np.std(abs_err, ddof = 1)
    abs_perc_err = 100.0 * np.abs(predicted_values - actual_values) / actual_values
    mape = np.mean(abs_perc_err)
    sdape = np.std(abs_perc_err, ddof = 1)
    result['mean_error'] = me
    result['mean_abs_error'] = mae
    result['mean_abs_perc_error'] = mape
    result['error_std'] = sde
    result['abs_error_std'] = sdae
    result['abs_perc_error_std'] = sdape
    return result
"""
        text += """
def monitoring(config, log=None):
    targetPath = Path('aion')/config['targetPath']
    targetPath.mkdir(parents=True, exist_ok=True)
    log_file = targetPath/IOFiles['log']
    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    output_json = {}
    trainingDataLocation = targetPath/IOFiles['trainingData']
    monitoring = targetPath/IOFiles['monitoring']
    log.info(f'Input Location External: {config["inputUriExternal"]}')
    trainingStatus = 'False'
    dataFileLocation = ''
    driftStatus = 'No Drift'
    if monitoring.exists():
        monitoring_data = read_json(monitoring)
        if monitoring_data.get('runNo', False):
            reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None))
            production= targetPath/IOFiles['production']
            proddataDF = pd.DataFrame()
            predicted_data = pd.DataFrame()
            if production.exists():
                production = read_json(production)
                if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']):
                    predicted_data = reader.read(IOFiles['prodData'])
                    actual_data = reader.read(IOFiles['prodDataGT'])
                    common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()]
                    proddataDF = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner')
                    currentPerformance = {}
                    currentPerformance = get_metrices(proddataDF[config['target_feature']], proddataDF['prediction'])
                    if is_drift_within_limits(production, currentPerformance,config['scoring_criteria']):
                        log.info(f'OutputDrift: No output drift found')
                        output_json.update({'outputDrift':'Model score is with in limits'})
                    else:
                        log.info(f'OutputDrift: Found Output Drift')
                        log.info(f'Original Test Score: {production["score"]}')
                        log.info(f'Current Score: {currentPerformance[config["scoring_criteria"]]}')
                        output_json.update({'outputDrift':{'Meassage': 'Model output is drifted','trainedScore':production["score"], 'currentScore':currentPerformance[config["scoring_criteria"]]}})
                        trainingStatus = 'True'
                        driftStatus = 'Output Drift'
                else:
                    if reader.file_exists(IOFiles['prodData']):
                        predicted_data = reader.read(IOFiles['prodData'])
                    log.info(f'OutputDrift: Prod Data not found')
                    output_json.update({'outputDrift':'Prod Data not found'})
            else:
                log.info(f'Last Time pipeline not executed completely')
                output_json.update({'Msg':'Pipeline is not executed completely'})
                trainingStatus = 'True'
                if config['inputUriExternal']:
                    dataFileLocation = config['inputUriExternal']
                elif 's3' in config.keys():
                    dataFileLocation = 'cloud'
                else:
                    dataFileLocation = config['inputUri']
            if trainingStatus == 'False':
                historicaldataFrame=pd.read_csv(trainingDataLocation)
                if config['inputUriExternal']:
                    currentdataFrame=pd.read_csv(config['inputUriExternal'])
                elif not predicted_data.empty:
                    currentdataFrame = predicted_data.copy()
                elif 's3' in config.keys():
                    reader = dataReader(reader_type='s3',target_path=config['targetPath'], config=config['s3'])
                    currentdataFrame = reader.read(config['s3']['file_name'])
                else:
                    currentdataFrame=pd.read_csv(config['inputUri'])
                inputdriftObj = inputdrift(config)
                dataalertcount,inputdrift_message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)
                if inputdrift_message == 'Model is working as expected':
                    log.info(f'InputDrift: No input drift found')
                    output_json.update({'Status':'SUCCESS','inputDrift':'Model is working as expected'})
                else:
                    log.info(f'InputDrift: Input drift found')
                    log.info(f'Affected Columns {inputdrift_message}')
                    output_json.update({'inputDrift':{'Affected Columns':inputdrift_message}})
                    trainingStatus = 'True'
                    driftStatus = 'Input Drift'
                if config['inputUriExternal']:
                    dataFileLocation = config['inputUriExternal']
                elif actual_data_path.exists() and predict_data_path.exists():
                    dataFileLocation = ''
                elif 's3' in config.keys():
                    dataFileLocation = 'cloud'
                else:
                    dataFileLocation = config['inputUri']
        else:
            log.info(f'Pipeline Executing first Time')
            output_json.update({'Msg':'Pipeline executing first time'})
            trainingStatus = 'True'
            if config['inputUriExternal']:
                dataFileLocation = config['inputUriExternal']
            elif 's3' in config.keys():
                dataFileLocation = 'cloud'
            else:
                dataFileLocation = config['inputUri']
    else:
        log.info(f'Pipeline Executing first Time')
        output_json.update({'Msg':'Pipeline executing first time'})
        trainingStatus = 'True'
        if config['inputUriExternal']:
            dataFileLocation = config['inputUriExternal']
        elif 's3' in config.keys():
            dataFileLocation = 'cloud'
        else:
            dataFileLocation = config['inputUri']
    if monitoring.exists():
        monitoring_data['runNo'] = int(monitoring_data.get('runNo', '0')) + 1
    else:
        monitoring_data = {}
        monitoring_data['runNo'] = 1
    monitoring_data['prod_db_type'] = config.get('prod_db_type', 'sqlite')
    monitoring_data['db_config'] = config.get('db_config', {})
    monitoring_data['mlflow_config'] = config.get('mlflow_config', None)
    if 's3' in config.keys():
        monitoring_data['s3'] = config['s3']
    monitoring_data['dataLocation'] = dataFileLocation
    monitoring_data['driftStatus'] = driftStatus
    write_json(monitoring_data,targetPath/IOFiles['monitoring'])
    output = {'Status':'SUCCESS'}
    output.update(output_json)
    return(json.dumps(output))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--inputUri', help='Training Data Location')
    args = parser.parse_args()
    config_file = Path(__file__).parent/'config.json'
    if not Path(config_file).exists():
        raise ValueError(f'Config file is missing: {config_file}')
    config = read_json(config_file)
    config['inputUriExternal'] = None
    if args.inputUri:
        if args.inputUri != '':
            config['inputUriExternal'] = args.inputUri
    log = None
    try:
        print(monitoring(config, log))
    except Exception as e:
        if log:
            log.error(e, exc_info=True)
        status = {'Status':'Failure','Message':str(e)}
        print(json.dumps(status))
        raise Exception(str(e))
"""
        return text |
data_reader.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
supported_reader = ['sqlite', 'influx','s3']
# Source-code templates for each supported storage reader. Every entry maps a
# reader name to the modules its code needs ('imports': list of
# {'mod', 'mod_from', 'mod_as'} specs) and the python source ('code') that is
# written verbatim into the generated pipeline file, so each template must be
# valid stand-alone python.
functions_code = {
# Dispatch factory: returns the reader instance for a given reader_type.
'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""
class dataReader():
    def get_reader(self, reader_type, target_path=None, config=None):
        if reader_type == 'sqlite':
            return sqlite_writer(target_path=target_path)
        elif reader_type == 'influx':
            return Influx_writer(config=config)
        elif reader_type == 'gcs':
            return gcs(config=config)
        elif reader_type == 'azure':
            return azure(config=config)
        elif reader_type == 's3':
            return s3bucket(config=config)
        else:
            raise ValueError(reader_type)
"""
},
# Local sqlite database reader/writer (one table per logical file).
'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\n\
class sqlite_writer():
    def __init__(self, target_path):
        self.target_path = Path(target_path)
        database_file = self.target_path.stem + '.db'
        self.db = sqlite_db(self.target_path, database_file)
    def file_exists(self, file):
        if file:
            return self.db.table_exists(file)
        else:
            return False
    def read(self, file):
        return self.db.read(file)
    def write(self, data, file):
        self.db.write(data, file)
    def close(self):
        self.db.close()
class sqlite_db():
    def __init__(self, location, database_file=None):
        if not isinstance(location, Path):
            location = Path(location)
        if database_file:
            self.database_name = database_file
        else:
            self.database_name = location.stem + '.db'
        db_file = str(location/self.database_name)
        self.conn = sqlite3.connect(db_file)
        self.cursor = self.conn.cursor()
        self.tables = []
    def table_exists(self, name):
        if name in self.tables:
            return True
        elif name:
            query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
            listOfTables = self.cursor.execute(query).fetchall()
            if len(listOfTables) > 0 :
                self.tables.append(name)
                return True
        return False
    def read(self, table_name):
        return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
    def create_table(self,name, columns, dtypes):
        query = f'CREATE TABLE IF NOT EXISTS {name} ('
        for column, data_type in zip(columns, dtypes):
            query += f"'{column}' TEXT,"
        query = query[:-1]
        query += ');'
        self.conn.execute(query)
        return True
    def write(self,data, table_name):
        if not self.table_exists(table_name):
            self.create_table(table_name, data.columns, data.dtypes)
        tuple_data = list(data.itertuples(index=False, name=None))
        insert_query = f'INSERT INTO {table_name} VALUES('
        for i in range(len(data.columns)):
            insert_query += '?,'
        insert_query = insert_query[:-1] + ')'
        self.cursor.executemany(insert_query, tuple_data)
        self.conn.commit()
        return True
    def delete(self, name):
        pass
    def close(self):
        self.conn.close()
"""
},
# InfluxDB reader/writer. Fixes vs previous revision: `datetime` added to the
# import specs (used by influx_db.write), Influx_writer now stores the config
# it reads `read_time` from, and table_exists queries via client.query().
'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'datetime', 'mod_from': 'datetime'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\n\
class Influx_writer():
    def __init__(self, config):
        self.db = influx_db(config)
        self.db_config = config
    def file_exists(self, file):
        if file:
            return self.db.table_exists(file)
        else:
            return False
    def read(self, file):
        query = "SELECT * FROM {}".format(file)
        if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
            query += f" time > now() - {self.db_config['read_time']}"
        return self.db.read(query)
    def write(self, data, file):
        self.db.write(data, file)
    def close(self):
        pass
class influx_db():
    def __init__(self, config):
        self.host = config['host']
        self.port = config['port']
        self.user = config.get('user', None)
        self.password = config.get('password', None)
        self.token = config.get('token', None)
        self.database = config['database']
        self.measurement = config['measurement']
        self.tags = config['tags']
        self.client = self.get_client()
    def table_exists(self, name):
        query = f"SHOW MEASUREMENTS ON {self.database}"
        result = self.client.query(query)
        for measurement in result['measurements']:
            if measurement['name'] == name:
                return True
        return False
    def read(self, query)->pd.DataFrame:
        cursor = self.client.query(query)
        points = cursor.get_points()
        my_list=list(points)
        df=pd.DataFrame(my_list)
        return df
    def get_client(self):
        headers = None
        if self.token:
            headers={"Authorization": self.token}
        client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers)
        databases = client.get_list_database()
        databases = [x['name'] for x in databases]
        if self.database not in databases:
            client.create_database(self.database)
        return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers)
    def write(self,data, measurement=None):
        if isinstance(data, pd.DataFrame):
            sorted_col = data.columns.tolist()
            sorted_col.sort()
            data = data[sorted_col]
            data = data.to_dict(orient='records')
        if not measurement:
            measurement = self.measurement
        for row in data:
            if 'time' in row.keys():
                p = '%Y-%m-%dT%H:%M:%S.%fZ'
                time_str = datetime.strptime(row['time'], p)
                del row['time']
            else:
                time_str = None
            if 'model_ver' in row.keys():
                self.tags['model_ver']= row['model_ver']
                del row['model_ver']
            json_body = [{
                'measurement': measurement,
                'time': time_str,
                'tags': self.tags,
                'fields': row
            }]
            self.client.write_points(json_body)
    def delete(self, name):
        pass
    def close(self):
        self.client.close()
"""
},
# AWS S3 bucket reader (read-only).
's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
class s3bucket():
    def __init__(self, config={}):
        if 's3' in config.keys():
            config = config['s3']
        aws_access_key_id = config.get('aws_access_key_id','')
        aws_secret_access_key = config.get('aws_secret_access_key','')
        bucket_name = config.get('bucket_name','')
        if not aws_access_key_id:
            raise ValueError('aws_access_key_id can not be empty')
        if not aws_secret_access_key:
            raise ValueError('aws_secret_access_key can not be empty')
        self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key))
        self.bucket_name = bucket_name
    def read(self, file_name):
        try:
            response = self.client.get_object(Bucket=self.bucket_name, Key=file_name)
            return pd.read_csv(response['Body'])
        except ClientError as ex:
            if ex.response['Error']['Code'] == 'NoSuchBucket':
                raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage")
            elif ex.response['Error']['Code'] == 'NoSuchKey':
                raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'")
            else:
                raise
"""
},
# Azure Data Lake reader. Fixes vs previous revision: azure is now a class (it
# was declared with `def`, so its `self` methods could never run), the file
# system client is accessed via self, downloads go through in-memory buffers
# instead of the undefined `csv_local`/`avro_local` paths, unknown extensions
# are skipped instead of re-appending a stale dataframe, and frames are merged
# with pd.concat (DataFrame.append was removed in pandas 2.x).
'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
class azure():
    def __init__(self,config={}):
        if 'azure' in config.keys():
            config = config['azure']
        account_name = config.get('account_name','')
        account_key = config.get('account_key','')
        container_name = config.get('container_name','')
        if not account_name:
            raise ValueError('Account name can not be empty')
        if not account_key:
            raise ValueError('Account key can not be empty')
        if not container_name:
            raise ValueError('Container name can not be empty')
        service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key)
        self.file_system_client = service_client.get_file_system_client(container_name)
    def read(self, directory_name):
        root_dir = str(directory_name)
        file_paths = self.file_system_client.get_paths(path=root_dir)
        main_df = pd.DataFrame()
        for path in file_paths:
            if not path.is_directory:
                file_client = self.file_system_client.get_file_client(path.name)
                file_ext = Path(path.name).suffix
                if file_ext in [".csv", ".tsv"]:
                    stream = io.BytesIO()
                    file_client.download_file().readinto(stream)
                    data = stream.getvalue().decode()
                    row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t'])
                    processed_df = pd.read_csv(io.StringIO(data), sep=row_delimiter)
                elif file_ext == ".parquet":
                    stream = io.BytesIO()
                    file_client.download_file().readinto(stream)
                    processed_df = pd.read_parquet(stream, engine='pyarrow')
                elif file_ext == ".avro":
                    stream = io.BytesIO()
                    file_client.download_file().readinto(stream)
                    stream.seek(0)
                    processed_df = pdx.read_avro(stream)
                else:
                    continue
                if main_df.empty:
                    main_df = pd.DataFrame(processed_df)
                else:
                    main_df = pd.concat([main_df, processed_df], ignore_index=True)
        return main_df
"""
},
# Google Cloud Storage reader. Fixes vs previous revision: download_as_text()
# returns the csv content itself, so it is wrapped in io.StringIO before
# pd.read_csv (the text was previously treated as a file path); the unused,
# required `bucket_name` argument of read() is dropped so read(file) matches
# the interface of the other readers.
'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
class gcs():
    def __init__(self, config={}):
        if 'gcs' in config.keys():
            config = config['gcs']
        account_key = config.get('account_key','')
        bucket_name = config.get('bucket_name','')
        if not account_key:
            raise ValueError('Account key can not be empty')
        if not bucket_name:
            raise ValueError('bucket name can not be empty')
        storage_client = storage.Client.from_service_account_json(account_key)
        self.bucket = storage_client.get_bucket(bucket_name)
    def read(self, file_name):
        data = self.bucket.blob(file_name).download_as_text()
        return pd.read_csv(io.StringIO(data), encoding = 'utf-8', sep = ',')
"""
}
}
class data_reader():
    """Assembles the source code of a `dataReader(...)` dispatch function plus
    the implementation of every selected storage reader.

    The generated text is returned by `get_code()`; the modules the generated
    code needs are collected in the `importModule` instance exposed through
    `get_importer()`.
    """
    def __init__(self, reader_type=None):
        """`reader_type` may be a single reader name, a list of names, or
        None/empty to select every supported reader.

        Raises ValueError for any name outside `supported_reader`.
        """
        # default changed from a mutable `[]` to None (same behavior: both are
        # falsy and select all supported readers)
        self.supported_readers = supported_reader
        if isinstance(reader_type, str):
            self.readers = [reader_type]
        elif not reader_type:
            self.readers = self.supported_readers
        else:
            self.readers = reader_type
        unsupported_reader = [x for x in self.readers if x not in self.supported_readers]
        if unsupported_reader:
            raise ValueError(f"reader type '{unsupported_reader}' is not supported\nSupported readers are {self.supported_readers}")
        self.codeText = ""
        self.importer = importModule()
    def get_reader_code(self, readers):
        """Return the source of a `dataReader(reader_type, target_path, config)`
        factory covering only the requested `readers`."""
        reader_code = {
            'sqlite': 'return sqlite_writer(target_path=target_path)',
            'influx': 'return Influx_writer(config=config)',
            'gcs': 'return gcs(config=config)',
            'azure': 'return azure(config=config)',
            's3': 'return s3bucket(config=config)'
        }
        code = "\n\ndef dataReader(reader_type, target_path=None, config=None):\n"
        for i, reader in enumerate(readers):
            keyword = 'if' if i == 0 else 'elif'
            code += f"    {keyword} reader_type == '{reader}':\n"
            code += f"        {reader_code[reader]}\n"
        if readers:
            code += "    else:\n"
        # emit an f-string so the generated message interpolates the actual
        # reader_type value (it was previously a plain string showing the
        # literal text '{reader_type}')
        code += f"""        raise ValueError(f"'{{reader_type}}' not added during code generation")\n"""
        return code
    def get_code(self):
        """Return the full reader source: dispatch function followed by every
        selected reader implementation, preceded by the import statements."""
        code = self.get_reader_code(self.readers)
        for reader in self.readers:
            code += self.get_function_code(reader)
        self.codeText += self.importer.getCode()
        self.codeText += code
        return self.codeText
    def get_function_code(self, name):
        """Return the code template for one reader and register the modules it
        needs with the import collector."""
        code = ""
        if name in functions_code.keys():
            code += functions_code[name]['code']
            if self.importer:
                for module in functions_code[name].get('imports', []):
                    self.importer.addModule(module['mod'],
                                            mod_from=module.get('mod_from', None),
                                            mod_as=module.get('mod_as', None))
        return code
    def get_importer(self):
        """Expose the import collector so callers can merge requirements."""
        return self.importer
|
imports.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\n"
else:
require += f"{mod}=={version(mod)}\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\n\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\n\n#local modules" + local_modules + '\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
|
transformer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class transformer():
    """Emits the source code of the data-transformation stage of a generated
    MLOps pipeline: loading the raw data, profiling/preprocessing it, and
    persisting the transformed train/test sets plus the fitted preprocessor."""
    def __init__(self, indent=0, tab_size=4):
        # `indent` is kept for interface compatibility; indentation of emitted
        # statements is controlled through `tab_size` via self.tab.
        self.df_name = 'df'
        self.tab = ' ' * tab_size
        self.codeText = ""
        self.transformers = []
        self.TxCols = []
        self.imputers = {}
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','trainData' : 'transformedData.dat','testData' : 'test.dat','preprocessor' : 'preprocessor.pkl'})
    def addInputFiles(self, files):
        """Register input files: dict of logical name -> file name."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v
    def addOutputFiles(self, files):
        """Register output files: dict of logical name -> file name."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            # fix: these entries previously went into self.input_files, so
            # output files were never recorded and getOutputFiles() always
            # rendered an empty dict
            self.output_files[k] = v
    def getInputFiles(self):
        """Render the registered input files as an `IOFiles = {...}` statement."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text
    def getOutputFiles(self):
        """Render the registered output files as an `output_file = {...}` statement."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text
    def getInputOutputFiles(self, indent=0):
        # NOTE(review): the replace inserts the indent before each newline
        # (i.e. as trailing spaces on the previous line) rather than after it;
        # kept as-is for compatibility with existing generated output.
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        # Source of the generated validateConfig() helper: reads config.json
        # sitting next to the generated script.
        text = "\n\
        \ndef validateConfig():\
        \n    config_file = Path(__file__).parent/'config.json'\
        \n    if not Path(config_file).exists():\
        \n        raise ValueError(f'Config file is missing: {config_file}')\
        \n    config = read_json(config_file)\
        \n    return config"
        return text
    def getPrefixModules(self):
        """Modules needed by the prefix (load + split) section."""
        modules = [
            {'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'pandas', 'mod_as':'pd'}
            ,{'module':'numpy', 'mod_as':'np'}
            ,{'module':'scipy'}
        ]
        return modules
    def addPrefixCode(self, indent=1):
        """Append the opening of the generated transformation() function:
        config/meta-data validation, data loading and the test-split branch."""
        self.codeText += """
def transformation(log):
    config = validateConfig()
    targetPath = Path('aion')/config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    meta_data_file = targetPath/IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = read_json(meta_data_file)
    else:
        raise ValueError(f'Configuration file not found: {meta_data_file}')
    log_file = targetPath/IOFiles['log']
    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    dataLoc = targetPath/IOFiles['inputData']
    if not dataLoc.exists():
        return {'Status':'Failure','Message':'Data location does not exists.'}
    status = dict()
    df = read_data(dataLoc)
    log.log_dataframe(df)
    target_feature = config['target_feature']
    if config['test_ratio'] == 0.0:
        train_data = df
        test_data = pd.DataFrame()
    else:
        """
    def getSuffixModules(self):
        """Modules needed by the suffix (persist + meta-data) section."""
        modules = [{'module':'pandas','mod_as':'pd'}
            ,{'module':'json'}
            ,{'module':'joblib'}
        ]
        return modules
    def addSuffixCode(self, encoder=False, indent=1):
        """Append the closing of transformation(): fit/apply the preprocessor,
        persist the train/test data and update the pipeline meta-data.

        When `encoder` is True the fitted target label-encoder is saved and
        used to transform the test target as well.
        """
        self.codeText += """
    train_data, preprocess_pipe, label_encoder = profilerObj.transform()
    if not preprocess_pipe:
        raise ValueError('Pipeline not created')
    joblib.dump(preprocess_pipe, targetPath/IOFiles['preprocessor'])
    test_data.reset_index(inplace=True)
"""
        if encoder:
            self.codeText += """
    joblib.dump(label_encoder, targetPath/IOFiles['targetEncoder'])
    if not test_data.empty:
        ytest = label_encoder.transform(test_data[target_feature])
"""
        else:
            self.codeText += """
    if not test_data.empty:
        ytest = test_data[target_feature]
"""
        self.codeText += """
    test_data.astype(profilerObj.train_features_type)
    test_data = preprocess_pipe.transform(test_data)
    if isinstance(test_data, scipy.sparse.spmatrix):
        test_data = test_data.toarray()
    preprocess_out_columns = train_data.columns.tolist()
    preprocess_out_columns.remove(target_feature)
    write_data(train_data,targetPath/IOFiles['trainData'],index=False)
    if isinstance( test_data, np.ndarray):
        test_data = pd.DataFrame(test_data, columns=preprocess_out_columns)
        test_data[target_feature] = ytest
    write_data(test_data,targetPath/IOFiles['testData'],index=False)
    log.log_dataframe(train_data)
    status = {'Status':'Success','trainData':IOFiles['trainData'],'testData':IOFiles['testData']}
    meta_data['transformation'] = {}
    meta_data['transformation']['cat_features'] = train_data.select_dtypes('category').columns.tolist()
    meta_data['transformation']['preprocessor'] = IOFiles['preprocessor']
    meta_data['transformation']['preprocess_out_columns'] = preprocess_out_columns
"""
        if encoder:
            self.codeText += """
    meta_data['transformation']['target_encoder'] = IOFiles['targetEncoder']
"""
        self.codeText += """
    meta_data['transformation']['Status'] = status
    write_json(meta_data, str(targetPath/IOFiles['metaData']))
    log.info(f"Transformed data saved at {targetPath/IOFiles['trainData']}")
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def getMainCodeModules(self):
        """Modules needed by the generated __main__ guard."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'sys'}
            ,{'module':'json'}
            ,{'module':'logging'}
            ,{'module':'argparse'}
        ]
        return modules
    def addMainCode(self, indent=1):
        """Append the generated script entry point (__main__ guard)."""
        self.codeText += "\n\
        \nif __name__ == '__main__':\
        \n    log = None\
        \n    try:\
        \n        print(transformation(log))\
        \n    except Exception as e:\
        \n        if log:\
        \n            log.error(e, exc_info=True)\
        \n        status = {'Status':'Failure','Message':str(e)}\
        \n        print(json.dumps(status))"
    def addValidateConfigCode(self, indent=1):
        """Append the validateConfig() helper to the function-level code."""
        self.function_code += self.__addValidateConfigCode()
    def addLocalFunctionsCode(self):
        """Add all local helper functions required by the generated script."""
        self.addValidateConfigCode()
    def addStatement(self, statement, indent=1):
        """Append one statement to the code body at the given indent level."""
        self.codeText += '\n' + self.tab * indent + statement
    def getCode(self, indent=1):
        """Return the helper functions followed by the accumulated code body."""
        return self.function_code + '\n' + self.codeText
    def getDFName(self):
        """Name of the dataframe variable used inside generated code."""
        return self.df_name
class data_profiler():
def __init__(self, importer, text_features=False):
self.importer = importer
self.codeText = ""
self.text_features = text_features
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def get_module_import_statement(self, mod):
text = ""
if not mod.get('module', None):
return text
if mod.get('mod_from', None):
text += f"from {mod['mod_from']} "
text += f"import {mod['module']} "
if mod.get('mod_as', None):
text += f"as {mod['mod_as']}"
text += "\n"
return text
def get_import_modules(self):
profiler_importes = [
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'w2n', 'mod_from': 'word2number', 'mod_as': None},
{'module': 'LabelEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'OrdinalEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'OneHotEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'SimpleImputer', 'mod_from': 'sklearn.impute', 'mod_as': None },
{'module': 'KNNImputer', 'mod_from': 'sklearn.impute', 'mod_as': None },
{'module': 'Pipeline', 'mod_from': 'sklearn.pipeline', 'mod_as': None },
{'module': 'FeatureUnion', 'mod_from': 'sklearn.pipeline', 'mod_as': None },
{'module': 'MinMaxScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'StandardScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'PowerTransformer', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'ColumnTransformer', 'mod_from': 'sklearn.compose', 'mod_as': None },
{'module': 'TransformerMixin', 'mod_from': 'sklearn.base', 'mod_as': None },
{'module': 'IsolationForest', 'mod_from': 'sklearn.ensemble', 'mod_as': None },
{'module': 'TargetEncoder', 'mod_from': 'category_encoders', 'mod_as': None }
]
if self.text_features:
profiler_importes.append({'module': 'textProfiler', 'mod_from': 'text.textProfiler', 'mod_as': None })
profiler_importes.append({'module': 'textCombine', 'mod_from': 'text.textProfiler', 'mod_as': None })
return profiler_importes
def get_importer(self):
return self.importer
def get_code(self):
common_importes = self.get_import_modules()
for module in common_importes:
mod_name = module['module']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
if module['module'] in ['textProfiler','textCombine']:
self.importer.addLocalModule(mod_name, mod_from=mod_from, mod_as=mod_as)
else:
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
self.codeText += """
STR_TO_CAT_CONVERSION_LEN_MAX = 10
log_suffix = f'[{Path(__file__).stem}] '
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = True, config={}, keep_unprocessed=[], log=None):
if not isinstance(xtrain, pd.DataFrame):
raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
elif ytrain:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.encode_target = encode_target
self.label_encoder = None
keep_unprocessed = [x for x in keep_unprocessed if x in xtrain.columns]
if keep_unprocessed:
self.unprocessed = xtrain[keep_unprocessed]
self.data = xtrain.drop(keep_unprocessed, axis=1)
else:
self.data = xtrain
self.unprocessed = pd.DataFrame()
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
def log_dataframe(self, msg=None):
import io
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t')
log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
self.log.info(pipe)
process_data = pipe.fit_transform(self.data, y=self.target)
self.update_output_features_names(pipe)
if isinstance(process_data, scipy.sparse.spmatrix):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if not self.unprocessed.empty:
df[self.unprocessed.columns] = self.unprocessed
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'{k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', '1.0'))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', '1.0'))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', '20'))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.drop_na_target()
if self.config['removeDuplicate']:
self.drop_duplicate()
self.check_categorical_features()
self.string_to_numeric()
self.process_target()
self.train_features_type = dict(zip(self.data.columns, self.data.dtypes))
self.parse_process_step_config()
self.process_drop_fillna()
#self.log_type_conversion()
self.update_num_fill_dict()
#print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
self.log.info(self.process_method)
self.log.info(self.train_features_type)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.encode_target:
if self.is_target_available():
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = get_one_true_option(self.config.get('numericalFillMethod',None))
normalization_method = get_one_true_option(self.config.get('normalization',None))
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = get_one_true_option(self.config.get('categoricalFillMethod',None))
cat_encode_method = get_one_true_option(self.config.get('categoryEncoding',None))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
    def update_output_features_names(self, pipe):
        """Insert names generated by feature-expanding steps into output_columns.

        *pipe* is the fitted FeatureUnion. self.feature_expender holds one
        {step_name: insertion_index} entry per expanding step (one-hot
        encoder, text vectorizer); the generated names are spliced into
        self.output_columns at that index and recorded in added_features.
        """
        columns = self.output_columns
        start_index = {}
        # map insertion index -> {step_name: generated feature names}
        for feat_expender in self.feature_expender:
            if feat_expender:
                step_name = list(feat_expender.keys())[0]
                index = list(feat_expender.values())[0]
                for transformer_step in pipe.transformer_list:
                    if transformer_step[1].steps[-1][0] in step_name:
                        start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names()}
        if start_index:
            # NOTE(review): assumes start_index iterates in ascending insertion
            # order so index_shifter stays correct — confirm feature_expender
            # entries are appended in position order.
            index_shifter = 0
            for key,value in start_index.items():
                for k,v in value.items():
                    if k == 'vectorizer':
                        v = [f'{x}_vect' for x in v]
                    # earlier insertions shift the positions of later ones
                    key = key + index_shifter
                    self.output_columns[key:key] = v
                    index_shifter += len(v)
                    self.added_features = [*self.added_features, *v]
    def text_pipeline(self, conf_json):
        """Append a text-processing branch to the pipeline.

        Selects all text features, fills NA with empty strings, merges them
        into one column, then appends the steps produced by textProfiler.
        Registers the last step in feature_expender so its generated feature
        names can be spliced into output_columns later.
        """
        if self.text_feature:
            pipeList = []
            max_features = 2000
            text_pipe = Pipeline([
                ('selector', ColumnTransformer([
                        ("selector", "passthrough", self.text_feature)
                        ], remainder="drop")),
                ("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
                ("merge_text_feature", textCombine())])
            obj = textProfiler()
            pipeList = obj.textProfiler(conf_json, pipeList, max_features)
            last_step = "merge_text_feature"
            for pipe_elem in pipeList:
                text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
                last_step = pipe_elem[0]
            text_transformer = ('text_process', text_pipe)
            self.pipeline.append(text_transformer)
            # remember where this step's generated features go in output_columns
            self.feature_expender.append({last_step:len(self.output_columns)})
    def create_pipeline(self):
        """Build one pipeline branch per (fill method, scaler/encoder) bucket.

        Uses the buckets prepared by update_num_fill_dict / update_cat_fill_dict.
        Columns whose branch preserves width are appended to output_columns
        now; one-hot branches are deferred via feature_expender because their
        output width is only known after fitting.
        """
        num_pipe = {}
        for k,v in self.num_fill_method_dict.items():
            for k1,v1 in v.items():
                if k1 and k1 != 'none':
                    num_pipe[f'{k}_{k1}'] = Pipeline([
                        ('selector', ColumnTransformer([
                                ("selector", "passthrough", v1)
                                ], remainder="drop")),
                        (k, self.get_num_imputer(k)),
                        (k1, self.get_num_scaler(k1))
                    ])
                else:
                    # no scaler requested: imputer-only branch
                    num_pipe[f'{k}_{k1}'] = Pipeline([
                        ('selector', ColumnTransformer([
                                ("selector", "passthrough", v1)
                                ], remainder="drop")),
                        (k, self.get_num_imputer(k))
                    ])
                self.output_columns.extend(v1)
        cat_pipe = {}
        for k,v in self.cat_fill_method_dict.items():
            for k1,v1 in v.items():
                cat_pipe[f'{k}_{k1}'] = Pipeline([
                    ('selector', ColumnTransformer([
                            ("selector", "passthrough", v1)
                            ], remainder="drop")),
                    (k, self.get_cat_imputer(k)),
                    (k1, self.get_cat_encoder(k1))
                ])
                if k1 not in ['onehotencoding']:
                    self.output_columns.extend(v1)
                else:
                    # output width unknown until fit; resolved in
                    # update_output_features_names
                    self.feature_expender.append({k1:len(self.output_columns)})
        for key, pipe in num_pipe.items():
            self.pipeline.append((key, pipe))
        for key, pipe in cat_pipe.items():
            self.pipeline.append((key, pipe))
        if not self.unprocessed.empty:
            # NOTE(review): unlike the branches above, this appends a bare
            # Pipeline instead of a (name, pipe) tuple — confirm the consumer
            # of self.pipeline accepts both shapes.
            self.pipeline.append(Pipeline([
                ('selector', ColumnTransformer([
                        ("selector", "passthrough", self.unprocessed.columns)
                        ], remainder="drop"))]))
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
    def update_num_fill_dict(self):
        """Bucket numeric features as {fill_method: {normalizer: [columns]}}.

        Each remaining bucket later becomes one pipeline branch in
        create_pipeline; empty buckets (and fill methods left with no
        buckets) are pruned in place.
        """
        self.num_fill_method_dict = {}
        if 'numFill' in self.process_method.keys():
            for f in supported_method['fillNa']['numeric']:
                self.num_fill_method_dict[f] = {}
                for en in supported_method['normalization']:
                    self.num_fill_method_dict[f][en] = []
                    for col in self.numeric_feature:
                        numFillDict = self.process_method.get('numFill',{})
                        normalizationDict = self.process_method.get('normalization',{})
                        if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
                            self.num_fill_method_dict[f][en].append(col)
                    if not self.num_fill_method_dict[f][en] :
                        del self.num_fill_method_dict[f][en]
                if not self.num_fill_method_dict[f]:
                    del self.num_fill_method_dict[f]
    def update_cat_fill_dict(self):
        """Bucket categorical features as {fill_method: {encoder: [columns]}}.

        Mirrors update_num_fill_dict: each remaining bucket becomes one
        pipeline branch; empty buckets are pruned in place.
        """
        self.cat_fill_method_dict = {}
        if 'catFill' in self.process_method.keys():
            for f in supported_method['fillNa']['categorical']:
                self.cat_fill_method_dict[f] = {}
                for en in supported_method['categoryEncoding']:
                    self.cat_fill_method_dict[f][en] = []
                    for col in self.cat_feature:
                        catFillDict = self.process_method.get('catFill',{})
                        catEncoderDict = self.process_method.get('catEncoder',{})
                        if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
                            self.cat_fill_method_dict[f][en].append(col)
                    if not self.cat_fill_method_dict[f][en] :
                        del self.cat_fill_method_dict[f][en]
                if not self.cat_fill_method_dict[f]:
                    del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = self.data.select_dtypes(include='number').columns.tolist()
self.cat_feature = self.data.select_dtypes(include='category').columns.tolist()
self.date_time = self.data.select_dtypes(include='datetime').columns.tolist()
self.text_feature = self.data.select_dtypes(include='object').columns.tolist()
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text','date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items()}
#self.log.info(mapped_type)
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
return [self.colm_type.values()]
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
valid_feat_type = {k:v for k,v in data_types if k in valid_feat}
else:
valid_feat_type = data_types
for k,v in valid_feat_type.items():
if v != self.colm_type[k].name:
try:
self.data.astype({k:v})
self.colm_type.update({k:self.data[k].dtype})
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
except:
self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason)
self.data = self.data.astype(valid_feat_type)
self.__update_type()
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
self.data[columns] = self.data[columns].apply(lambda x: to_number(x))
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
    def check_categorical_features(self):
        """Reclassify low-cardinality columns as pandas 'category' dtype.

        Numeric and string columns with fewer than config['categoryMaxLabel']
        unique values become categorical; string columns whose longest value
        is shorter than STR_TO_CAT_CONVERSION_LEN_MAX are converted as well.
        """
        num_data = self.data.select_dtypes(include='number')
        num_data_unique = num_data.nunique()
        num_to_cat_col = {}
        for i, value in enumerate(num_data_unique):
            if value < self.config['categoryMaxLabel']:
                num_to_cat_col[num_data_unique.index[i]] = 'category'
        if num_to_cat_col:
            self.update_type(num_to_cat_col, 'numerical to categorical')
        str_to_cat_col = {}
        str_data = self.data.select_dtypes(include='object')
        str_data_unique = str_data.nunique()
        for i, value in enumerate(str_data_unique):
            if value < self.config['categoryMaxLabel']:
                str_to_cat_col[str_data_unique.index[i]] = 'category'
        for colm in str_data.columns:
            # NOTE(review): .max() is NaN for all-null columns, making the
            # comparison False, so such columns are skipped — confirm intended.
            if self.data[colm].str.len().max() < STR_TO_CAT_CONVERSION_LEN_MAX:
                str_to_cat_col[colm] = 'category'
        if str_to_cat_col:
            self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
if feat_to_remove:
self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
if index.sum():
self.remove_rows(index, 'duplicate rows')
def drop_na_target(self):
if self.is_target_available():
self.remove_rows(self.target.isna(), 'null target values')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\\nStatus:- !... Normalization treatment done')
for method in supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\\nStatus:- !... Fillna for numeric feature done')
for method in supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\\nStatus:-!... FillNa for categorical feature done')
for method in supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
for i in constant_features:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except ValueError:
pass
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
for i in empty_features:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except:
pass
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
index_feature.append(feat)
self.drop_features(index_feature, "index")
for i in index_feature:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except:
pass
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
if not self.is_target_available() and (method.lower() == list(target_encoding_method_change.keys())[0]):
method = target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {supported_method['normalization']}")
    def apply_outlier(self):
        """Run the configured outlier detection and handling.

        Column-wise: detect outliers per numeric feature (iqr/zscore) and,
        per the configured operation, drop the rows, replace outliers with
        the column mean, or only log them. Data-set wise: optionally run
        IsolationForest over all numeric features. All rows flagged for
        removal are dropped in a single pass at the end.
        """
        # True = keep the row; detection only narrows this mask
        inlier_indices = np.array([True] * len(self.data))
        if self.process_method.get('outlier', None):
            self.log.info('-------> Feature wise outlier detection:')
            for k,v in self.process_method['outlier'].items():
                if k in self.numeric_feature:
                    if v == 'iqr':
                        index = findiqrOutlier(self.data[k])
                    elif v == 'zscore':
                        index = findzscoreOutlier(self.data[k])
                    elif v == 'disable':
                        index = None
                    # NOTE(review): when v == 'disable', index is None here, so
                    # 'dropdata'/'average' below would fail — presumably the
                    # config never pairs 'disable' with those operations; confirm.
                    if k in self.process_method['outlierOperation'].keys():
                        if self.process_method['outlierOperation'][k] == 'dropdata':
                            inlier_indices = np.logical_and(inlier_indices, index)
                        elif self.process_method['outlierOperation'][k] == 'average':
                            mean = self.data[k].mean()
                            index = ~index
                            self.data.loc[index,[k]] = mean
                            self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
                        elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
                            self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
        if self.config.get('outlierDetection',None):
            if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
                index = findiforestOutlier(self.data[self.numeric_feature])
                inlier_indices = np.logical_and(inlier_indices, index)
                self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
        if inlier_indices.sum() != len(self.data):
            self.remove_rows( inlier_indices == False, 'outlier detection')
        self.log.info('Status:- |... Outlier treatment done')
        self.log.info(f'-------> Data Frame Shape After Outlier treatment (Rows,Columns): {self.data.shape}')
def remove_rows(self, indices, msg=''):
if indices.sum():
indices = ~indices
if len(indices) != len(self.data):
raise ValueError('Data Frame length mismatch')
self.data = self.data[indices]
self.data.reset_index(drop=True, inplace=True)
if self.is_target_available():
self.target = self.target[indices]
if isinstance(self.target, pd.Series):
self.target.reset_index(drop=True, inplace=True)
if not self.unprocessed.empty:
self.unprocessed = self.unprocessed[indices]
self.unprocessed.reset_index(drop=True, inplace=True)
self.log.info(f'-------> {msg} dropped rows count: {(indices == False).sum()}')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method != 'Disable':
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {supported_method['outlieroperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder(handle_unknown="error")
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="error")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder(handle_unknown='error')
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
"""
return self.codeText
|
functions.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class global_function():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ""
self.available_functions = {
'iqr':{'name':'iqrOutlier','code':f"\n\ndef iqrOutlier(df):\
\n{self.tab}Q1 = df.quantile(0.25)\
\n{self.tab}Q3 = df.quantile(0.75)\
\n{self.tab}IQR = Q3 - Q1\
\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\
\n{self.tab}return index"},
'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\n\ndef zscoreOutlier(df):\
\n{self.tab}z = numpy.abs(stats.zscore(df))\
\n{self.tab}index = (z < 3).all(axis=1)\
\n{self.tab}return index"},
'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\n\ndef iforestOutlier(df):\
\n{self.tab}from sklearn.ensemble import IsolationForest\
\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\
\n{self.tab}isolation_forest.fit(df)\
\n{self.tab}y_pred_train = isolation_forest.predict(df)\
\n{self.tab}return y_pred_train == 1"},
'minMaxImputer':{'name':'minMaxImputer','code':f"\n\nclass minMaxImputer(TransformerMixin):\
\n{self.tab}def __init__(self, strategy='max'):\
\n{self.tab}{self.tab}self.strategy = strategy\
\n{self.tab}def fit(self, X, y=None):\
\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\
\n{self.tab}{self.tab}if self.strategy == 'min':\
\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\
\n{self.tab}{self.tab}else:\
\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\
\n{self.tab}{self.tab}return self\
\n{self.tab}def transform(self, X):\
\n{self.tab}{self.tab}import numpy\
\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"},
'DummyEstimator':{'name':'DummyEstimator','code':f"\n\nclass DummyEstimator(BaseEstimator):\
\n{self.tab}def fit(self): pass\
\n{self.tab}def score(self): pass"},
'start_reducer':{'name':'start_reducer','imports':[{'mod':'itertools'},{'mod':'numpy','mod_as':'np'},{'mod':'pandas','mod_as':'pd'},{'mod':'VarianceThreshold','mod_from':'sklearn.feature_selection'}], 'code':"""
def start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):
qconstantColumns = []
train_features = df.columns.tolist()
train_features.remove(target_feature)
df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature
numeric_features = df.select_dtypes(include='number').columns.tolist()
non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()
if numeric_features and var_threshold:
qconstantFilter = VarianceThreshold(threshold=var_threshold)
tempDf=df[numeric_features]
qconstantFilter.fit(tempDf)
qconstantColumns = [column for column in numeric_features if column not in tempDf.columns[qconstantFilter.get_support()]]
if target_feature in qconstantColumns:
qconstantColumns.remove(target_feature)
numeric_features = list(set(numeric_features) - set(qconstantColumns))
if numeric_features:
numColPairs = list(itertools.product(numeric_features, numeric_features))
for item in numColPairs:
if(item[0] == item[1]):
numColPairs.remove(item)
tempArray = []
for item in numColPairs:
tempCorr = np.abs(df[item[0]].corr(df[item[1]]))
if(tempCorr > corr_threshold):
tempArray.append(item[0])
tempArray = np.unique(tempArray).tolist()
nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))
groupedFeatures = []
if tempArray:
corrDic = {}
for feature in tempArray:
temp = []
for col in tempArray:
tempCorr = np.abs(df[feature].corr(df[col]))
temp.append(tempCorr)
corrDic[feature] = temp
#Similar correlation df
corrDF = pd.DataFrame(corrDic,index = tempArray)
corrDF.loc[:,:] = np.tril(corrDF, k=-1)
alreadyIn = set()
similarFeatures = []
for col in corrDF:
perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()
if perfectCorr and col not in alreadyIn:
alreadyIn.update(set(perfectCorr))
perfectCorr.append(col)
similarFeatures.append(perfectCorr)
updatedSimFeatures = []
for items in similarFeatures:
if(target_feature != '' and target_feature in items):
for p in items:
updatedSimFeatures.append(p)
else:
updatedSimFeatures.append(items[0])
newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))
updatedFeatures = list(set(newTempFeatures + non_numeric_features))
else:
updatedFeatures = list(set(df.columns) -set(qconstantColumns))
else:
updatedFeatures = list(set(df.columns) -set(qconstantColumns))
return updatedFeatures
"""},
'feature_importance_class':{'name':'feature_importance_class','code':"\n\
\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\
\n import pandas as pd\
\n from sklearn.feature_selection import chi2\
\n from sklearn.feature_selection import f_classif\
\n from sklearn.feature_selection import mutual_info_classif\
\n \
\n impFeatures = []\
\n if cat_features:\
\n categoricalData=df[cat_features]\
\n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\
\n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\
\n impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\
\n if numeric_features:\
\n quantData=df[numeric_features]\
\n fclassScore=f_classif(quantData,df[target_feature])[1]\
\n miClassScore=mutual_info_classif(quantData,df[target_feature])\
\n fClassSeries=pd.Series(fclassScore,index=numeric_features)\
\n miClassSeries=pd.Series(miClassScore,index=numeric_features)\
\n impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\
\n impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\
\n pearsonScore=df.corr() \
\n targetPScore=abs(pearsonScore[target_feature])\
\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\
\n return list(set(sum(impFeatures, [])))"},
'feature_importance_reg':{'name':'feature_importance_reg','code':"\n\
\ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\
\n import pandas as pd\
\n from sklearn.feature_selection import f_regression\
\n from sklearn.feature_selection import mutual_info_regression\
\n \
\n impFeatures = []\
\n if numeric_features:\
\n quantData =df[numeric_features]\
\n fregScore=f_regression(quantData,df[target_feature])[1]\
\n miregScore=mutual_info_regression(quantData,df[target_feature])\
\n fregSeries=pd.Series(fregScore,index=numeric_features)\
\n miregSeries=pd.Series(miregScore,index=numeric_features)\
\n impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\
\n impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\
\n pearsonScore=df.corr()\
\n targetPScore=abs(pearsonScore[target_feature])\
\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\
\n return list(set(sum(impFeatures, [])))"},
'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\n\
\ndef scoring_criteria(score_param, problem_type, class_count):\
\n if problem_type == 'classification':\
\n scorer_mapping = {\
\n 'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\
\n 'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\
\n 'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\
\n 'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\
\n }\
\n if (score_param.lower() == 'roc_auc') and (class_count > 2):\
\n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\
\n else:\
\n class_type = 'binary_class' if class_count == 2 else 'multi_class'\
\n if score_param in scorer_mapping.keys():\
\n score_param = scorer_mapping[score_param][class_type]\
\n else:\
\n score_param = 'accuracy'\
\n return score_param"},
'log_dataframe':{'name':'log_dataframe','code':f"\n\
\ndef log_dataframe(df, msg=None):\
\n import io\
\n buffer = io.StringIO()\
\n df.info(buf=buffer)\
\n if msg:\
\n log_text = f'Data frame after {{msg}}:'\
\n else:\
\n log_text = 'Data frame:'\
\n log_text += '\\n\\t'+str(df.head(2)).replace('\\n','\\n\\t')\
\n log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\
\n get_logger().info(log_text)"},
'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\n\
\nclass BayesSearchCV():\
\n\
\n def __init__(self, estimator, params, scoring, n_iter, cv):\
\n self.estimator = estimator\
\n self.params = params\
\n self.scoring = scoring\
\n self.iteration = n_iter\
\n self.cv = cv\
\n self.best_estimator_ = None\
\n self.best_score_ = None\
\n self.best_params_ = None\
\n\
\n def __min_fun(self, params):\
\n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\
\n acc = score.mean()\
\n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\
\n\
\n def fit(self, X, y):\
\n trials = Trials()\
\n self.X = X\
\n self.y = y\
\n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\
\n result = sorted(trials.results, key = lambda x: x['loss'])[0]\
\n self.best_estimator_ = result['model']\
\n self.best_score_ = result['score']\
\n self.best_params_ = result['params']\
\n self.best_estimator_.fit(X, y)\
\n\
\n def hyperOptParamConversion( paramSpace):\
\n paramDict = {}\
\n for j in list(paramSpace.keys()):\
\n inp = paramSpace[j]\
\n isLog = False\
\n isLin = False\
\n isRan = False\
\n isList = False\
\n isString = False\
\n try:\
\n # check if functions are given as input and reassign paramspace\
\n v = paramSpace[j]\
\n if 'logspace' in paramSpace[j]:\
\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n isLog = True\
\n elif 'linspace' in paramSpace[j]:\
\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n isLin = True\
\n elif 'range' in paramSpace[j]:\
\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n isRan = True\
\n elif 'list' in paramSpace[j]:\
\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n isList = True\
\n elif '[' and ']' in paramSpace[j]:\
\n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\
\n isList = True\
\n x = paramSpace[j].split(',')\
\n except:\
\n x = paramSpace[j]\
\n str_arg = paramSpace[j]\
\n\
\n # check if arguments are string\
\n try:\
\n test = eval(x[0])\
\n except:\
\n isString = True\
\n\
\n if isString:\
\n paramDict.update({j: hp.choice(j, x)})\
\n else:\
\n res = eval(str_arg)\
\n if isLin:\
\n y = eval('np.linspace' + str(res))\
\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\
\n elif isLog:\
\n y = eval('np.logspace' + str(res))\
\n paramDict.update(\
\n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\
\n elif isRan:\
\n y = eval('np.arange' + str(res))\
\n paramDict.update({j: hp.choice(j, y)})\
\n # check datatype of argument\
\n elif isinstance(eval(x[0]), bool):\
\n y = list(map(lambda i: eval(i), x))\
\n paramDict.update({j: hp.choice(j, eval(str(y)))})\
\n elif isinstance(eval(x[0]), float):\
\n res = eval(str_arg)\
\n if len(str_arg.split(',')) == 3 and not isList:\
\n y = eval('np.linspace' + str(res))\
\n #print(y)\
\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\
\n else:\
\n y = list(res) if isinstance(res, tuple) else [res]\
\n paramDict.update({j: hp.choice(j, y)})\
\n else:\
\n res = eval(str_arg)\
\n if len(str_arg.split(',')) == 3 and not isList:\
\n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\
\n else:\
\n y = list(res) if isinstance(res, tuple) else [res]\
\n paramDict.update({j: hp.choice(j, y)})\
\n return paramDict"},
's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':"\n\
\ndef s2n(value):\
\n try:\
\n x=eval(value)\
\n return x\
\n except:\
\n try:\
\n return w2n.word_to_num(value)\
\n except:\
\n return np.nan"},
'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef read_json(file_path):\
\n data = None\
\n with open(file_path,'r') as f:\
\n data = json.load(f)\
\n return data\
\n\
\ndef write_json(data, file_path):\
\n with open(file_path,'w') as f:\
\n json.dump(data, f)\
\n\
\ndef read_data(file_path, encoding='utf-8', sep=','):\
\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\
\n\
\ndef write_data(data, file_path, index=False):\
\n return data.to_csv(file_path, index=index)\
\n\
\n#Uncomment and change below code for google storage\
\n#def write_data(data, file_path, index=False):\
\n# file_name= file_path.name\
\n# data.to_csv('output_data.csv')\
\n# storage_client = storage.Client()\
\n# bucket = storage_client.bucket('aion_data')\
\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\
\n# return data\
\n\
\ndef is_file_name_url(file_name):\
\n supported_urls_starts_with = ('gs://','https://','http://')\
\n return file_name.startswith(supported_urls_starts_with)\
\n"},
'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\n\
\nlog = None\
\ndef set_logger(log_file, mode='a'):\
\n global log\
\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\
\n log = logging.getLogger(Path(__file__).parent.name)\
\n return log\
\n\
\ndef get_logger():\
\n return log\n"},
'mlflowSetPath':{'name':'mlflowSetPath','code':f"\n\ndef mlflowSetPath(path, name):\
\n{self.tab}db_name = str(Path(path)/'mlruns')\
\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\
\n{self.tab}mlflow.set_experiment(str(Path(path).name))\
\n"},
'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\n\ndef mlflow_create_experiment(config, path, name):\
\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\
\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\
\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\
\n{self.tab}client = mlflow.tracking.MlflowClient()\
\n{self.tab}experiment = client.get_experiment_by_name(name)\
\n{self.tab}if experiment:\
\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\
\n{self.tab}else:\
\n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\
\n{self.tab}return client, experiment_id\
\n"},
'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\n\ndef get_mlflow_uris(config, path):\
\n artifact_uri = None\
\n tracking_uri_type = config.get('tracking_uri_type',None)\
\n if tracking_uri_type == 'localDB':\
\n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\
\n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\
\n tracking_uri = config['tracking_uri']\
\n if config.get('artifacts_uri', None):\
\n if Path(config['artifacts_uri']).exists():\
\n artifact_uri = 'file:' + config['artifacts_uri']\
\n else:\
\n artifact_uri = config['artifacts_uri']\
\n else:\
\n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\
\n else:\
\n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\
\n artifact_uri = None\
\n if config.get('registry_uri', None):\
\n registry_uri = config['registry_uri']\
\n else:\
\n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\
\n return tracking_uri, artifact_uri, registry_uri\
\n"},
'logMlflow':{'name':'logMlflow','code':f"\n\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\
\n{self.tab}run_id = None\
\n{self.tab}for k,v in params.items():\
\n{self.tab}{self.tab}mlflow.log_param(k, v)\
\n{self.tab}for k,v in metrices.items():\
\n{self.tab}{self.tab}mlflow.log_metric(k, v)\
\n{self.tab}if 'CatBoost' in algoName:\
\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\
\n{self.tab}else:\
\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\
\n{self.tab}tags['processed'] = 'no'\
\n{self.tab}tags['registered'] = 'no'\
\n{self.tab}mlflow.set_tags(tags)\
\n{self.tab}if model_info:\
\n{self.tab}{self.tab}run_id = model_info.run_id\
\n{self.tab}return run_id\
\n"},
'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\ndef get_classification_metrices( actual_values, predicted_values):\
\n result = {}\
\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\
\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\
\n average='macro')\
\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\
\n average='macro')\
\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\
\n average='macro')\
\n\
\n result['accuracy'] = math.floor(accuracy_score*10000)/100\
\n result['precision'] = math.floor(avg_precision*10000)/100\
\n result['recall'] = math.floor(avg_recall*10000)/100\
\n result['f1'] = math.floor(avg_f1*10000)/100\
\n return result\
\n"},
'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\ndef get_regression_metrices( actual_values, predicted_values):\
\n result = {}\
\n\
\n me = np.mean(predicted_values - actual_values)\
\n sde = np.std(predicted_values - actual_values, ddof = 1)\
\n\
\n abs_err = np.abs(predicted_values - actual_values)\
\n mae = np.mean(abs_err)\
\n sdae = np.std(abs_err, ddof = 1)\
\n\
\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\
\n mape = np.mean(abs_perc_err)\
\n sdape = np.std(abs_perc_err, ddof = 1)\
\n\
\n result['mean_error'] = me\
\n result['mean_abs_error'] = mae\
\n result['mean_abs_perc_error'] = mape\
\n result['error_std'] = sde\
\n result['abs_error_std'] = sdae\
\n result['abs_perc_error_std'] = sdape\
\n return result\
\n"}
}
def add_function(self, name, importer=None):
if name in self.available_functions.keys():
self.codeText += self.available_functions[name]['code']
if importer:
if 'imports' in self.available_functions[name].keys():
for module in self.available_functions[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
def get_function_name(self, name):
if name in self.available_functions.keys():
return self.available_functions[name]['name']
return None
def getCode(self):
return self.codeText
|
register.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class register():
    """Code generator for the standalone MLaC model-registration step.

    The emitted module merges the per-model training logs, queries MLflow for
    unprocessed runs, picks the best one and promotes it to Production.
    This class only assembles source text; it performs no MLflow calls itself.
    """
    def __init__(self, importer, indent=0, tab_size=4):
        self.tab = " "*tab_size
        self.codeText = ""          # generated module body
        self.function_code = ""     # generated local helper functions
        self.importer = importer    # collects import statements for the emitted module
        self.input_files = {}
        self.output_files = {}
        self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','model' : 'model.pkl', 'performance': 'performance.json','production':'production.json','monitor':'monitoring.json'})

    def addInputFiles(self, files):
        """Merge *files* (logical name -> file name) into the input-file map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v

    def addOutputFiles(self, files):
        """Merge *files* (logical name -> file name) into the output-file map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            # BUG FIX: previously assigned into self.input_files, which left
            # self.output_files permanently empty (getOutputFiles() always
            # produced 'output_file = { }') and polluted the input map.
            self.output_files[k] = v

    def getInputFiles(self):
        """Return the 'IOFiles = {...}' assignment for the emitted module."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text

    def getOutputFiles(self):
        """Return the 'output_file = {...}' assignment for the emitted module."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text

    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles assignment, optionally shifted by *indent* tabs.

        NOTE(review): the replace inserts the indent BEFORE each newline
        (trailing whitespace) rather than after it; kept as-is since other
        generators in this file share the same convention — confirm intent.
        """
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text

    def code_imports(self):
        """Register every module the emitted registration module imports."""
        modules = [{'module':'sys'}
                ,{'module':'json'}
                ,{'module':'time'}
                ,{'module':'platform'}
                ,{'module':'tempfile'}
                ,{'module':'sqlite3'}
                ,{'module':'mlflow'}
                ,{'module':'Path', 'mod_from':'pathlib'}
                ,{'module':'ViewType', 'mod_from':'mlflow.entities'}
                ,{'module':'MlflowClient', 'mod_from':'mlflow.tracking'}
                ,{'module':'ModelVersionStatus', 'mod_from':'mlflow.entities.model_registry.model_version_status'}
            ]
        self.import_modules(modules)

    def import_module(self, module, mod_from=None, mod_as=None):
        """Register a single import with the shared importer."""
        self.importer.addModule(module, mod_from=mod_from, mod_as=mod_as)

    def import_modules(self, modules):
        """Register a list of import specs ({'module','mod_from','mod_as'})."""
        if isinstance(modules, list):
            for mod in modules:
                if isinstance(mod, dict):
                    self.importer.addModule(mod['module'], mod_from= mod.get('mod_from', None), mod_as=mod.get('mod_as', None))

    def getImportCode(self):
        """Return the import statements accumulated by the importer."""
        return self.importer.getCode()

    def __addValidateConfigCode(self, models=None):
        # NOTE(review): *models* is currently unused — kept for signature
        # compatibility with the other step generators.
        text = "\n\
\ndef validateConfig():\
\n    config_file = Path(__file__).parent/'config.json'\
\n    if not Path(config_file).exists():\
\n        raise ValueError(f'Config file is missing: {config_file}')\
\n    config = read_json(config_file)\
\n    return config"
        return text

    def addLocalFunctionsCode(self, models):
        """Append the local helper functions (validateConfig) to function_code."""
        self.function_code += self.__addValidateConfigCode(models)

    def addPrefixCode(self, indent=1):
        """Append the top-level functions of the emitted module: log merging,
        model registration driver and the `register` entry point."""
        self.code_imports()
        self.codeText += "\n\
\ndef __merge_logs(log_file_sequence,path, files):\
\n    if log_file_sequence['first'] in files:\
\n        with open(path/log_file_sequence['first'], 'r') as f:\
\n            main_log = f.read()\
\n        files.remove(log_file_sequence['first'])\
\n    for file in files:\
\n        with open(path/file, 'r') as f:\
\n            main_log = main_log + f.read()\
\n        (path/file).unlink()\
\n    with open(path/log_file_sequence['merged'], 'w') as f:\
\n        f.write(main_log)\
\n\
\ndef merge_log_files(folder, models):\
\n    log_file_sequence = {\
\n        'first': 'aion.log',\
\n        'merged': 'aion.log'\
\n    }\
\n    log_file_suffix = '_aion.log'\
\n    log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()]\
\n    log_files.append(log_file_sequence['first'])\
\n    __merge_logs(log_file_sequence, folder, log_files)\
\n\
\ndef register_model(targetPath,models,usecasename, meta_data):\
\n    register = mlflow_register(targetPath, usecasename, meta_data)\
\n    register.setup_registration()\
\n\
\n    runs_with_score = register.get_unprocessed_runs(models)\
\n    best_run = register.get_best_run(runs_with_score)\
\n    register.update_unprocessed(runs_with_score)\
\n    return register.register_model(models, best_run)\
\n\
\ndef register(log):\
\n    config = validateConfig()\
\n    targetPath = Path('aion')/config['targetPath']\
\n    models = config['models']\
\n    merge_log_files(targetPath, models)\
\n    meta_data_file = targetPath/IOFiles['metaData']\
\n    if meta_data_file.exists():\
\n        meta_data = read_json(meta_data_file)\
\n    else:\
\n        raise ValueError(f'Configuration file not found: {meta_data_file}')\
\n    usecase = config['targetPath']\
\n    # enable logging\
\n    log_file = targetPath/IOFiles['log']\
\n    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\
\n    register_model_name = register_model(targetPath,models,usecase, meta_data)\
\n    status = {'Status':'Success','Message':f'Model Registered: {register_model_name}'}\
\n    log.info(f'output: {status}')\
\n    return json.dumps(status)"

    def getMainCodeModules(self):
        """Import specs required by the emitted module's __main__ section.

        NOTE(review): shutil/argparse appear unused by the emitted code shown
        here — confirm against the main-code template before pruning.
        """
        modules = [{'module':'Path', 'mod_from':'pathlib'}
                ,{'module':'sys'}
                ,{'module':'os'}
                ,{'module':'json'}
                ,{'module':'logging'}
                ,{'module':'shutil'}
                ,{'module':'argparse'}
            ]
        return modules

    def addMainCode(self, models, indent=1):
        """Append the `if __name__ == '__main__'` driver to the emitted module."""
        self.codeText += "\n\
\nif __name__ == '__main__':\
\n    log = None\
\n    try:\
\n        print(register(log))\
\n    except Exception as e:\
\n        if log:\
\n            log.error(e, exc_info=True)\
\n        status = {'Status':'Failure','Message':str(e)}\
\n        print(json.dumps(status))"

    def addStatement(self, statement, indent=1):
        """Append one raw statement line at the given indent level."""
        self.codeText += f"\n{self.tab * indent}{statement}"

    def query_with_quetes_code(self, decs=True, indent=1):
        """Return the generated __get_unprocessed_runs_sorted method; runs are
        ordered by test_score DESC when *decs* is True, else ASC."""
        return """\n{first_indentation}def __get_unprocessed_runs_sorted(self):
{indentation}query = "tags.processed = 'no'"
{indentation}runs = self.client.search_runs(
{indentation}    experiment_ids=self.experiment_id,
{indentation}    filter_string=query,
{indentation}    run_view_type=ViewType.ACTIVE_ONLY,
{indentation}    order_by=['metrics.test_score {0}']
{indentation})
{indentation}return runs\n""".format('DESC' if decs else 'ASC', first_indentation=indent*self.tab, indentation=(1+indent)*self.tab)

    def addClassCode(self, smaller_is_better=False):
        """Append the emitted mlflow_register class; sort order of candidate
        runs follows *smaller_is_better*."""
        self.codeText += "\
\nclass mlflow_register():\
\n\
\n    def __init__(self, input_path, model_name, meta_data):\
\n        self.input_path = Path(input_path).resolve()\
\n        self.model_name = model_name\
\n        self.meta_data = meta_data\
\n        self.logger = logging.getLogger('ModelRegister')\
\n        self.client = None\
\n        self.monitoring_data = read_json(self.input_path/IOFiles['monitor'])\
\n        mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\
\n        if not self.monitoring_data.get('mlflow_config',False):\
\n            self.monitoring_data['mlflow_config'] = mlflow_default_config\
\n\
\n    def setup_registration(self):\
\n        tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(self.monitoring_data['mlflow_config'],self.input_path)\
\n        self.logger.info(f'MLflow tracking uri: {tracking_uri}')\
\n        self.logger.info(f'MLflow registry uri: {registry_uri}')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        self.client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        self.experiment_id = self.client.get_experiment_by_name(self.model_name).experiment_id\
\n"
        self.codeText += self.query_with_quetes_code(smaller_is_better == False)
        self.codeText += "\
\n    def __log_unprocessed_runs(self, runs):\
\n        self.logger.info('Unprocessed runs:')\
\n        for run in runs:\
\n            self.logger.info('    {}: {}'.format(run.info.run_id,run.data.metrics['test_score']))\
\n\
\n    def get_unprocessed_runs(self, model_path):\
\n        unprocessed_runs = self.__get_unprocessed_runs_sorted()\
\n        if not unprocessed_runs:\
\n            raise ValueError('Registering fail: No new trained model')\
\n        self.__log_unprocessed_runs( unprocessed_runs)\
\n        return unprocessed_runs\
\n\
\n    def __wait_until_ready(self, model_name, model_version):\
\n        client = MlflowClient()\
\n        for _ in range(10):\
\n            model_version_details = self.client.get_model_version(\
\n                name=model_name,\
\n                version=model_version,\
\n            )\
\n            status = ModelVersionStatus.from_string(model_version_details.status)\
\n            if status == ModelVersionStatus.READY:\
\n                break\
\n            time.sleep(1)\
\n\
\n    def __create_model(self, run):\
\n        artifact_path = 'model'\
\n        model_uri = 'runs:/{run_id}/{artifact_path}'.format(run_id=run.info.run_id, artifact_path=artifact_path)\
\n        self.logger.info(f'Registering model (run id): {run.info.run_id}')\
\n        model_details = mlflow.register_model(model_uri=model_uri, name=self.model_name)\
\n        self.__wait_until_ready(model_details.name, model_details.version)\
\n        self.client.set_tag(run.info.run_id, 'registered', 'yes' )\
\n        state_transition = self.client.transition_model_version_stage(\
\n            name=model_details.name,\
\n            version=model_details.version,\
\n            stage='Production',\
\n        )\
\n        self.logger.info(state_transition)\
\n        return model_details\
\n\
\n    def get_best_run(self, models):\
\n        return models[0]\
\n\
\n    def __validate_config(self):\
\n        try:\
\n            load_data_loc = self.meta_data['load_data']['Status']['DataFilePath']\
\n        except KeyError:\
\n            raise ValueError('DataIngestion step output is corrupted')\
\n\
\n    def __mlflow_log_transformer_steps(self, best_run):\
\n        run_id = best_run.info.run_id\
\n        meta_data = read_json(self.input_path/(best_run.data.tags['mlflow.runName']+'_'+IOFiles['metaData']))\
\n        self.__validate_config()\
\n        with mlflow.start_run(run_id):\
\n            if 'transformation' in meta_data.keys():\
\n                if 'target_encoder' in meta_data['transformation'].keys():\
\n                    source_loc = meta_data['transformation']['target_encoder']\
\n                    mlflow.log_artifact(str(self.input_path/source_loc))\
\n                    meta_data['transformation']['target_encoder'] = Path(source_loc).name\
\n                if 'preprocessor' in meta_data['transformation'].keys():\
\n                    source_loc = meta_data['transformation']['preprocessor']\
\n                    mlflow.log_artifact(str(self.input_path/source_loc))\
\n                    meta_data['transformation']['preprocessor'] = Path(source_loc).name\
\n\
\n            write_json(meta_data, self.input_path/IOFiles['metaData'])\
\n            mlflow.log_artifact(str(self.input_path/IOFiles['metaData']))\
\n\
\n    def __update_processing_tag(self, processed_runs):\
\n        self.logger.info('Changing status to processed:')\
\n        for run in processed_runs:\
\n            self.client.set_tag(run.info.run_id, 'processed', 'yes')\
\n            self.logger.info(f'    run id: {run.info.run_id}')\
\n\
\n    def update_unprocessed(self, runs):\
\n        return self.__update_processing_tag( runs)\
\n\
\n    def __force_register(self, best_run):\
\n        self.__create_model( best_run)\
\n        self.__mlflow_log_transformer_steps( best_run)\
\n        production_json = self.input_path/IOFiles['production']\
\n        production_model = {'Model':best_run.data.tags['mlflow.runName'],'runNo':self.monitoring_data['runNo'],'score':best_run.data.metrics['test_score']}\
\n        write_json(production_model, production_json)\
\n        database_path = self.input_path/(self.input_path.stem + '.db')\
\n        if database_path.exists():\
\n            database_path.unlink()\
\n        return best_run.data.tags['mlflow.runName']\
\n\
\n    def __get_register_model_score(self):\
\n        reg = self.client.list_registered_models()\
\n        if not reg:\
\n            return '', 0\
\n        run_id = reg[0].latest_versions[0].run_id\
\n        run = self.client.get_run(run_id)\
\n        score = run.data.metrics['test_score']\
\n        return run_id, score\
\n\
\n    def register_model(self, models, best_run):\
\n        return self.__force_register(best_run)"

    def local_functions_code(self, smaller_is_better=True, indent=1):
        """Append standalone helper functions (validate_config, get_digest)."""
        # NOTE(review): min_max is computed but never used in the emitted
        # text — kept to preserve the method's observable behavior.
        if smaller_is_better:
            min_max = 'min'
        else:
            min_max = 'max'
        self.codeText += "\ndef validate_config(deploy_dict):\
\n    try:\
\n        load_data_loc = deploy_dict['load_data']['Status']['DataFilePath']\
\n    except KeyError:\
\n        raise ValueError('DataIngestion step output is corrupted')\
\n\
\ndef get_digest(fname):\
\n    import hashlib\
\n    hash_algo = hashlib.sha256()\
\n    with open(fname, 'rb') as f:\
\n        for chunk in iter(lambda: f.read(2 ** 20), b''):\
\n            hash_algo.update(chunk)\
\n    return hash_algo.hexdigest()\
\n"

    def getCode(self, indent=1):
        """Return the complete generated source: helpers then module body."""
        return self.function_code + '\n' + self.codeText
|
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
from .load_data import tabularDataReader
from .transformer import transformer as profiler
from .transformer import data_profiler
from .selector import selector
from .trainer import learner
from .register import register
from .deploy import deploy
from .drift_analysis import drift
from .functions import global_function
from .data_reader import data_reader
from .utility import utility_function
|
load_data.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class tabularDataReader():
    """Code generator for the standalone MLaC data-ingestion (load_data) step.

    The emitted module reads the configured dataset (local path, s3 bucket or
    production drift data), keeps only the selected features, persists the
    raw data and writes the step metadata.
    """
    def __init__(self, tab_size=4):
        self.tab = ' ' * tab_size
        self.function_code = ''      # generated local helpers (validateConfig)
        self.codeText = ''           # generated module body
        self.code_generated = False  # guards lazy generation in getCode()

    def getInputFiles(self):
        """Return the 'IOFiles = {...}' assignment for the emitted module."""
        IOFiles = {
            "rawData": "rawData.dat",
            "metaData" : "modelMetaData.json",
            "log" : "aion.log",
            "outputData" : "rawData.dat",
            "monitoring":"monitoring.json",
            "prodData": "prodData",
            "prodDataGT":"prodDataGT"
        }
        text = 'IOFiles = '
        if not IOFiles:
            text += '{ }'
        else:
            text += json.dumps(IOFiles, indent=4)
        return text

    def getOutputFiles(self):
        """Return the 'output_file = {...}' assignment for the emitted module."""
        output_files = {
            'metaData' : 'modelMetaData.json',
            'log' : 'aion.log',
            'outputData' : 'rawData.dat'
        }
        text = 'output_file = '
        if not output_files:
            text += '{ }'
        else:
            text += json.dumps(output_files, indent=4)
        return text

    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles assignment, optionally shifted by *indent* tabs.

        NOTE(review): the replace inserts the indent BEFORE each newline
        (trailing whitespace) rather than after it; kept for consistency with
        the sibling generators — confirm intent.
        """
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text

    def __addValidateConfigCode(self):
        text = "\n\
\ndef validateConfig():\
\n    config_file = Path(__file__).parent/'config.json'\
\n    if not Path(config_file).exists():\
\n        raise ValueError(f'Config file is missing: {config_file}')\
\n    config = read_json(config_file)\
\n    if not config['targetPath']:\
\n        raise ValueError(f'Target Path is not configured')\
\n    return config"
        return text

    def addMainCode(self):
        """Append the `if __name__ == '__main__'` driver to the emitted module."""
        self.codeText += "\n\
\nif __name__ == '__main__':\
\n    log = None\
\n    try:\
\n        print(load_data(log))\
\n    except Exception as e:\
\n        if log:\
\n            log.getLogger().error(e, exc_info=True)\
\n        status = {'Status':'Failure','Message':str(e)}\
\n        print(json.dumps(status))\
\n        raise Exception(str(e))"

    def addLoadDataCode(self):
        """Append the load_data function that reads, filters and persists the
        training dataset."""
        # BUG FIX: the emitted code previously read
        # `monitoring_data.get('prod_db_type','sqlite')` — `monitoring_data`
        # is never defined in the generated module (only `monitoringStatus`
        # is), so the drift-retraining path raised NameError. The monitoring
        # settings parsed from monitoring.json live in `monitoringStatus`.
        self.codeText += """
#This function will read the data and save the data on persistent storage
def load_data(log):
    config = validateConfig()
    targetPath = Path('aion')/config['targetPath']
    targetPath.mkdir(parents=True, exist_ok=True)
    log_file = targetPath/IOFiles['log']
    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    monitoring = targetPath/IOFiles['monitoring']
    if monitoring.exists():
        monitoringStatus = read_json(monitoring)
        if monitoringStatus['dataLocation'] == '' and monitoringStatus['driftStatus'] != 'No Drift':
            reader = dataReader(reader_type=monitoringStatus.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None))
            raw_data_location = targetPath/IOFiles['rawData']
            if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']):
                predicted_data = reader.read(IOFiles['prodData'])
                actual_data = reader.read(IOFiles['prodDataGT'])
                common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()]
                mergedRes = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner')
                raw_data_path = pd.read_csv(raw_data_location)
                df = pd.concat([raw_data_path,mergedRes])
            else:
                raise ValueError(f'Prod Data not found')
        elif monitoringStatus['dataLocation'] == '':
            raise ValueError(f'Data Location does not exist')
        else:
            if 's3' in monitoringStatus.keys():
                input_reader = dataReader(reader_type='s3',target_path=None, config=monitoringStatus['s3'])
                log.info(f"Downloading '{monitoringStatus['s3']['file_name']}' from s3 bucket '{monitoringStatus['s3']['bucket_name']}'")
                df = input_reader.read(monitoringStatus['s3']['file_name'])
            else:
                location = monitoringStatus['dataLocation']
                log.info(f'Dataset path: {location}')
                df = read_data(location)
    else:
        raise ValueError(f'Monitoring.json does not exist')
    status = {}
    output_data_path = targetPath/IOFiles['outputData']
    log.log_dataframe(df)
    required_features = list(set(config['selected_features'] + [config['target_feature']]))
    log.info('Dataset features required: ' + ','.join(required_features))
    missing_features = [x for x in required_features if x not in df.columns.tolist()]
    if missing_features:
        raise ValueError(f'Some feature/s is/are missing: {missing_features}')
    log.info('Removing unused features: '+','.join(list(set(df.columns) - set(required_features))))
    df = df[required_features]
    log.info(f'Required features: {required_features}')
    try:
        log.info(f'Saving Dataset: {str(output_data_path)}')
        write_data(df, output_data_path, index=False)
        status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'Records':len(df)}
    except:
        raise ValueError('Unable to create data file')
    meta_data_file = targetPath/IOFiles['metaData']
    meta_data = dict()
    meta_data['load_data'] = {}
    meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if x != config['target_feature']]
    meta_data['load_data']['Status'] = status
    write_json(meta_data, meta_data_file)
    output = json.dumps(status)
    log.info(output)
    return output
"""

    def addValidateConfigCode(self, indent=1):
        """Append the validateConfig helper to the generated helper section."""
        self.function_code += self.__addValidateConfigCode()

    def generateCode(self):
        """Assemble the full module: helpers, load_data and the main driver."""
        self.addValidateConfigCode()
        self.addLoadDataCode()
        self.addMainCode()
        self.code_generated = True

    def addStatement(self, statement, indent=1):
        """Append one raw statement line at the given indent level."""
        self.codeText += '\n' + self.tab * indent + statement

    def getCode(self):
        """Return the complete generated source, generating it on first call."""
        if not self.code_generated:
            self.generateCode()
        return self.function_code + '\n' + self.codeText
|
input_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def run_input_drift(config):
    """Generate the standalone InputDrift component: its Python module, a
    requirements.txt and a Dockerfile under <deploy_path>/MLaC/InputDrift."""
    importer = importModule()
    drifter = input_drift()
    # modules the emitted input-drift script imports
    for plain in ('sys', 'json', 'mlflow', 'platform', 'warnings'):
        importer.addModule(plain)
    importer.addModule('numpy', mod_as='np')
    importer.addModule('pandas', mod_as='pd')
    importer.addModule('stats', mod_from='scipy', mod_as='st')
    importer.addModule('Path', mod_from='pathlib')
    usecase = config['modelName'] + '_' + config['modelVersion']
    code = file_header(usecase)
    code += importer.getCode()
    drifter.generateCode()
    code += drifter.getCode()
    deploy_path = Path(config["deploy_path"]) / 'MLaC' / 'InputDrift'
    deploy_path.mkdir(parents=True, exist_ok=True)
    with open(deploy_path / "input_drift.py", "w") as out:
        out.write(code)
    with open(deploy_path / "requirements.txt", "w") as out:
        out.write(importer.getBaseModule())
    create_docker_file('input_drift', deploy_path)
|
output_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def run_output_drift(config):
    """Generate the standalone OutputDrift component: its Python module, a
    requirements.txt and a Dockerfile under <deploy_path>/MLaC/OutputDrift."""
    importer = importModule()
    drifter = output_drift(
        missing=get_variable('fillna', False),
        word2num_features=get_variable('word2num_features', False),
        cat_encoder=get_variable('cat_encoder', False),
        target_encoder=get_variable('target_encoder', False),
        normalizer=get_variable('normalizer', False),
        text_profiler=get_variable('text_features', False),
        feature_reducer=get_variable('feature_reducer', False),
        score_smaller_is_better=get_variable('smaller_is_better', False),
        problem_type=config['problem_type'],
    )
    function = global_function()
    # modules the emitted output-drift script imports
    for plain in ('sys', 'math', 'json', 'platform', 'joblib', 'mlflow', 'sklearn'):
        importer.addModule(plain)
    importer.addModule('numpy', mod_as='np')
    importer.addModule('pandas', mod_as='pd')
    importer.addModule('Path', mod_from='pathlib')
    importer.addModule('InfluxDBClient', mod_from='influxdb')
    function.add_function('readWrite')
    usecase = config['modelName'] + '_' + config['modelVersion']
    code = file_header(usecase)
    code += importer.getCode()
    code += function.getCode()
    drifter.generateCode()
    code += drifter.getCode()
    deploy_path = Path(config["deploy_path"]) / 'MLaC' / 'OutputDrift'
    deploy_path.mkdir(parents=True, exist_ok=True)
    with open(deploy_path / "output_drift.py", "w") as out:
        out.write(code)
    with open(deploy_path / "requirements.txt", "w") as out:
        out.write(importer.getBaseModule())
    create_docker_file('output_drift', deploy_path)
|
deploy.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import shutil
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
import tarfile
def add_text_dependency():
    """Return the extra pip requirements (one per line, trailing newline)
    needed when the use case has text features."""
    packages = (
        "nltk==3.6.3",
        "textblob==0.15.3",
        "spacy==3.1.3",
        "demoji==1.1.0",
        "bs4==0.0.1",
        "text_unidecode==1.3",
        "contractions==0.1.73",
    )
    return "".join(pkg + "\n" for pkg in packages)
def get_deploy_params(config):
    """Build the serving configuration written to the deployment config.json:
    the shared keys from *config* plus target path and default endpoint."""
    wanted = ("modelVersion", "problem_type", "target_feature")
    params = {key: value for key, value in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    params['ipAddress'] = '127.0.0.1'
    params['portNo'] = '8094'
    return params
def import_trainer_module(importer):
    """Register the import spec of every non-sklearn algorithm that the
    trainer step used, so the serving module can load those estimators."""
    for algo in get_variable('non_sklearn_modules') or []:
        spec = get_module_mapping(algo)
        importer.addModule(spec['module'],
                           mod_from=spec.get('mod_from', None),
                           mod_as=spec.get('mod_as', None))
# Static import specs for the generated prediction service; each entry is fed
# to importer.addModule(module, mod_from=..., mod_as=...) by run_deploy().
imported_modules = [
    {'module': 'sys', 'mod_from': None, 'mod_as': None},
    {'module': 'math', 'mod_from': None, 'mod_as': None},
    {'module': 'json', 'mod_from': None, 'mod_as': None},
    {'module': 'scipy', 'mod_from': None, 'mod_as': None},
    {'module': 'joblib', 'mod_from': None, 'mod_as': None},
    {'module': 'shutil', 'mod_from': None, 'mod_as': None},
    {'module': 'mlflow', 'mod_from': None, 'mod_as': None},
    {'module': 'sklearn', 'mod_from': None, 'mod_as': None},
    {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
    {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
    {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
    {'module': 'argparse', 'mod_from': None, 'mod_as': None},
    {'module': 'platform', 'mod_from': None, 'mod_as': None}
]
def run_deploy(config):
    """Generate the complete ModelServing package under
    <deploy_path>/MLaC/ModelServing: utility/data-reader modules, bundled
    text/utils helpers, predict/groundtruth/service scripts, requirements,
    config.json and a Dockerfile.

    NOTE: statement order matters throughout — the importer and the global
    function registry are mutated step by step before their code is rendered.
    """
    generated_files = []
    importer = importModule()
    deployer = deploy(target_encoder = get_variable('target_encoder', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False))
    function = global_function()
    importModules(importer, imported_modules)
    if get_variable('cat_encoder', False):
        importer.addModule('category_encoders')
    # pull in the estimator libraries the trainer step actually used
    import_trainer_module(importer)
    if get_variable('word2num_features'):
        function.add_function('s2n', importer)
    if get_variable('text_features'):
        importer.addLocalModule('textProfiler', mod_from='text.textProfiler')
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('Prediction')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create the production data reader file
    importer.addLocalModule('*', mod_from='data_reader')
    reader_obj = data_reader(['sqlite','influx'])
    with open(deploy_path/"data_reader.py", 'w') as f:
        f.write(file_header(usecase) + reader_obj.get_code())
    generated_files.append("data_reader.py")
    # The data profiler is copied verbatim from the AION utilities tarballs
    # because the code is split across files and merging it manually here
    # would risk introducing bugs.
    aion_utilities = Path(__file__).parent.parent.parent.parent / 'utilities'
    with tarfile.open(aion_utilities / 'text.tar') as file:
        file.extractall(deploy_path)
    # remove a stale 'utils' dir so the fresh extraction is authoritative
    if (deploy_path / 'utils').exists():
        shutil.rmtree(deploy_path / 'utils')
    with tarfile.open(aion_utilities / 'utils.tar') as file:
        file.extractall(deploy_path )
    generated_files.append("text")
    generated_files.append("utils")
    # create empty init file required for creating a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    function.add_function('get_mlflow_uris')
    code = file_header(usecase)
    code += importer.getCode()
    code += deployer.getInputOutputFiles()
    code += function.getCode()
    code += deployer.getCode()
    # create prediction file
    with open(deploy_path/"predict.py", 'w') as f:
        f.write(code)
    generated_files.append("predict.py")
    # create groundtruth file
    with open(deploy_path/"groundtruth.py", 'w') as f:
        f.write(file_header(usecase) + deployer.getGroundtruthCode())
    generated_files.append("groundtruth.py")
    # create the service entry-point file
    with open(deploy_path/"aionCode.py", 'w') as f:
        f.write(file_header(usecase) + deployer.getServiceCode())
    generated_files.append("aionCode.py")
    importer.addModule('seaborn')
    # create requirements file
    # NOTE(review): this reads config["text_features"] directly while the
    # earlier branch used get_variable('text_features') — confirm both carry
    # the same value for every caller.
    req_file = deploy_path/"requirements.txt"
    with open(req_file, "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
        if config["text_features"]:
            req += add_text_dependency()
        f.write(req)
    generated_files.append("requirements.txt")
    # create config file
    config_file = deploy_path/"config.json"
    config_data = get_deploy_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
    # create docker file
    create_docker_file('Prediction', deploy_path,config['modelName'], generated_files, True if config["text_features"] else False)
trainer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def get_model_name(algo, method):
    """Return the deployment name for *algo*, suffixed by its selection method.

    'modelBased' maps to '<algo>_MLBased', 'statisticalBased' to
    '<algo>_StatisticsBased'; any other method leaves the name unchanged.
    """
    suffix_by_method = {
        'modelBased': '_MLBased',
        'statisticalBased': '_StatisticsBased',
    }
    return algo + suffix_by_method.get(method, '')
def get_training_params(config, algo):
    """Build the config.json payload for one single-algorithm training container.

    Copies the whitelisted keys from *config*, restricts the algorithm map to
    just *algo*, and records the use case name as the target path.
    """
    wanted = ("modelVersion", "problem_type", "target_feature", "train_features",
              "scoring_criteria", "test_ratio", "optimization_param")
    params = {key: value for key, value in config.items() if key in wanted}
    # each generated container trains exactly one algorithm
    params['algorithms'] = {algo: config['algorithms'][algo]}
    params['targetPath'] = config['modelName']
    return params
def addImporterLearner(model, importer):
    """Register the estimator's import on *importer* and track non-sklearn algos.

    Any algorithm whose source module does not contain 'sklearn' (e.g. xgboost,
    lightgbm, catboost) is appended to the shared 'non_sklearn_modules' variable.
    """
    mapping = get_module_mapping(model)
    origin = mapping.get('mod_from', None)
    alias = mapping.get('mod_as', None)
    importer.addModule(mapping['module'], mod_from=origin, mod_as=alias)
    # make sure the tracking list exists before inspecting the module's origin
    if not get_variable('non_sklearn_modules'):
        update_variable('non_sklearn_modules', [])
    if 'sklearn' not in origin:
        tracked = get_variable('non_sklearn_modules')
        tracked.append(model)
        update_variable('non_sklearn_modules', tracked)
def addEvaluator(scorer_type, optimizer,trainer, importer):
    """Emit model-evaluation code into the generated training script.

    Appends statements to *trainer* that compute ``test_score`` on the held-out
    test split according to *scorer_type*, and registers the required
    sklearn.metrics imports on *importer*.  For the 'genetic' optimizer the
    feature list is first filtered through the selector's support mask.
    For regression error metrics the shared 'smaller_is_better' flag is set so
    downstream model selection compares scores in the right direction.
    """
    trainer.addStatement("if not X_test.empty:")
    if optimizer == 'genetic':
        trainer.addStatement('features = [x for i,x in enumerate(features) if grid.support_[i]]',indent=2)
    trainer.addStatement('y_pred = estimator.predict(X_test[features])',indent=2)
    if scorer_type == 'accuracy':
        importer.addModule('accuracy_score', mod_from='sklearn.metrics')
        trainer.addStatement(f"test_score = round(accuracy_score(y_test,y_pred),2) * 100",indent=2)
        importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
        trainer.addStatement("log.info('Confusion Matrix:')",indent=2)
        trainer.addStatement("log.info('\\n' + pd.DataFrame(confusion_matrix(y_test,y_pred)).to_string())",indent=2)
    elif scorer_type == 'recall':
        importer.addModule('recall_score', mod_from='sklearn.metrics')
        trainer.addStatement(f"test_score = round(recall_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
        importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
        trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2)
        trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
    elif scorer_type == 'precision':
        importer.addModule('precision_score', mod_from='sklearn.metrics')
        trainer.addStatement(f"test_score = round(precision_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
        importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
        trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2)
        trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
    elif scorer_type == 'f1_score':
        importer.addModule('f1_score', mod_from='sklearn.metrics')
        trainer.addStatement(f"test_score = round(f1_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
        importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
        trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2)
        trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
    elif scorer_type == 'roc_auc':
        # binary case first; on failure fall back to one-hot + multi-class AUC
        importer.addModule('roc_auc_score', mod_from='sklearn.metrics')
        trainer.addStatement("try:")
        trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred),2) * 100", indent=3)
        importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
        trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=3)
        trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=3)
        trainer.addStatement("except:")
        trainer.addStatement("try:",indent=3)
        trainer.addStatement("actual = pd.get_dummies(y_test)",indent=4)
        trainer.addStatement("y_pred = pd.get_dummies(y_pred)",indent=4)
        # BUG FIX: this statement was previously emitted with indent=3, which
        # dedented it out of the inner ``try:`` body (whose other statements are
        # at indent 4) and produced syntactically invalid generated code — a
        # bare statement between ``try`` and its ``except``. It must sit at
        # indent 4 with the rest of the inner try body.
        trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred,average='weighted', multi_class='ovr'),2) * 100", indent=4)
        trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=4)
        trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=4)
        trainer.addStatement("except:",indent=3)
        trainer.addStatement(f"test_score = 0.0", indent=4)
    elif scorer_type == 'neg_mean_squared_error' or scorer_type == 'mse':
        importer.addModule('mean_squared_error', mod_from='sklearn.metrics')
        trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred),2)',indent=2)
        update_variable('smaller_is_better', True)
    elif scorer_type == 'neg_root_mean_squared_error' or scorer_type == 'rmse':
        importer.addModule('mean_squared_error', mod_from='sklearn.metrics')
        trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred,squared=False),2)',indent=2)
        update_variable('smaller_is_better', True)
    elif scorer_type == 'neg_mean_absolute_error' or scorer_type == 'mae':
        importer.addModule('mean_absolute_error', mod_from='sklearn.metrics')
        trainer.addStatement(f'test_score = round(mean_absolute_error(y_test,y_pred),2)',indent=2)
        update_variable('smaller_is_better', True)
    elif scorer_type == 'r2':
        importer.addModule('r2_score', mod_from='sklearn.metrics')
        trainer.addStatement(f'test_score = round(r2_score(y_test,y_pred),2)',indent=2)
def update_search_space(algo, config):
    """Record a single-entry hyper-parameter search space for *algo* on *config*.

    The entry pairs the algorithm (mapped to its source module) with its
    configured parameter grid, stored under config['search_space'].
    """
    hyper_params = config["algorithms"][algo]
    algo_entry = {algo: get_module_mapping(algo)['mod_from']}
    config['search_space'] = [{'algo': algo_entry, 'param': hyper_params}]
def get_optimization(optimization, importer, function=None):
    """Translate an optimizer keyword into its search-CV class name.

    Registers the class's import on *importer* where one is needed, records the
    class as a required helper on *function*, and returns the class name.
    Unrecognised keywords are passed through unchanged.
    """
    lookup = {
        'grid': ('GridSearchCV', 'sklearn.model_selection'),
        'random': ('RandomizedSearchCV', 'sklearn.model_selection'),
        'genetic': ('GeneticSelectionCV', 'genetic_selection'),
        'bayesopt': ('BayesSearchCV', None),  # import handled by the helper itself
    }
    if optimization in lookup:
        class_name, source_module = lookup[optimization]
        if source_module is not None:
            importer.addModule(class_name, mod_from=source_module)
        optimization = class_name
    function.add_function(optimization, importer)
    return optimization
def scoring_criteria_reg(score_param):
    """Translate a user-facing regression metric name to its sklearn scorer id."""
    if score_param == 'r2':
        return 'r2'
    if score_param == 'rmse':
        return 'neg_root_mean_squared_error'
    if score_param == 'mae':
        return 'neg_mean_absolute_error'
    # 'mse' and any unrecognised metric fall back to mean squared error
    return 'neg_mean_squared_error'
def addBalancing(balancingMethod, importer, code):
    """Emit class-imbalance handling code for the requested balancing method.

    'oversample' emits SMOTE over-sampling, 'undersample' emits TomekLinks
    under-sampling; any other value emits nothing.
    """
    if balancingMethod == 'oversample':
        importer.addModule('SMOTE', mod_from='imblearn.over_sampling')
        code.addStatement("\n # data balancing")
        code.addStatement("X_train, y_train = SMOTE(sampling_strategy='auto', k_neighbors=1, random_state=100).fit_resample(X_train, y_train)")
    elif balancingMethod == 'undersample':
        importer.addModule('TomekLinks', mod_from='imblearn.under_sampling')
        code.addStatement("\n # data balancing")
        code.addStatement("X_train, y_train = TomekLinks().fit_resample(X_train, y_train)")
def run_trainer(base_config):
    """Generate one MLaC 'ModelTraining' package per (algorithm, method) pair.

    A shared code skeleton is built once, then cloned for every combination of
    configured algorithm and feature-selection method.  Each clone is written
    out as a self-contained package under
    ``<deploy_path>/MLaC/ModelTraining_<model_name>`` containing utility.py,
    __init__.py, aionCode.py, requirements.txt, config.json and a Dockerfile.
    """
    # shared skeleton (imports, helpers, split/balancing/scorer) reused per variant
    base_trainer = learner()
    base_importer = importModule()
    function = global_function()
    base_importer.addModule('joblib')
    base_importer.addModule('warnings')
    base_importer.addModule('argparse')
    base_importer.addModule('pandas', mod_as='pd')
    base_importer.addModule('Path', mod_from='pathlib')
    function.add_function('get_mlflow_uris')
    function.add_function('mlflow_create_experiment')
    importModules(base_importer,base_trainer.getPrefixModules())
    base_trainer.addPrefixCode()
    if base_config["algorithms"]:
        base_trainer.add_train_test_split('train_features', 'target_feature', "config['test_ratio']")
    # balancing is only applied to classification problems
    if base_config["problem_type"] == 'classification':
        if base_config["balancingMethod"]:
            addBalancing(base_config["balancingMethod"],base_importer,base_trainer)
            base_trainer.addStatement(f"log.info('Data balancing done')")
    base_trainer.addStatement("\n #select scorer")
    if base_config["problem_type"] == 'classification':
        function.add_function('scoring_criteria', base_importer)
        base_trainer.addStatement("scorer = scoring_criteria(config['scoring_criteria'],config['problem_type'], df[target_feature].nunique())")
    else:
        # regression metric names are normalised to sklearn scorer ids
        base_config['scoring_criteria'] = scoring_criteria_reg(base_config['scoring_criteria'])
        base_trainer.addStatement(f"scorer = config['scoring_criteria']")
    base_trainer.addStatement(f"log.info('Scoring criteria: {base_config['scoring_criteria']}')")
    # a feature reducer (e.g. PCA) takes precedence over feature selectors
    feature_selector = []
    if base_config['feature_reducer']:
        feature_selector.append(base_config['feature_reducer'])
    elif base_config['feature_selector']:
        feature_selector = base_config['feature_selector']
    for algo in base_config["algorithms"].keys():
        for method in feature_selector:
            # clone the shared skeleton so each variant is generated independently
            trainer = learner()
            importer = importModule()
            trainer.copyCode(base_trainer)
            importer.copyCode(base_importer)
            config = base_config
            usecase = config['modelName']+'_'+config['modelVersion']
            addImporterLearner(algo, importer)
            trainer.addStatement("\n #Training model")
            trainer.addStatement(f"log.info('Training {algo} for {method}')")
            trainer.add_model_fit(algo, get_optimization(config["optimization"], importer, function), method, importer)
            trainer.addStatement("\n #model evaluation")
            addEvaluator(config['scoring_criteria'],config["optimization"], trainer, importer)
            function.add_function('mlflowSetPath')
            function.add_function('logMlflow')
            importModules(importer, trainer.getSuffixModules())
            importModules(importer, trainer.getMainCodeModules())
            if base_config["problem_type"] == 'classification':
                function.add_function('classification_metrices', importer)
                trainer.addStatement("metrices = get_classification_metrices(y_test,y_pred)",indent=2)
                trainer.add_100_trainsize_code()
                trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})")
            else:
                function.add_function('regression_metrices', importer)
                trainer.addStatement("metrices = get_regression_metrices(y_test,y_pred)",indent=2)
                trainer.add_100_trainsize_code()
                trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})")
            trainer.addSuffixCode()
            trainer.addMainCode()
            model_name = get_model_name(algo,method)
            deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name)
            deploy_path.mkdir(parents=True, exist_ok=True)
            generated_files = []
            # create the utility file
            importer.addLocalModule('*', mod_from='utility')
            utility_obj = utility_function('train')
            with open(deploy_path/"utility.py", 'w') as f:
                f.write(file_header(usecase) + utility_obj.get_code())
            generated_files.append("utility.py")
            # create empty init file to make a package
            with open(deploy_path/"__init__.py", 'w') as f:
                f.write(file_header(usecase))
            generated_files.append("__init__.py")
            # assemble the training entry script: imports, I/O map, helpers, body
            code = importer.getCode()
            code += 'warnings.filterwarnings("ignore")\n'
            code += f"\nmodel_name = '{model_name}'\n"
            append_variable('models_name',model_name)
            out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','performance':f'{model_name}_performance.json','metaDataOutput':f'{model_name}_modelMetaData.json'}
            trainer.addOutputFiles(out_files)
            code += trainer.getInputOutputFiles()
            code += function.getCode()
            trainer.addLocalFunctionsCode()
            code += trainer.getCode()
            with open(deploy_path/"aionCode.py", "w") as f:
                f.write(code)
            generated_files.append("aionCode.py")
            with open(deploy_path/"requirements.txt", "w") as f:
                req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
                f.write(req)
            generated_files.append("requirements.txt")
            with open (deploy_path/"config.json", "w") as f:
                json.dump(get_training_params(config, algo), f, indent=4)
            generated_files.append("config.json")
            create_docker_file('train', deploy_path,config['modelName'], generated_files)
|
selector.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.ml.core import *
from .utility import *
# Maps a pipeline stage to the artefact file name(s) it writes; consumed when
# registering output files on the generated code object.
output_file_map = {
    'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'}
}
def get_selector_params(config):
    """Collect the configuration subset consumed by the feature-selection step."""
    wanted = ("modelVersion", "problem_type", "target_feature", "train_features",
              "cat_features", "n_components")
    params = {key: value for key, value in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_selector(config):
    """Generate the MLaC 'FeatureEngineering' component.

    Emits a package under ``<deploy_path>/MLaC/FeatureEngineering`` whose
    aionCode.py either applies a dimensionality reducer (pca/svd/
    factoranalysis/ica) or runs the configured feature-selection methods
    (allFeatures / modelBased / statisticalBased), then writes utility.py,
    __init__.py, requirements.txt, config.json and a Dockerfile.
    """
    select = selector()
    importer = importModule()
    function = global_function()
    importModules(importer,select.getPrefixModules())
    select.addPrefixCode()
    # the target must never appear among the training features
    if config["target_feature"] in config["train_features"]:
        config["train_features"].remove(config["target_feature"])
    select.addStatement("train_features = df.columns.tolist()")
    select.addStatement("target_feature = config['target_feature']")
    select.addStatement("train_features.remove(target_feature)")
    select.addStatement("cat_features = prev_step_output['cat_features']")
    select.add_variable('total_features',[])
    select.addStatement("log.log_dataframe(df)")
    methods = config.get("feature_selector", None)
    feature_reducer = config.get("feature_reducer", None)
    select.addStatement("selected_features = {}")
    select.addStatement("meta_data['featureengineering']= {}")
    if feature_reducer:
        # dimensionality reduction replaces the original feature space entirely
        update_variable('feature_reducer', True)
        select.addStatement(f"log.info('Running dimensionality reduction technique( {feature_reducer})')")
        if feature_reducer == 'pca':
            importer.addModule('PCA', mod_from='sklearn.decomposition')
            # n_components == 0 lets PCA pick the dimension ('mle'); a fraction
            # keeps that share of variance; an integer fixes the dimension
            if int(config["n_components"]) == 0:
                select.addStatement("dimension_reducer = PCA(n_components='mle',svd_solver = 'full')")
            elif int(config["n_components"]) < 1:
                select.addStatement("dimension_reducer = PCA(n_components=config['n_components'],svd_solver = 'full')")
            else:
                select.addStatement("dimension_reducer = PCA(n_components=config['n_components'])")
        elif feature_reducer == 'svd':
            importer.addModule('TruncatedSVD', mod_from='sklearn.decomposition')
            # TruncatedSVD needs at least 2 components
            if config["n_components"] < 2:
                config["n_components"] = 2
            select.addStatement("dimension_reducer = TruncatedSVD(n_components=config['n_components'], n_iter=7, random_state=42)")
        elif feature_reducer == 'factoranalysis':
            importer.addModule('FactorAnalysis', mod_from='sklearn.decomposition')
            if config["n_components"] == 0:
                select.addStatement("dimension_reducer = FactorAnalysis()")
            else:
                select.addStatement("dimension_reducer = FactorAnalysis(n_components=config['n_components'])")
        elif feature_reducer == 'ica':
            importer.addModule('FastICA', mod_from='sklearn.decomposition')
            if config["n_components"] == 0:
                select.addStatement("dimension_reducer = FastICA()")
            else:
                select.addStatement("dimension_reducer = FastICA(n_components=config['n_components'])")
        # fit the reducer, rebuild the frame from its components, persist it
        select.addStatement("pca_array = dimension_reducer.fit_transform(df[train_features])")
        select.addStatement("pca_columns = ['pca_'+str(e) for e in list(range(pca_array.shape[1]))]")
        select.addStatement("scaledDF = pd.DataFrame(pca_array, columns=pca_columns)")
        select.addStatement("scaledDF[target_feature] = df[target_feature]")
        select.addStatement("df = scaledDF")
        select.addStatement(f"selected_features['{feature_reducer}'] = pca_columns")
        select.addStatement("total_features = df.columns.tolist()")
        select.addStatement("meta_data['featureengineering']['feature_reducer']= {}")
        select.addStatement("reducer_file_name = str(targetPath/IOFiles['feature_reducer'])")
        importer.addModule('joblib')
        select.addStatement("joblib.dump(dimension_reducer, reducer_file_name)")
        select.addStatement("meta_data['featureengineering']['feature_reducer']['file']= IOFiles['feature_reducer']")
        select.addStatement("meta_data['featureengineering']['feature_reducer']['features']= train_features")
        select.addOutputFiles(output_file_map['feature_reducer'])
    elif methods:
        if 'allFeatures' in methods:
            addDropFeature('target_feature', 'train_features', select)
            select.addStatement("selected_features['allFeatures'] = train_features")
        if 'modelBased' in methods:
            # rank features with RFE over a model fit; keep ranks above 0.30
            select.addStatement(f"log.info('Model Based Correlation Analysis Start')")
            select.addStatement("model_based_feat = []")
            importer.addModule('numpy', mod_as='np')
            importer.addModule('RFE', mod_from='sklearn.feature_selection')
            importer.addModule('MinMaxScaler', mod_from='sklearn.preprocessing')
            if config["problem_type"] == 'classification':
                importer.addModule('ExtraTreesClassifier', mod_from='sklearn.ensemble')
                select.addStatement("estimator = ExtraTreesClassifier(n_estimators=100)")
            else:
                importer.addModule('Lasso', mod_from='sklearn.linear_model')
                select.addStatement("estimator = Lasso()")
            select.addStatement("estimator.fit(df[train_features],df[target_feature])")
            select.addStatement("rfe = RFE(estimator, n_features_to_select=1, verbose =0 )")
            select.addStatement("rfe.fit(df[train_features],df[target_feature])")
            select.addStatement("ranks = MinMaxScaler().fit_transform(-1*np.array([list(map(float, rfe.ranking_))]).T).T[0]")
            select.addStatement("ranks = list(map(lambda x: round(x,2), ranks))")
            select.addStatement("for item, rank in zip(df.columns,ranks):")
            select.addStatement("if rank > 0.30:", indent=2)
            select.addStatement("model_based_feat.append(item)", indent=3)
            addDropFeature('target_feature', 'model_based_feat', select)
            select.addStatement("selected_features['modelBased'] = model_based_feat")
            select.addStatement(f"log.info(f'Highly Correlated Features : {{model_based_feat}}')")
        if 'statisticalBased' in methods:
            # correlation/variance reduction followed by statistical importance
            select.addStatement(f"log.info('Statistical Based Correlation Analysis Start')")
            function.add_function('start_reducer',importer)
            select.addStatement(f"features = start_reducer(df, target_feature, {config['corr_threshold']},{config['var_threshold']})")
            select.addStatement("train_features = [x for x in features if x in train_features]")
            select.addStatement("cat_features = [x for x in cat_features if x in features]")
            select.addStatement("numeric_features = df[features].select_dtypes('number').columns.tolist()")
            if config["problem_type"] == 'classification':
                function.add_function('feature_importance_class')
                select.addStatement(f"statistics_based_feat = feature_importance_class(df[features], numeric_features, cat_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})")
            else:
                function.add_function('feature_importance_reg')
                select.addStatement(f"statistics_based_feat = feature_importance_reg(df[features], numeric_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})")
            addDropFeature('target_feature', 'statistics_based_feat', select)
            select.addStatement("selected_features['statisticalBased'] = statistics_based_feat")
            select.addStatement(f"log.info('Highly Correlated Features : {{statistics_based_feat}}')")
        # union of all selected feature sets plus the target
        select.addStatement("total_features = list(set([x for y in selected_features.values() for x in y] + [target_feature]))")
        select.addStatement(f"df = df[total_features]")
    select.addStatement("log.log_dataframe(df)")
    select.addSuffixCode()
    importModules(importer, select.getSuffixModules())
    importModules(importer, select.getMainCodeModules())
    select.addMainCode()
    generated_files = []
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('selector')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # assemble the entry script: header, imports, I/O map, helpers, body
    code = file_header(usecase)
    code += importer.getCode()
    code += select.getInputOutputFiles()
    code += function.getCode()
    select.addLocalFunctionsCode()
    code += select.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_selector_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
create_docker_file('selector', deploy_path,config['modelName'], generated_files) |
utility.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import datetime
from pathlib import Path
# Module-level registry of code-generation state shared across generator steps.
variables = {}
def init_variables():
    """Reset the shared variable registry to an empty state."""
    global variables
    variables = {}
def update_variable(name, value):
    """Store *value* under *name* in the shared registry."""
    variables[name] = value
def get_variable(name, default=None):
    """Return the value stored under *name*, or *default* when absent."""
    return variables.get(name, default)
def append_variable(name, value):
    """Append *value* to the list stored under *name*.

    A missing (or falsy) entry becomes a one-element list; an existing
    non-list scalar is promoted to a two-element list.
    """
    current = get_variable(name)
    if not current:
        update_variable(name, [value])
    elif isinstance(current, list):
        current.append(value)
        update_variable(name, current)
    else:
        update_variable(name, [current, value])
def addDropFeature(feature, features_list, coder, indent=1):
    """Emit guarded code that removes *feature* from *features_list* if present."""
    guard = f'if {feature} in {features_list}:'
    removal = f'{features_list}.remove({feature})'
    coder.addStatement(guard, indent=indent)
    coder.addStatement(removal, indent=indent + 1)
def importModules(importer, modules_list):
    """Register every module description from *modules_list* with *importer*.

    Each entry is a dict with a mandatory 'module' key and optional
    'mod_from' / 'mod_as' keys.
    """
    for entry in modules_list:
        importer.addModule(entry['module'],
                           mod_from=entry.get('mod_from', None),
                           mod_as=entry.get('mod_as', None))
def file_header(use_case, module_name=None):
    """Return the standard header for a generated AION python file.

    The header carries a shebang, a coding cookie and a docstring naming the
    use case and the generation timestamp.  *module_name* is accepted for
    interface compatibility but unused.
    """
    stamp = datetime.datetime.now().isoformat(timespec='seconds', sep=' ')
    shebang = "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n"
    banner = (f"'''\nThis file is automatically generated by AION for {use_case} usecase.\n"
              f"File generation time: {stamp}\n'''")
    return shebang + banner
def get_module_mapping(module):
    """Return the import descriptor for a supported estimator, or None.

    The descriptor is a dict of the form
    ``{'module': <class name>, 'mod_from': <source package>}``.
    """
    locations = {
        # classifiers
        'LogisticRegression': 'sklearn.linear_model',
        'GaussianNB': 'sklearn.naive_bayes',
        'DecisionTreeClassifier': 'sklearn.tree',
        'SVC': 'sklearn.svm',
        'KNeighborsClassifier': 'sklearn.neighbors',
        'GradientBoostingClassifier': 'sklearn.ensemble',
        'RandomForestClassifier': 'sklearn.ensemble',
        'XGBClassifier': 'xgboost',
        'LGBMClassifier': 'lightgbm',
        'CatBoostClassifier': 'catboost',
        # regressors
        'LinearRegression': 'sklearn.linear_model',
        'Lasso': 'sklearn.linear_model',
        'Ridge': 'sklearn.linear_model',
        'DecisionTreeRegressor': 'sklearn.tree',
        'RandomForestRegressor': 'sklearn.ensemble',
        'XGBRegressor': 'xgboost',
        'LGBMRegressor': 'lightgbm',
        'CatBoostRegressor': 'catboost',
    }
    source = locations.get(module)
    if source is None:
        return None
    return {'module': module, 'mod_from': source}
def create_docker_file(name, path,usecasename,files=[],text_feature=False):
    """Write a Dockerfile for the named MLaC component into *path*.

    *name* selects the component flavour ('load_data', 'transformer',
    'selector', 'train', 'register', 'Prediction', 'input_drift'); any other
    value produces an empty Dockerfile.  *files* lists the generated files to
    COPY into the image; *text_feature* adds nltk corpus downloads for
    components that process text.
    NOTE: the mutable default ``files=[]`` is only iterated here, never
    mutated, so sharing across calls is harmless.
    """
    text = ""
    if name == 'load_data':
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        text+='RUN pip install --no-cache-dir -r requirements.txt'
    elif name == 'transformer':
        text='FROM python:3.8-slim-buster\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        # NOTE: the trailing backslash-newline inside these triple-quoted
        # literals is a line continuation, so each addition contributes to a
        # single unbroken RUN line in the Dockerfile.
        text+='''RUN \
'''
        text+=''' pip install --no-cache-dir -r requirements.txt\
'''
        if text_feature:
            text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\
'''
        text+='\n'
    elif name == 'selector':
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        text+='RUN pip install --no-cache-dir -r requirements.txt'
    elif name == 'train':
        # training images copy a fixed file set rather than iterating *files*
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        text+='COPY requirements.txt requirements.txt'
        text+='\n'
        text+='COPY config.json config.json'
        text+='\n'
        text+='COPY aionCode.py aionCode.py'
        text+='\n'
        text+='COPY utility.py utility.py'
        text+='\n'
        text+='RUN pip install --no-cache-dir -r requirements.txt'
    elif name == 'register':
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        text+='RUN pip install --no-cache-dir -r requirements.txt'
    elif name == 'Prediction':
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        text+='''RUN \
'''
        text+='''pip install --no-cache-dir -r requirements.txt\
'''
        if text_feature:
            text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\
'''
        text+='\n'
        # prediction images run the generated service on port 8094
        text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\n'
    elif name == 'input_drift':
        text='FROM python:3.8-slim-buster'
        text+='\n'
        text+='LABEL "usecase"="'+str(usecasename)+'"'
        text+='\n'
        text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
        text+='\n'
        for file in files:
            text+=f'\nCOPY {file} {file}'
        text+='\n'
        text+='RUN pip install --no-cache-dir -r requirements.txt'
file_name = Path(path)/'Dockerfile'
with open(file_name, 'w') as f:
f.write(text) |
drift_analysis.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
# Import specifications for the generated drift-monitoring entry script;
# each entry is consumed by importModules() to register the module with the
# code-generation importer.
imported_modules = [
    {'module': 'sys', 'mod_from': None, 'mod_as': None},
    {'module': 'json', 'mod_from': None, 'mod_as': None},
    {'module': 'math', 'mod_from': None, 'mod_as': None},
    {'module': 'joblib', 'mod_from': None, 'mod_as': None},
    {'module': 'mlflow', 'mod_from': None, 'mod_as': None},
    {'module': 'sklearn', 'mod_from': None, 'mod_as': None},
    {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
    {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
    {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
    {'module': 'argparse', 'mod_from': None, 'mod_as': None},
    {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'},
    {'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_drift_params(config):
    """Assemble the runtime configuration for the drift-monitoring component.

    Copies the whitelisted keys, resolves the input data location (with a
    placeholder when none is configured), and fills production-database and
    mlflow settings with defaults when absent.
    """
    wanted = ("modelVersion", "problem_type", "target_feature",
              "selected_features", "scoring_criteria", "s3")
    params = {key: value for key, value in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    location = config['dataLocation']
    params['inputUri'] = location if location != '' else '<input datalocation>'
    params['prod_db_type'] = config.get('prod_db_type', 'sqlite')
    params['db_config'] = config.get('db_config', {})
    params['mlflow_config'] = config.get('mlflow_config', {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''})
    return params
def run_drift_analysis(config):
    """Generate the MLaC 'ModelMonitoring' (input drift) component.

    Emits a package under ``<deploy_path>/MLaC/ModelMonitoring`` containing
    utility.py, data_reader.py, __init__.py, aionCode.py, input_drift.py,
    config.json, requirements.txt and a Dockerfile.
    """
    init_variables()
    importer = importModule()
    function = global_function()
    drifter = drift()
    importModules(importer, imported_modules)
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring'
    deploy_path.mkdir(parents=True, exist_ok=True)
    generated_files = []
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('drift')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create the production data reader file
    importer.addLocalModule('dataReader', mod_from='data_reader')
    readers = ['sqlite','influx']
    # add the s3 reader only when s3 credentials are configured
    if 's3' in config.keys():
        readers.append('s3')
    reader_obj = data_reader(readers)
    with open(deploy_path/"data_reader.py", 'w') as f:
        f.write(file_header(usecase) + reader_obj.get_code())
    generated_files.append("data_reader.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # assemble the monitoring entry script
    importer.addLocalModule('inputdrift', mod_from='input_drift')
    code = file_header(usecase)
    code += importer.getCode()
    code += drifter.getInputOutputFiles()
    code += function.getCode()
    code += drifter.get_main_drift_code(config['problem_type'], get_variable('smaller_is_better', False))
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    # the input-drift detector lives in its own module with its own imports
    input_drift_importer = importModule()
    importModules(input_drift_importer, drifter.get_input_drift_import_modules())
    code = file_header(usecase)
    code += input_drift_importer.getCode()
    code += drifter.get_input_drift_code()
    with open(deploy_path/"input_drift.py", "w") as f:
        f.write(code)
    generated_files.append("input_drift.py")
    with open (deploy_path/"config.json", "w") as f:
        json.dump(get_drift_params(config), f, indent=4)
    generated_files.append("config.json")
    req_file = deploy_path/"requirements.txt"
    with open(req_file, "w") as f:
        f.write(importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer(), input_drift_importer]))
    generated_files.append("requirements.txt")
    create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
|
transformer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import shutil
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
import tarfile
# Maps each transformer artefact to the pickle file name the generated code
# writes; used when registering output files on the code object.
output_file_map = {
    'text' : {'text' : 'text_profiler.pkl'},
    'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'},
    'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'},
    'normalizer' : {'normalizer' : 'normalizer.pkl'}
}
def add_common_imports(importer):
    """Register the standard modules every generated script needs.

    Args:
        importer: importModule-like collector exposing addModule().
    """
    for module_name, origin, alias in (
        ('json', None, None),
        ('Path', 'pathlib', None),
        ('pandas', None, 'pd'),
        ('argparse', None, None),
        ('platform', None, None),
    ):
        importer.addModule(module_name, mod_from=origin, mod_as=alias)
def add_text_dependency():
    """Return the extra pip requirements needed for text-feature profiling.

    One pinned requirement per line, trailing newline included, suitable for
    direct concatenation onto a requirements.txt body.
    """
    pinned = (
        'nltk==3.6.3',
        'textblob==0.15.3',
        'spacy==3.1.3',
        'demoji==1.1.0',
        'bs4==0.0.1',
        'text_unidecode==1.3',
        'contractions==0.1.73',
    )
    return ''.join(f'{requirement}\n' for requirement in pinned)
def get_transformer_params(config):
    """Project *config* down to the keys the transformer component consumes.

    Whitelisted keys keep the order in which they appear in *config*; the
    use-case name is exposed to the generated script as 'targetPath'.
    """
    wanted = ("modelVersion", "problem_type", "target_feature", "train_features",
              "text_features", "profiler", "test_ratio")  # Bugid 13217
    params = {key: val for key, val in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_transformer(config):
    """Generate the standalone 'DataTransformation' MLaC component.

    Writes aionCode.py, utility.py, dataProfiler.py (copied verbatim from the
    AION utilities tree), the text/utils helper packages, requirements.txt,
    config.json and a Dockerfile under <deploy_path>/MLaC/DataTransformation.

    Raises:
        ValueError: if the AION dataProfiler source file cannot be found.
    """
    transformer = profiler()
    importer = importModule()
    function = global_function()
    importModules(importer, transformer.getPrefixModules())
    importer.addModule('warnings')
    transformer.addPrefixCode()
    importer.addModule('train_test_split', mod_from='sklearn.model_selection')
    if config["problem_type"] == 'classification':
        # Classification path: stratified split plus target label-encoding.
        importer.addModule('LabelEncoder', mod_from='sklearn.preprocessing')
        transformer.addInputFiles({'targetEncoder':'targetEncoder.pkl'})
        update_variable('target_encoder', True)
        transformer.addStatement("train_data, test_data = train_test_split(df,stratify=df[target_feature],test_size=config['test_ratio'])",indent=2) #Bugid 13217
        transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, encode_target=True, config=config['profiler'],log=log)") #Bugid 13217
    else:
        transformer.addStatement("train_data, test_data = train_test_split(df,test_size=config['test_ratio'])",indent=2)
        transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, config=config['profiler'],log=log)")
    importModules(importer, transformer.getSuffixModules())
    importModules(importer, transformer.getMainCodeModules())
    transformer.addSuffixCode( config["problem_type"] == 'classification')
    transformer.addMainCode()
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation'
    deploy_path.mkdir(parents=True, exist_ok=True)
    generated_files = []
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('transformer')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # create the dataProfiler file
    profiler_importer = importModule()
    importer.addLocalModule('profiler', mod_from='dataProfiler')
    profiler_obj = data_profiler(profiler_importer, True if config["text_features"] else False)
    code_text = profiler_obj.get_code() # import statement will be generated when profiler_obj.get_code is called.
    # The data profiler is copied from the AION source tree because the code is
    # split across modules and merging it manually can introduce bugs; a better
    # way to locate the imported module is still needed.
    #aion_transformer = Path(__file__).parent.parent.parent.parent/'transformations'
    aion_utilities = Path(__file__).parent.parent.parent.parent/'utilities' #added for non encryption --Usnish
    (deploy_path/'transformations').mkdir(parents=True, exist_ok=True)
    if not (aion_utilities/'transformations'/'dataProfiler.py').exists():
        raise ValueError('Data profiler file removed from AION')
    shutil.copy(aion_utilities/'transformations'/'dataProfiler.py',deploy_path/"dataProfiler.py")
    shutil.copy(aion_utilities/'transformations'/'data_profiler_functions.py',deploy_path/"transformations"/"data_profiler_functions.py")
    # Refresh the bundled text/utils helper packages from their tar archives.
    if (deploy_path/'text').exists():
        shutil.rmtree(deploy_path/'text')
    with tarfile.open(aion_utilities/'text.tar') as file:
        file.extractall(deploy_path)
    if (deploy_path/'utils').exists():
        shutil.rmtree(deploy_path/'utils')
    with tarfile.open(aion_utilities / 'utils.tar') as file:
        file.extractall(deploy_path)
    generated_files.append("dataProfiler.py")
    generated_files.append("transformations")
    generated_files.append("text")
    generated_files.append("utils")
    code = file_header(usecase)
    # NOTE(review): os.path.abspath(...) below discards its result — it does not
    # actually chdir as the trailing comment claims; confirm intent before changing.
    code += "\nimport os\nos.path.abspath(os.path.join(__file__, os.pardir))\n" #chdir to import from current dir
    code += importer.getCode()
    code += '\nwarnings.filterwarnings("ignore")\n'
    code += transformer.getInputOutputFiles()
    code += function.getCode()
    transformer.addLocalFunctionsCode()
    code += transformer.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), profiler_importer])
        if config["text_features"]:
            req += add_text_dependency()
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_transformer_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
    create_docker_file('transformer', deploy_path,config['modelName'], generated_files,True if config["text_features"] else False)
|
register.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def get_register_params(config, models):
    """Build the config.json payload for the generated model-registry component.

    Args:
        config: full pipeline configuration dict.
        models: list of trained model names to register.
    """
    wanted = ("modelVersion", "problem_type")
    params = {key: val for key, val in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    params['models'] = models
    return params
def run_register(config):
    """Generate the standalone 'ModelRegistry' MLaC component.

    Writes aionCode.py, utility.py, requirements.txt and config.json under
    <deploy_path>/MLaC/ModelRegistry; the Dockerfile is emitted by the
    create_docker_file call that follows this block.
    """
    importer = importModule()
    function = global_function()
    registration = register(importer)
    function.add_function('get_mlflow_uris')
    # Model names and metric orientation recorded earlier in the pipeline run.
    models = get_variable('models_name')
    smaller_is_better = get_variable('smaller_is_better', False)
    registration.addClassCode(smaller_is_better)
    registration.addLocalFunctionsCode(models)
    registration.addPrefixCode()
    registration.addMainCode(models)
    importModules(importer, registration.getMainCodeModules())
    importer.addModule('warnings')
    generated_files = []
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('register')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file required for creating a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    code = registration.getImportCode()
    code += '\nwarnings.filterwarnings("ignore")\n'
    code += registration.getInputOutputFiles()
    code += function.getCode()
    code += registration.getCode()
    # create serving file
    with open(deploy_path/"aionCode.py", 'w') as f:
        f.write(file_header(usecase) + code)
    generated_files.append("aionCode.py")
    # create requirements file
    req_file = deploy_path/"requirements.txt"
    with open(req_file, "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    # create config file
    with open (deploy_path/"config.json", "w") as f:
        json.dump(get_register_params(config, models), f, indent=4)
    generated_files.append("config.json")
    # create docker file
create_docker_file('register', deploy_path,config['modelName'], generated_files) |
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .load_data import run_loader
from .transformer import run_transformer
from .selector import run_selector
from .trainer import run_trainer
from .register import run_register
from .deploy import run_deploy
from .drift_analysis import run_drift_analysis
|
load_data.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.ml.core import *
from .utility import *
# Standard modules every generated data-ingestion script imports; consumed by
# importModules() inside run_loader().
imported_modules = [
    {'module': 'json', 'mod_from': None, 'mod_as': None},
    {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
    {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
    {'module': 'argparse', 'mod_from': None, 'mod_as': None},
    {'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_load_data_params(config):
    """Build the config.json payload for the generated data-ingestion component."""
    wanted = ("modelVersion", "problem_type", "target_feature", "selected_features")
    params = {key: val for key, val in config.items() if key in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_loader(config):
    """Generate the standalone 'DataIngestion' MLaC component.

    Writes aionCode.py, utility.py, data_reader.py, requirements.txt and
    config.json under <deploy_path>/MLaC/DataIngestion; the Dockerfile is
    emitted by the create_docker_file call that follows this block.
    """
    generated_files = []
    importer = importModule()
    loader = tabularDataReader()
    importModules(importer, imported_modules)
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('load_data')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create the production data reader file
    importer.addLocalModule('dataReader', mod_from='data_reader')
    readers = ['sqlite','influx']
    if 's3' in config.keys():
        # Include the S3 reader only when an S3 source is configured.
        readers.append('s3')
    reader_obj = data_reader(readers)
    with open(deploy_path/"data_reader.py", 'w') as f:
        f.write(file_header(usecase) + reader_obj.get_code())
    generated_files.append("data_reader.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    code = file_header(usecase)
    code += importer.getCode()
    code += loader.getInputOutputFiles()
    code += loader.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_load_data_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
create_docker_file('load_data', deploy_path,config['modelName'],generated_files) |
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
|
input_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class input_drift():
    """Code generator that emits a standalone input-drift detection script.

    The generated script loads the production model's training data from the
    local mlflow store and compares feature distributions of current data
    against it (KS test, p < 0.05), reporting columns whose best-fit
    distribution changed.
    """
    def __init__(self, tab_size=4):
        # Indentation unit used by addStatement() when emitting code.
        self.tab = ' ' * tab_size
        self.codeText = ''
    def addInputDriftClass(self):
        """Return the source text of the generated 'inputdrift' class.

        The string below IS generated code; its internal spacing is the
        indentation of the emitted script and must not be altered.
        """
        text = "\
\nclass inputdrift():\
\n\
\n    def __init__(self,base_config):\
\n        self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\
\n        self.currentDataLocation = base_config['currentDataLocation']\
\n        home = Path.home()\
\n        if platform.system() == 'Windows':\
\n            from pathlib import WindowsPath\
\n            output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\
\n            output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\
\n        else:\
\n            from pathlib import PosixPath\
\n            output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\
\n            output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\
\n        if not output_model_dir.exists():\
\n            raise ValueError(f'Configuration file not found at {output_model_dir}')\
\n\
\n        tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\
\n        registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'):\
\n            artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\
\n        else:\
\n            artifact_path = Path(run.info.artifact_uri)\
\n        self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\
\n\
\n    def get_input_drift(self,current_data, historical_data):\
\n        curr_num_feat = current_data.select_dtypes(include='number')\
\n        hist_num_feat = historical_data.select_dtypes(include='number')\
\n        num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\
\n        alert_count = 0\
\n        data = {\
\n            'current':{'data':current_data},\
\n            'hist': {'data': historical_data}\
\n        }\
\n        dist_changed_columns = []\
\n        dist_change_message = []\
\n        for feature in num_features:\
\n            curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\
\n            if (curr_static_value < 0.05):\
\n                distribution = {}\
\n                distribution['hist'] = self.DistributionFinder( historical_data[feature])\
\n                distribution['curr'] = self.DistributionFinder( current_data[feature])\
\n                if(distribution['hist']['name'] == distribution['curr']['name']):\
\n                    pass\
\n                else:\
\n                    alert_count = alert_count + 1\
\n                    dist_changed_columns.append(feature)\
\n                    changed_column = {}\
\n                    changed_column['Feature'] = feature\
\n                    changed_column['KS_Training'] = curr_static_value\
\n                    changed_column['Training_Distribution'] = distribution['hist']['name']\
\n                    changed_column['New_Distribution'] = distribution['curr']['name']\
\n                    dist_change_message.append(changed_column)\
\n        if alert_count:\
\n            resultStatus = dist_change_message\
\n        else :\
\n            resultStatus='Model is working as expected'\
\n        return(alert_count, resultStatus)\
\n\
\n    def DistributionFinder(self,data):\
\n        best_distribution =''\
\n        best_sse =0.0\
\n        if(data.dtype in ['int','int64']):\
\n            distributions= {'bernoulli':{'algo':st.bernoulli},\
\n                            'binom':{'algo':st.binom},\
\n                            'geom':{'algo':st.geom},\
\n                            'nbinom':{'algo':st.nbinom},\
\n                            'poisson':{'algo':st.poisson}\
\n                            }\
\n            index, counts = np.unique(data.astype(int),return_counts=True)\
\n            if(len(index)>=2):\
\n                best_sse = np.inf\
\n                y1=[]\
\n                total=sum(counts)\
\n                mean=float(sum(index*counts))/total\
\n                variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\
\n                dispersion=mean/float(variance)\
\n                theta=1/float(dispersion)\
\n                r=mean*(float(theta)/1-theta)\
\n\
\n                for j in counts:\
\n                    y1.append(float(j)/total)\
\n                distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\
\n                distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\
\n                distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\
\n                distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\
\n                distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\
\n\
\n                sselist = []\
\n                for dist in distributions.keys():\
\n                    distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\
\n                    if np.isnan(distributions[dist]['sess']):\
\n                        distributions[dist]['sess'] = float('inf')\
\n                best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\
\n                best_distribution = best_dist\
\n                best_sse = distributions[best_dist]['sess']\
\n\
\n            elif (len(index) == 1):\
\n                best_distribution = 'Constant Data-No Distribution'\
\n                best_sse = 0.0\
\n        elif(data.dtype in ['float64','float32']):\
\n            distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\
\n            best_distribution = st.norm.name\
\n            best_sse = np.inf\
\n            nrange = data.max() - data.min()\
\n\
\n            y, x = np.histogram(data.astype(float), bins='auto', density=True)\
\n            x = (x + np.roll(x, -1))[:-1] / 2.0\
\n\
\n            for distribution in distributions:\
\n                with warnings.catch_warnings():\
\n                    warnings.filterwarnings('ignore')\
\n                    params = distribution.fit(data.astype(float))\
\n                    arg = params[:-2]\
\n                    loc = params[-2]\
\n                    scale = params[-1]\
\n                    pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\
\n                    sse = np.sum(np.power(y - pdf, 2.0))\
\n                    if( sse < best_sse):\
\n                        best_distribution = distribution.name\
\n                        best_sse = sse\
\n\
\n        return {'name':best_distribution, 'sse': best_sse}\
\n\
"
        return text
    def addSuffixCode(self, indent=1):
        """Return the generated check_drift() driver and __main__ guard.

        NOTE(review): the *indent* parameter is unused here — confirm whether
        callers rely on it before removing.
        """
        text ="\n\
\ndef check_drift( config):\
\n    inputdriftObj = inputdrift(config)\
\n    historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\
\n    currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\
\n    dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\
\n    if message == 'Model is working as expected':\
\n        output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\
\n    else:\
\n        output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\
\n    return(output_json)\
\n\
\nif __name__ == '__main__':\
\n    try:\
\n        if len(sys.argv) < 2:\
\n            raise ValueError('config file not present')\
\n        config = sys.argv[1]\
\n        if Path(config).is_file() and Path(config).suffix == '.json':\
\n            with open(config, 'r') as f:\
\n                config = json.load(f)\
\n        else:\
\n            config = json.loads(config)\
\n        output = check_drift(config)\
\n        status = {'Status':'Success','Message':output}\
\n        print('input_drift:'+json.dumps(status))\
\n    except Exception as e:\
\n        status = {'Status':'Failure','Message':str(e)}\
\n        print('input_drift:'+json.dumps(status))"
        return text
    def addStatement(self, statement, indent=1):
        # Append one line of generated code at the given indent level.
        self.codeText += '\n' + self.tab * indent + statement
    def generateCode(self):
        """Assemble the full generated drift module into codeText."""
        self.codeText += self.addInputDriftClass()
        self.codeText += self.addSuffixCode()
    def getCode(self):
        # Return the accumulated generated source text.
        return self.codeText
|
output_drift.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class output_drift():
def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4):
    """Configure which transformation artefacts the generated script loads.

    Args:
        missing: emit df.fillna() using the saved imputation values.
        word2num_features: features needing word-to-number conversion (or None).
        cat_encoder: False or one of 'labelencoding'/'targetencoding'/'onehotencoding'.
        target_encoder: load the saved target label encoder.
        normalizer: load and apply the saved normalizer.
        text_profiler: load and apply the saved text-profiler pipeline.
        feature_reducer: load and apply the saved feature reducer.
        score_smaller_is_better: metric orientation (stored; not read in this class's visible methods).
        problem_type: 'classification' or 'regression' — selects the metric helper in addSuffixCode().
        tab_size: spaces per indent level for emitted code.
    """
    self.tab = ' ' * tab_size
    self.codeText = ''
    self.missing = missing
    self.word2num_features = word2num_features
    self.cat_encoder = cat_encoder
    self.target_encoder = target_encoder
    self.normalizer = normalizer
    self.text_profiler = text_profiler
    self.feature_reducer = feature_reducer
    self.score_smaller_is_better = score_smaller_is_better
    self.problem_type = problem_type
def addDatabaseClass(self, indent=0):
    """Return the source text of the generated InfluxDB 'database' helper class.

    The string below IS generated code; its internal spacing is the emitted
    script's indentation and must not be altered.
    """
    text = "\
\nclass database():\
\n    def __init__(self, config):\
\n        self.host = config['host']\
\n        self.port = config['port']\
\n        self.user = config['user']\
\n        self.password = config['password']\
\n        self.database = config['database']\
\n        self.measurement = config['measurement']\
\n        self.tags = config['tags']\
\n        self.client = self.get_client()\
\n\
\n    def read_data(self, query)->pd.DataFrame:\
\n        cursor = self.client.query(query)\
\n        points = cursor.get_points()\
\n        my_list=list(points)\
\n        df=pd.DataFrame(my_list)\
\n        return df\
\n\
\n    def get_client(self):\
\n        client = InfluxDBClient(self.host,self.port,self.user,self.password)\
\n        databases = client.get_list_database()\
\n        databases = [x['name'] for x in databases]\
\n        if self.database not in databases:\
\n            client.create_database(self.database)\
\n        return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\
\n\
\n    def write_data(self,data):\
\n        if isinstance(data, pd.DataFrame):\
\n            sorted_col = data.columns.tolist()\
\n            sorted_col.sort()\
\n            data = data[sorted_col]\
\n            data = data.to_dict(orient='records')\
\n        for row in data:\
\n            if 'time' in row.keys():\
\n                p = '%Y-%m-%dT%H:%M:%S.%fZ'\
\n                time_str = datetime.strptime(row['time'], p)\
\n                del row['time']\
\n            else:\
\n                time_str = None\
\n            if 'model_ver' in row.keys():\
\n                self.tags['model_ver']= row['model_ver']\
\n                del row['model_ver']\
\n            json_body = [{\
\n                'measurement': self.measurement,\
\n                'time': time_str,\
\n                'tags': self.tags,\
\n                'fields': row\
\n            }]\
\n            self.client.write_points(json_body)\
\n\
\n    def close(self):\
\n        self.client.close()\
\n"
    if indent:
        # NOTE(review): this inserts the pad *before* each newline (trailing
        # spaces) rather than after it, so generated lines stay at column 0 —
        # confirm intent before "fixing".
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
def addPredictClass(self, indent=0):
    """Return the source text of the generated 'predict' class.

    The generated class loads the production model from the local mlflow
    store together with the transformation artefacts selected by this
    instance's flags, and exposes predict()/read_data_from_db() used by the
    generated check_drift(). The strings below ARE generated code; their
    internal spacing is the emitted script's indentation.
    """
    text = "\
\nclass predict():\
\n\
\n    def __init__(self, base_config):\
\n        self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\
\n        self.dataLocation = base_config['dataLocation']\
\n        self.db_enabled = base_config.get('db_enabled', False)\
\n        if self.db_enabled:\
\n            self.db_config = base_config['db_config']\
\n        home = Path.home()\
\n        if platform.system() == 'Windows':\
\n            from pathlib import WindowsPath\
\n            output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\
\n            output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\
\n        else:\
\n            from pathlib import PosixPath\
\n            output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\
\n            output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\
\n        if not output_model_dir.exists():\
\n            raise ValueError(f'Configuration file not found at {output_model_dir}')\
\n\
\n        tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\
\n        registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        self.model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(self.model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'): #remove file:///\
\n            self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\
\n        else:\
\n            self.artifact_path = Path(run.info.artifact_uri)\
\n        with open(self.artifact_path/'deploy.json', 'r') as f:\
\n            deployment_dict = json.load(f)\
\n        with open(self.artifact_path/'features.txt', 'r') as f:\
\n            self.train_features = f.readline().rstrip().split(',')\
\n\
\n        self.dataLocation = base_config['dataLocation']\
\n        self.selected_features = deployment_dict['load_data']['selected_features']\
\n        self.target_feature = deployment_dict['load_data']['target_feature']\
\n        self.output_model_dir = output_model_dir"
    # Conditionally emit loading of the transformation artefacts selected
    # by this instance's flags.
    if self.missing:
        text += "\n        self.missing_values = deployment_dict['transformation']['fillna']"
    if self.word2num_features:
        text += "\n        self.word2num_features = deployment_dict['transformation']['word2num_features']"
    if self.cat_encoder == 'labelencoding':
        text += "\n        self.cat_encoder = deployment_dict['transformation']['cat_encoder']"
    elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'):
        text += "\n        self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"
        text += "\n        self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"
    if self.target_encoder:
        text += "\n        self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"
    if self.normalizer:
        text += "\n        self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\
\n        self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"
    if self.text_profiler:
        text += "\n        self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\
\n        self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"
    if self.feature_reducer:
        text += "\n        self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\
\n        self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"
    text += """
    def read_data_from_db(self):
        if self.db_enabled:
            try:
                db = database(self.db_config)
                query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature)
                if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
                    query += f" time > now() - {self.db_config['read_time']}"
                data = db.read_data(query)
            except:
                raise ValueError('Unable to read from the database')
            finally:
                if db:
                    db.close()
            return data
        return None"""
    text += "\
\n    def predict(self, data):\
\n        df = pd.DataFrame()\
\n        if Path(data).exists():\
\n            if Path(data).suffix == '.tsv':\
\n                df=read_data(data,encoding='utf-8',sep='\t')\
\n            elif Path(data).suffix == '.csv':\
\n                df=read_data(data,encoding='utf-8')\
\n        else:\
\n            if Path(data).suffix == '.json':\
\n                jsonData = read_json(data)\
\n                df = pd.json_normalize(jsonData)\
\n            elif is_file_name_url(data):\
\n                df = read_data(data,encoding='utf-8')\
\n            else:\
\n                jsonData = json.loads(data)\
\n                df = pd.json_normalize(jsonData)\
\n        if len(df) == 0:\
\n            raise ValueError('No data record found')\
\n        missing_features = [x for x in self.selected_features if x not in df.columns]\
\n        if missing_features:\
\n            raise ValueError(f'some feature/s is/are missing: {missing_features}')\
\n        if self.target_feature not in df.columns:\
\n            raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\
\n        df_copy = df.copy()\
\n        df = df[self.selected_features]"
    # Conditionally emit the transformation steps mirroring training.
    if self.word2num_features:
        text += "\n        for feat in self.word2num_features:"
        text += "\n            df[ feat ] = df[feat].apply(lambda x: s2n(x))"
    if self.missing:
        text += "\n        df.fillna(self.missing_values, inplace=True)"
    if self.cat_encoder == 'labelencoding':
        text += "\n        df.replace(self.cat_encoder, inplace=True)"
    elif self.cat_encoder == 'targetencoding':
        text += "\n        cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
        text += "\n        df = cat_enc.transform(df)"
    elif self.cat_encoder == 'onehotencoding':
        text += "\n        cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
        text += "\n        transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"
        text += "\n        df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"
    if self.normalizer:
        text += "\n        df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"
    if self.text_profiler:
        text += "\n        text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\
\n        df_vect=self.text_profiler.transform(text_corpus)\
\n        if isinstance(df_vect, np.ndarray):\
\n            df1 = pd.DataFrame(df_vect)\
\n        else:\
\n            df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\
\n        df1 = df1.add_suffix('_vect')\
\n        df = pd.concat([df, df1],axis=1)"
    if self.feature_reducer:
        text += "\n        df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
    else:
        text += "\n        df = df[self.train_features]"
    if self.target_encoder:
        text += "\n        output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\
\n        df_copy['prediction'] = output.idxmax(axis=1)"
    else:
        text += "\n        output = self.model.predict(df).reshape(1, -1)[0].round(2)\
\n        df_copy['prediction'] = output"
    text += "\n        return df_copy"
    if indent:
        # NOTE(review): pad is inserted before each newline (trailing spaces),
        # so generated lines stay at column 0 — confirm intent before changing.
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
def getClassificationMatrixCode(self, indent=0):
    """Return the generated helper computing classification metrics
    (accuracy, macro precision/recall/F1) via sklearn.metrics."""
    text = "\
\ndef get_classification_metrices(actual_values, predicted_values):\
\n    result = {}\
\n    accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\
\n    avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\
\n                    average='macro')\
\n    avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\
\n                    average='macro')\
\n    avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\
\n                    average='macro')\
\n\
\n    result['accuracy'] = accuracy_score\
\n    result['precision'] = avg_precision\
\n    result['recall'] = avg_recall\
\n    result['f1'] = avg_f1\
\n    return result\
\n\
"
    if indent:
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
def getRegrssionMatrixCode(self, indent=0):
    """Return the generated helper computing regression error statistics.

    NOTE(review): the method name keeps the historical 'Regrssion' spelling
    because external callers reference it by this name.
    """
    text = "\
\ndef get_regression_metrices( actual_values, predicted_values):\
\n    result = {}\
\n\
\n    me = np.mean(predicted_values - actual_values)\
\n    sde = np.std(predicted_values - actual_values, ddof = 1)\
\n\
\n    abs_err = np.abs(predicted_values - actual_values)\
\n    mae = np.mean(abs_err)\
\n    sdae = np.std(abs_err, ddof = 1)\
\n\
\n    abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\
\n    mape = np.mean(abs_perc_err)\
\n    sdape = np.std(abs_perc_err, ddof = 1)\
\n\
\n    result['mean_error'] = me\
\n    result['mean_abs_error'] = mae\
\n    result['mean_abs_perc_error'] = mape\
\n    result['error_std'] = sde\
\n    result['abs_error_std'] = sdae\
\n    result['abs_perc_error_std'] = sdape\
\n    return result\
\n\
"
    if indent:
        text = text.replace('\n', (self.tab * indent) + '\n')
    return text
def addSuffixCode(self, indent=1):
text ="\n\
\ndef check_drift( config):\
\n prediction = predict(config)\
\n usecase = config['modelName'] + '_' + config['modelVersion']\
\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\
\n if not train_data_path.exists():\
\n raise ValueError(f'Training data not found at {train_data_path}')\
\n curr_with_pred = prediction.read_data_from_db()\
\n if prediction.target_feature not in curr_with_pred.columns:\
\n raise ValueError('Ground truth not updated for corresponding data in database')\
\n train_with_pred = prediction.predict(train_data_path)\
\n performance = {}"
if self.problem_type == 'classification':
text += "\n\
\n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\
\n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
else:
text += "\n\
\n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\
\n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
text += "\n return performance"
text += "\n\
\nif __name__ == '__main__':\
\n try:\
\n if len(sys.argv) < 2:\
\n raise ValueError('config file not present')\
\n config = sys.argv[1]\
\n if Path(config).is_file() and Path(config).suffix == '.json':\
\n with open(config, 'r') as f:\
\n config = json.load(f)\
\n else:\
\n config = json.loads(config)\
\n output = check_drift(config)\
\n status = {'Status':'Success','Message':json.loads(output)}\
\n print('output_drift:'+json.dumps(status))\
\n except Exception as e:\
\n status = {'Status':'Failure','Message':str(e)}\
\n print('output_drift:'+json.dumps(status))"
if indent:
text = text.replace('\n', (self.tab * indent) + '\n')
return text
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addDatabaseClass()
self.codeText += self.addPredictClass()
if self.problem_type == 'classification':
self.codeText += self.getClassificationMatrixCode()
elif self.problem_type == 'regression':
self.codeText += self.getRegrssionMatrixCode()
else:
raise ValueError(f"Unsupported problem type: {self.problem_type}")
self.codeText += self.addSuffixCode()
    def getCode(self):
        """Return the generated source accumulated by ``generateCode``."""
        return self.codeText
|
deploy.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class deploy():
    """Code generator for time-series model deployment artefacts.

    Collects the input/output artefact names used by the generated
    prediction script, emits the prediction class (``addPredictionCode``)
    and a self-contained HTTP service script (``getServiceCode``) as
    source text.

    Fixes applied in this revision:
    - ``addOutputFiles`` previously wrote into ``self.input_files``, so
      ``self.output_files`` stayed empty and ``getOutputFiles`` always
      rendered ``{ }``.
    - In the emitted prediction template, the forecast-count clamp compared
      the two counts as *strings* (lexicographic, e.g. '9' > '12'); it now
      compares them numerically.
    - In the emitted service template, the recall branch imported
      ``accuracy_score`` but called ``recall_score`` (NameError), and the
      missing-performance-file error referenced the undefined
      ``performance_data`` instead of ``performance_file``.
    """
    def __init__(self, tab_size=4):
        # tab used when re-indenting emitted code fragments
        self.tab = ' ' * tab_size
        self.codeText = ""
        self.input_files = {}
        self.output_files = {}
        self.addInputFiles({'metaData' : 'modelMetaData.json','log':'predict.log'})
    def addInputFiles(self, files):
        """Merge *files* (name -> filename) into the input artefact map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v
    def addOutputFiles(self, files):
        """Merge *files* (name -> filename) into the output artefact map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            # fix: was self.input_files[k] = v (copy-paste from addInputFiles)
            self.output_files[k] = v
    def getInputFiles(self):
        """Return the ``IOFiles = {...}`` assignment for the generated script."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text
    def getOutputFiles(self):
        """Return the ``output_file = {...}`` assignment for the generated script."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text
    def getInputOutputFiles(self, indent=0):
        """Return both artefact assignments, optionally re-indented."""
        text = '\n'
        text += self.getInputFiles()
        text += '\n'
        text += self.getOutputFiles()
        if indent:
            # NOTE(review): indent inserted before each newline (trailing
            # whitespace) — matches the other generators in this codebase.
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def addStatement(self, statement, indent=1):
        # Intentionally a no-op in this generator; kept for interface
        # parity with the sibling code-generator classes.
        pass
    def getPredictionCodeModules(self):
        """Imports required by the code emitted in ``addPredictionCode``."""
        modules = [{'module':'json'}
            ,{'module':'joblib'}
            ,{'module':'pandas', 'mod_as':'pd'}
            ,{'module':'numpy', 'mod_as':'np'}
            ,{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'json_normalize', 'mod_from':'pandas'}
            ,{'module':'load_model', 'mod_from':'tensorflow.keras.models'}
            ]
        return modules
    def addPredictionCode(self):
        """Append the generated prediction class to ``self.codeText``.

        NOTE(review): the emitted code references ``utils`` and ``logger``
        which must be provided by the generated script's surroundings —
        confirm they ship alongside.  Also, ``initialize`` uses
        ``meta_data`` even when the metadata file is missing; left as-is.
        """
        self.codeText += """
class deploy():
    def __init__(self, base_config, log=None):
        self.targetPath = (Path('aion') / base_config['targetPath']).resolve()
        if log:
            self.logger = log
        else:
            log_file = self.targetPath / IOFiles['log']
            self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
        try:
            self.initialize(base_config)
        except Exception as e:
            self.logger.error(e, exc_info=True)
    def initialize(self, base_config):
        targetPath = Path('aion') / base_config['targetPath']
        meta_data_file = targetPath / IOFiles['metaData']
        if meta_data_file.exists():
            meta_data = utils.read_json(meta_data_file)
        self.dateTimeFeature = meta_data['training']['dateTimeFeature']
        self.targetFeature = meta_data['training']['target_feature']
        normalization_file = meta_data['transformation']['Status']['Normalization_file']
        self.normalizer = joblib.load(normalization_file)
        self.lag_order = base_config['lag_order']
        self.noofforecasts = base_config['noofforecasts']
        run_id = str(meta_data['register']['runId'])
        model_path = str(targetPath/'runs'/str(meta_data['register']['runId'])/meta_data['register']['model']/'model')
        self.model = load_model(model_path)
        self.model_name = meta_data['register']['model']
    def predict(self, data=None):
        try:
            return self.__predict(data)
        except Exception as e:
            if self.logger:
                self.logger.error(e, exc_info=True)
            raise ValueError(json.dumps({'Status': 'Failure', 'Message': str(e)}))
    def __predict(self, data=None):
        jsonData = json.loads(data)
        dataFrame = json_normalize(jsonData)
        xtrain = dataFrame
        if len(dataFrame) == 0:
            raise ValueError('No data record found')
        df_l = len(dataFrame)
        pred_threshold = 0.1
        max_pred_by_user = round((df_l) * pred_threshold)
        # prediction for 24 steps or next 24 hours
        if self.noofforecasts == -1:
            self.noofforecasts = max_pred_by_user
        no_of_prediction = self.noofforecasts
        # compare numerically (was a lexicographic string comparison)
        if int(no_of_prediction) > int(max_pred_by_user):
            no_of_prediction = max_pred_by_user
        noofforecasts = no_of_prediction
        # self.sfeatures.remove(self.datetimeFeature)
        features = self.targetFeature
        if len(features) == 1:
            xt = xtrain[features].values
        else:
            xt = xtrain[features].values
        xt = xt.astype('float32')
        xt = self.normalizer.transform(xt)
        pred_data = xt
        y_future = []
        self.lag_order = int(self.lag_order)
        for i in range(int(no_of_prediction)):
            pdata = pred_data[-self.lag_order:]
            if len(features) == 1:
                pdata = pdata.reshape((1, self.lag_order))
            else:
                pdata = pdata.reshape((1, self.lag_order, len(features)))
            if (len(features) > 1):
                pred = self.model.predict(pdata)
                predout = self.normalizer.inverse_transform(pred)
                y_future.append(predout)
                pred_data = np.append(pred_data, pred, axis=0)
            else:
                pred = self.model.predict(pdata)
                predout = self.normalizer.inverse_transform(pred)
                y_future.append(predout.flatten()[-1])
                pred_data = np.append(pred_data, pred)
        pred = pd.DataFrame(index=range(0, len(y_future)), columns=self.targetFeature)
        for i in range(0, len(y_future)):
            pred.iloc[i] = y_future[i]
        predictions = pred
        forecast_output = predictions.to_json(orient='records')
        return forecast_output
        """
    def getCode(self):
        """Return the generated source accumulated so far."""
        return self.codeText
    def getServiceCode(self):
        """Return the standalone HTTP service script as source text.

        NOTE(review): the emitted script references ``groundtruth``,
        ``dataReader`` and ``utility`` helpers defined elsewhere in the
        project — confirm they ship alongside the generated service.
        """
        return """
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import os
from os.path import expanduser
import platform
import threading
import subprocess
import argparse
import re
import cgi
import json
import shutil
import logging
import sys
import time
import seaborn as sns
from pathlib import Path
from predict import deploy
import pandas as pd
import scipy.stats as st
import numpy as np
import warnings
from utility import *
warnings.filterwarnings("ignore")
config_input = None
IOFiles = {
    "inputData": "rawData.dat",
    "metaData": "modelMetaData.json",
    "production": "production.json",
    "log": "aion.log",
    "monitoring":"monitoring.json",
    "prodData": "prodData",
    "prodDataGT":"prodDataGT"
    }
def DistributionFinder(data):
    try:
        distributionName = ""
        sse = 0.0
        KStestStatic = 0.0
        dataType = ""
        if (data.dtype == "float64" or data.dtype == "float32"):
            dataType = "Continuous"
        elif (data.dtype == "int"):
            dataType = "Discrete"
        elif (data.dtype == "int64"):
            dataType = "Discrete"
        if (dataType == "Discrete"):
            distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
            index, counts = np.unique(data.astype(int), return_counts=True)
            if (len(index) >= 2):
                best_sse = np.inf
                y1 = []
                total = sum(counts)
                mean = float(sum(index * counts)) / total
                variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
                dispersion = mean / float(variance)
                theta = 1 / float(dispersion)
                r = mean * (float(theta) / 1 - theta)
                for j in counts:
                    y1.append(float(j) / total)
                pmf1 = st.bernoulli.pmf(index, mean)
                pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
                pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
                pmf4 = st.nbinom.pmf(index, mean, r)
                pmf5 = st.poisson.pmf(index, mean)
                sse1 = np.sum(np.power(y1 - pmf1, 2.0))
                sse2 = np.sum(np.power(y1 - pmf2, 2.0))
                sse3 = np.sum(np.power(y1 - pmf3, 2.0))
                sse4 = np.sum(np.power(y1 - pmf4, 2.0))
                sse5 = np.sum(np.power(y1 - pmf5, 2.0))
                sselist = [sse1, sse2, sse3, sse4, sse5]
                best_distribution = 'NA'
                for i in range(0, len(sselist)):
                    if best_sse > sselist[i] > 0:
                        best_distribution = distributions[i].name
                        best_sse = sselist[i]
            elif (len(index) == 1):
                best_distribution = "Constant Data-No Distribution"
                best_sse = 0.0
            distributionName = best_distribution
            sse = best_sse
        elif (dataType == "Continuous"):
            distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
                             st.gamma, st.beta]
            best_distribution = st.norm.name
            best_sse = np.inf
            datamin = data.min()
            datamax = data.max()
            nrange = datamax - datamin
            y, x = np.histogram(data.astype(float), bins='auto', density=True)
            x = (x + np.roll(x, -1))[:-1] / 2.0
            for distribution in distributions:
                params = distribution.fit(data.astype(float))
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                sse = np.sum(np.power(y - pdf, 2.0))
                if (best_sse > sse > 0):
                    best_distribution = distribution.name
                    best_sse = sse
            distributionName = best_distribution
            sse = best_sse
    except:
        response = str(sys.exc_info()[0])
        message = 'Job has Failed' + response
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        print(message)
    return distributionName, sse
def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
    import matplotlib.pyplot as plt
    import math
    import io, base64, urllib
    np.seterr(divide='ignore', invalid='ignore')
    try:
        plt.clf()
    except:
        pass
    plt.rcParams.update({'figure.max_open_warning': 0})
    sns.set(color_codes=True)
    pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    if len(feature) > 4:
        numneroffeatures = len(feature)
        plt.figure(figsize=(10, numneroffeatures*2))
    else:
        plt.figure(figsize=(10,5))
    for i in enumerate(feature):
        dataType = dataframe[i[1]].dtypes
        if dataType not in pandasNumericDtypes:
            dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
            dataframe[i[1]] = dataframe[i[1]].cat.codes
            dataframe[i[1]] = dataframe[i[1]].astype(int)
            dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0])
        else:
            dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean())
        plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
        plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1)
        distname, sse = DistributionFinder(dataframe[i[1]])
        print(distname)
        ax = sns.distplot(dataframe[i[1]], label=distname)
        ax.legend(loc='best')
        if newdataframe.empty == False:
            dataType = newdataframe[i[1]].dtypes
            if dataType not in pandasNumericDtypes:
                newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]])
                newdataframe[i[1]] = newdataframe[i[1]].cat.codes
                newdataframe[i[1]] = newdataframe[i[1]].astype(int)
                newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0])
            else:
                newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean())
            distname, sse = DistributionFinder(newdataframe[i[1]])
            print(distname)
            ax = sns.distplot(newdataframe[i[1]],label=distname)
            ax.legend(loc='best')
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    string = base64.b64encode(buf.read())
    uri = urllib.parse.quote(string)
    return uri
def read_json(file_path):
    data = None
    with open(file_path,'r') as f:
        data = json.load(f)
    return data
class HTTPRequestHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        print('PYTHON ######## REQUEST ####### STARTED')
        if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                length = int(self.headers.get('content-length'))
                data = self.rfile.read(length)
                usecase = self.path.split('/')[-2]
                if usecase.lower() == config_input['targetPath'].lower():
                    operation = self.path.split('/')[-1]
                    data = json.loads(data)
                    dataStr = json.dumps(data)
                    if operation.lower() == 'predict':
                        output=deployobj.predict(dataStr)
                        resp = output
                    elif operation.lower() == 'groundtruth':
                        gtObj = groundtruth(config_input)
                        output = gtObj.actual(dataStr)
                        resp = output
                    elif operation.lower() == 'delete':
                        targetPath = Path('aion')/config_input['targetPath']
                        for file in data:
                            x = targetPath/file
                            if x.exists():
                                os.remove(x)
                        resp = json.dumps({'Status':'Success'})
                    else:
                        outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'})
                        resp = outputStr
                else:
                    outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'})
                    resp = outputStr
            else:
                outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'})
                resp = outputStr
            resp=resp+'\\n'
            resp=resp.encode()
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write(resp)
        else:
            print('python ==> else1')
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        print('PYTHON ######## REQUEST ####### ENDED')
        return
    def do_GET(self):
        print('PYTHON ######## REQUEST ####### STARTED')
        if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
            usecase = self.path.split('/')[-2]
            self.send_response(200)
            self.targetPath = Path('aion')/config_input['targetPath']
            meta_data_file = self.targetPath/IOFiles['metaData']
            if meta_data_file.exists():
                meta_data = read_json(meta_data_file)
            else:
                raise ValueError(f'Configuration file not found: {meta_data_file}')
            production_file = self.targetPath/IOFiles['production']
            if production_file.exists():
                production_data = read_json(production_file)
            else:
                raise ValueError(f'Production Details not found: {production_file}')
            operation = self.path.split('/')[-1]
            if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'):
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                ModelString = production_data['Model']
                ModelPerformance = ModelString+'_performance.json'
                performance_file = self.targetPath/ModelPerformance
                if performance_file.exists():
                    performance_data = read_json(performance_file)
                else:
                    raise ValueError(f'Production Details not found: {performance_file}')
                Scoring_Creteria = performance_data['scoring_criteria']
                train_score = round(performance_data['metrices']['train_score'],2)
                test_score = round(performance_data['metrices']['test_score'],2)
                current_score = 'NA'
                monitoring = read_json(self.targetPath/IOFiles['monitoring'])
                reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config'])
                inputDatafile = self.targetPath/IOFiles['inputData']
                NoOfPrediction = 0
                NoOfGroundTruth = 0
                inputdistribution = ''
                if reader.file_exists(IOFiles['prodData']):
                    dfPredict = reader.read(IOFiles['prodData'])
                    dfinput = pd.read_csv(inputDatafile)
                    features = meta_data['training']['features']
                    inputdistribution = getDriftDistribution(features,dfinput,dfPredict)
                    NoOfPrediction = len(dfPredict)
                    if reader.file_exists(IOFiles['prodDataGT']):
                        dfGroundTruth = reader.read(IOFiles['prodDataGT'])
                        NoOfGroundTruth = len(dfGroundTruth)
                        common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()]
                        proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner')
                        if Scoring_Creteria.lower() == 'accuracy':
                            from sklearn.metrics import accuracy_score
                            current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction'])
                            current_score = round((current_score*100),2)
                        elif Scoring_Creteria.lower() == 'recall':
                            from sklearn.metrics import recall_score
                            current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro')
                            current_score = round((current_score*100),2)
                msg = \"""<html>
                <head>
                    <title>Performance Details</title>
                </head>
                <style>
                table, th, td {border}
                </style>
                <body>
                    <h2><b>Deployed Model:</b>{ModelString}</h2>
                    <br/>
                    <table style="width:50%">
                        <tr>
                            <td>No of Prediction</td>
                            <td>{NoOfPrediction}</td>
                        </tr>
                        <tr>
                            <td>No of GroundTruth</td>
                            <td>{NoOfGroundTruth}</td>
                        </tr>
                    </table>
                    <br/>
                    <table style="width:100%">
                        <tr>
                            <th>Score Type</th>
                            <th>Train Score</th>
                            <th>Test Score</th>
                            <th>Production Score</th>
                        </tr>
                        <tr>
                            <td>{Scoring_Creteria}</td>
                            <td>{train_score}</td>
                            <td>{test_score}</td>
                            <td>{current_score}</td>
                        </tr>
                    </table>
                    <br/>
                    <br/>
                    <img src="data:image/png;base64,{newDataDrift}" alt="" >
                </body>
                </html>
                \""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution)
            elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'):
                self.send_header('Content-Type', 'text/plain')
                self.end_headers()
                log_file = self.targetPath/IOFiles['log']
                if log_file.exists():
                    with open(log_file) as f:
                        msg = f.read()
                        f.close()
                else:
                    raise ValueError(f'Log Details not found: {log_file}')
            else:
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                features = meta_data['load_data']['selected_features']
                bodydes='['
                for x in features:
                    if bodydes != '[':
                        bodydes = bodydes+','
                    bodydes = bodydes+'{"'+x+'":"value"}'
                bodydes+=']'
                urltext = '/AION/'+config_input['targetPath']+'/predict'
                urltextgth='/AION/'+config_input['targetPath']+'/groundtruth'
                urltextproduction='/AION/'+config_input['targetPath']+'/metrices'
                msg=\"""
Version:{modelversion}
RunNo: {runNo}
URL for Prediction
==================
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.
URL for GroundTruth
===================
URL:{urltextgth}
RequestType: POST
Content-Type=application/json
Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. Otherwise outputdrift will not work
URL for Model In Production Analysis
====================================
URL:{urltextproduction}
RequestType: GET
Content-Type=application/json
\""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes)
            self.wfile.write(msg.encode())
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    allow_reuse_address = True
    def shutdown(self):
        self.socket.close()
        HTTPServer.shutdown(self)
class file_status():
    def __init__(self, reload_function, params, file, logger):
        self.files_status = {}
        self.initializeFileStatus(file)
        self.reload_function = reload_function
        self.params = params
        self.logger = logger
    def initializeFileStatus(self, file):
        self.files_status = {'path': file, 'time':file.stat().st_mtime}
    def is_file_changed(self):
        if self.files_status['path'].stat().st_mtime > self.files_status['time']:
            self.files_status['time'] = self.files_status['path'].stat().st_mtime
            return True
        return False
    def run(self):
        global config_input
        while( True):
            time.sleep(30)
            if self.is_file_changed():
                production_details = targetPath/IOFiles['production']
                if not production_details.exists():
                    raise ValueError(f'Model in production details does not exist')
                productionmodel = read_json(production_details)
                config_file = Path(__file__).parent/'config.json'
                if not Path(config_file).exists():
                    raise ValueError(f'Config file is missing: {config_file}')
                config_input = read_json(config_file)
                config_input['deployedModel'] = productionmodel['Model']
                config_input['deployedRunNo'] = productionmodel['runNo']
                self.logger.info('Model changed Reloading.....')
                self.logger.info(f'Model: {config_input["deployedModel"]}')
                self.logger.info(f'Version: {str(config_input["modelVersion"])}')
                self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}')
                self.reload_function(config_input)
class SimpleHttpServer():
    def __init__(self, ip, port, model_file_path,reload_function,params, logger):
        self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
        self.status_checker = file_status( reload_function, params, model_file_path, logger)
    def start(self):
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
        self.status_thread = threading.Thread(target=self.status_checker.run)
        self.status_thread.start()
    def waitForThread(self):
        self.server_thread.join()
        self.status_thread.join()
    def stop(self):
        self.server.shutdown()
        self.waitForThread()
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='HTTP Server')
    parser.add_argument('-ip','--ipAddress', help='HTTP Server IP')
    parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server')
    args = parser.parse_args()
    config_file = Path(__file__).parent/'config.json'
    if not Path(config_file).exists():
        raise ValueError(f'Config file is missing: {config_file}')
    config = read_json(config_file)
    if args.ipAddress:
        config['ipAddress'] = args.ipAddress
    if args.portNo:
        config['portNo'] = args.portNo
    targetPath = Path('aion')/config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    production_details = targetPath/IOFiles['production']
    if not production_details.exists():
        raise ValueError(f'Model in production details does not exist')
    productionmodel = read_json(production_details)
    config['deployedModel'] = productionmodel['Model']
    config['deployedRunNo'] = productionmodel['runNo']
    #server = SimpleHttpServer(config['ipAddress'],int(config['portNo']))
    config_input = config
    logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger(Path(__file__).parent.name)
    deployobj = deploy(config_input, logger)
    server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger)
    logger.info('HTTP Server Running...........')
    logger.info(f"IP Address: {config['ipAddress']}")
    logger.info(f"Port No.: {config['portNo']}")
    print('HTTP Server Running...........')
    print('For Prediction')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/predict')
    print('\\nFor GroundTruth')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/groundtruth')
    print('\\nFor Help')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/help')
    print('\\nFor Model In Production Analysis')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/metrices')
    server.start()
    server.waitForThread()
    """
trainer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class learner():
    def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4):
        """Code generator for the training stage of the MLAC pipeline.

        :param problem_type: 'classification' or 'regression' (drives which
            template variants other methods select).
        :param target_feature: name of the label column in the data.
        :param sample_method: currently unused in the visible code — TODO confirm.
        :param indent: currently unused in the visible code — TODO confirm.
        :param tab_size: spaces per tab stop for emitted code.
        """
        self.tab = " "*tab_size
        # name of the dataframe variable used inside emitted code
        self.df_name = 'df'
        self.problem_type = problem_type
        self.target_feature = target_feature
        self.search_space = []
        # generated source starts with the train() entry-point header
        self.codeText = f"\ndef train(log):"
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        # default input artefacts every generated trainer needs
        self.addInputFiles({'inputData' : 'featureEngineeredData.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles assignment for the generated script.

        NOTE(review): unlike the deploy-variant of this method, this one
        emits only the input mapping (no ``getOutputFiles()`` call) — confirm
        the generated trainer really needs no ``output_file`` map.
        """
        text = '\n'
        text += self.getInputFiles()
        if indent:
            # indent inserted before each newline — house re-indent scheme
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        """Return source for a ``validateConfig()`` helper that loads
        config.json from the generated script's directory, raising
        ValueError when the file is missing."""
        text = "\n\
        \ndef validateConfig():\
        \n    config_file = Path(__file__).parent/'config.json'\
        \n    if not Path(config_file).exists():\
        \n        raise ValueError(f'Config file is missing: {config_file}')\
        \n    config = utils.read_json(config_file)\
        \n    return config"
        return text
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\n' + self.codeText
    def addLocalFunctionsCode(self):
        # Emit the shared validateConfig() helper into the function section.
        self.function_code += self.__addValidateConfigCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
    def addPrefixCode(self, indent=1):
        # NOTE(review): the continuation string below contains only the
        # literal indentation before the closing quote, so this appends
        # whitespace only — looks like a placeholder; confirm intended.
        self.codeText += "\
        "
def getSuffixModules(self):
modules = []
return modules
    def addSuffixCode(self, indent=1):
        # NOTE(review): appends just a newline plus the literal indentation
        # before the closing quote — placeholder; confirm intended.
        self.codeText += "\n\
        "
def getMainCodeModules(self):
modules = [{'module':'logging'}
]
return modules
def getMlpCodeModules(self):
modules = [{'module':'math'}
,{'module':'json'}
,{'module':'joblib'}
,{'module':'keras_tuner'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'r2_score', 'mod_from':'sklearn.metrics'}
,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'}
,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'}
,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'}
,{'module':'Sequential', 'mod_from':'tensorflow.keras'}
,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'}
]
return modules
def addMlpCode(self):
self.codeText = """
def getdlparams(config):
for k, v in config.items():
if (k == "activation"):
activation_fn = str(v)
elif (k == "optimizer"):
optimizer = str(v)
elif (k == "loss"):
loss_fn = str(v)
elif (k == "first_layer"):
if not isinstance(k, list):
first_layer = str(v).split(',')
else:
first_layer = k
elif (k == "lag_order"):
lag_order = int(v)
elif (k == "hidden_layers"):
hidden_layers = int(v)
elif (k == "dropout"):
if not isinstance(k, list):
dropout = str(v).split(',')
else:
dropout = k
elif (k == "batch_size"):
batch_size = int(v)
elif (k == "epochs"):
epochs = int(v)
elif (k == "model_name"):
model_name = str(v)
return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs, model_name
def numpydf(dataset, look_back):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
subset = dataset[i:(i + look_back), 0]
dataX.append(subset)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
def startTraining(dataset,train_size,mlpConfig,filename_scaler,target_feature,scoreParam,log):
log.info('Training started')
activation_fn, optimizer, loss_fn, first_layer, hidden_layers, look_back, dropout, batch_size, epochs, model_name = getdlparams(mlpConfig)
hp = keras_tuner.HyperParameters()
first_layer_min = round(int(first_layer[0]))
first_layer_max = round(int(first_layer[1]))
dropout_min = float(dropout[0])
dropout_max = float(dropout[1])
dataset = dataset.values
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
trainX, trainY = numpydf(train, look_back)
testX, testY = numpydf(test, look_back)
# create and fit Multilayer Perceptron model
model = Sequential()
model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), input_dim=look_back, activation=activation_fn)) #BUGID 13484
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) #BUGID 13484
model.add(Dense(1, activation='sigmoid'))
model.compile(loss=loss_fn, optimizer=optimizer)
model_fit = model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, verbose=2)
# Estimate model performance
trainScore = model.evaluate(trainX, trainY, verbose=0)
testScore = model.evaluate(testX, testY, verbose=0)
# Scoring values for the model
mse_eval = testScore
rmse_eval = math.sqrt(testScore)
# generate predictions for training
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
scaler = joblib.load(filename_scaler)
trainY = scaler.inverse_transform([trainY])
trainPredict = scaler.inverse_transform(trainPredict)
## For test data
testY = scaler.inverse_transform([testY])
testPredict = scaler.inverse_transform(testPredict)
mse_mlp = mean_squared_error(testY.T, testPredict)
scores = {}
r2 = round(r2_score(testY.T, testPredict), 2)
scores['R2'] = r2
mae = round(mean_absolute_error(testY.T, testPredict), 2)
scores['MAE'] = mae
scores['MSE'] = round(mse_mlp, 2)
rmse = round(math.sqrt(mse_mlp), 2)
scores['RMSE'] = rmse
scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE'])
log.info("mlp rmse: "+str(rmse))
log.info("mlp mse: "+str(round(mse_mlp, 2)))
log.info("mlp r2: "+str(r2))
log.info("mlp mae: "+str(mae))
return model, look_back, scaler,testScore,trainScore,scores
def train(config, targetPath, log):
dataLoc = targetPath / IOFiles['inputData']
if not dataLoc.exists():
return {'Status': 'Failure', 'Message': 'Data location does not exists.'}
status = dict()
usecase = config['targetPath']
df = utils.read_data(dataLoc)
target_feature = config['target_feature']
dateTimeFeature= config['dateTimeFeature']
df.set_index(dateTimeFeature, inplace=True)
train_size = int(len(df) * (1-config['test_ratio'])) #BugID:13217
mlpConfig = config['algorithms']['MLP']
filename = meta_data['transformation']['Status']['Normalization_file']
scoreParam = config['scoring_criteria']
log.info('Training MLP for TimeSeries')
mlp_model, look_back, scaler,testScore,trainScore, error_matrix = startTraining(df,train_size,mlpConfig,filename,target_feature,scoreParam,log)
score = error_matrix[scoreParam]
# Training model
model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name
model_file_name = str(model_path/'model')
mlp_model.save(model_file_name)
meta_data['training'] = {}
meta_data['training']['model_filename'] = model_file_name
meta_data['training']['dateTimeFeature'] = dateTimeFeature
meta_data['training']['target_feature'] = target_feature
utils.write_json(meta_data, targetPath / IOFiles['metaData'])
utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics'])
# return status
status = {'Status': 'Success', 'errorMatrix': error_matrix, 'test_score':testScore, 'train_score': trainScore,'score':error_matrix[scoreParam]}
log.info(f'Test score: {testScore}')
log.info(f'Train score: {trainScore}')
log.info(f'output: {status}')
return json.dumps(status)
"""
def getLstmCodeModules(self):
modules = [{'module':'math'}
,{'module':'json'}
,{'module':'joblib'}
,{'module':'keras_tuner'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'r2_score', 'mod_from':'sklearn.metrics'}
,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'}
,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'}
,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'}
,{'module':'Sequential', 'mod_from':'tensorflow.keras'}
,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'}
,{'module':'LSTM', 'mod_from':'tensorflow.keras.layers'}
,{'module':'TimeseriesGenerator', 'mod_from':'tensorflow.keras.preprocessing.sequence'}
,{'module':'train_test_split', 'mod_from':'sklearn.model_selection'}
]
return modules
    def addLstmCode(self):
        """Set ``self.codeText`` to the full source of the generated LSTM
        time-series training script.

        The template defines getdlparams/numpydf/startTraining/train in the
        emitted file; the string is emitted verbatim, so only this docstring is
        added here. NOTE(review): the template calls the deprecated
        ``model.fit_generator`` — confirm against the TF version the generated
        code targets before changing.
        """
        self.codeText = """
def getdlparams(config):
    for k, v in config.items():
        if (k == "activation"):
            activation_fn = str(v)
        elif (k == "optimizer"):
            optimizer = str(v)
        elif (k == "loss"):
            loss_fn = str(v)
        elif (k == "first_layer"):
            if not isinstance(k, list):
                first_layer = str(v).split(',')
            else:
                first_layer = k
        elif (k == "lag_order"):
            lag_order = int(v)
        elif (k == "hidden_layers"):
            hidden_layers = int(v)
        elif (k == "dropout"):
            if not isinstance(k, list):
                dropout = str(v).split(',')
            else:
                dropout = k
        elif (k == "batch_size"):
            batch_size = int(v)
        elif (k == "epochs"):
            epochs = int(v)
    return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs
def numpydf(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        subset = dataset[i:(i + look_back), 0]
        dataX.append(subset)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
def startTraining(dataset,test_size,mlpConfig,filename_scaler,target_feature,scoreParam,log):
    log.info('Training started')
    activation_fn, optimizer, loss_fn, first_layer, look_back,hidden_layers, dropout, batch_size, epochs= getdlparams(mlpConfig)
    n_features = len(target_feature)
    n_input = look_back
    hp = keras_tuner.HyperParameters()
    first_layer_min = round(int(first_layer[0]))
    first_layer_max = round(int(first_layer[1]))
    dropout_min = float(dropout[0])
    dropout_max = float(dropout[1])
    dataset = dataset[target_feature]
    dataset_np = dataset.values
    train, test = train_test_split(dataset_np, test_size=test_size, shuffle=False)
    generatorTrain = TimeseriesGenerator(train, train, length=n_input, batch_size=8)
    generatorTest = TimeseriesGenerator(test, test, length=n_input, batch_size=8)
    batch_0 = generatorTrain[0]
    x, y = batch_0
    epochs = int(epochs)
    ##Multivariate LSTM model
    model = Sequential()
    model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), activation=activation_fn, input_shape=(n_input, n_features)))
    model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
    model.add(Dense(n_features))
    model.compile(optimizer=optimizer, loss=loss_fn)
    # model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False)
    model.fit_generator(generatorTrain, steps_per_epoch=1, epochs=epochs, shuffle=False, verbose=0)
    # lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0)
    predictions = []
    future_pred_len = n_input
    # To get values for prediction,taking look_back steps of rows
    first_batch = train[-future_pred_len:]
    c_batch = first_batch.reshape((1, future_pred_len, n_features))
    current_pred = None
    for i in range(len(test)):
        # get pred for firstbatch
        current_pred = model.predict(c_batch)[0]
        predictions.append(current_pred)
        # remove first val
        c_batch_rmv_first = c_batch[:, 1:, :]
        # update
        c_batch = np.append(c_batch_rmv_first, [[current_pred]], axis=1)
    ## Prediction, inverse the minmax transform
    scaler = joblib.load(filename_scaler)
    prediction_actual = scaler.inverse_transform(predictions)
    test_data_actual = scaler.inverse_transform(test)
    mse = None
    rmse = None
    ## Creating dataframe for actual,predictions
    pred_cols = list()
    for i in range(len(target_feature)):
        pred_cols.append(target_feature[i] + '_pred')
    predictions = pd.DataFrame(prediction_actual, columns=pred_cols)
    actual = pd.DataFrame(test_data_actual, columns=target_feature)
    actual.columns = [str(col) + '_actual' for col in dataset.columns]
    df_predicted = pd.concat([actual, predictions], axis=1)
    print("LSTM Multivariate prediction dataframe: \\n" + str(df_predicted))
    # df_predicted.to_csv('mlp_prediction.csv')
    from math import sqrt
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import r2_score
    from sklearn.metrics import mean_absolute_error
    target = target_feature
    mse_dict = {}
    rmse_dict = {}
    mae_dict = {}
    r2_dict = {}
    lstm_var = 0
    for name in target:
        index = dataset.columns.get_loc(name)
        mse = mean_squared_error(test_data_actual[:, index], prediction_actual[:, index])
        mse_dict[name] = mse
        rmse = sqrt(mse)
        rmse_dict[name] = rmse
        lstm_var = lstm_var + rmse
        print("Name of the target feature: " + str(name))
        print("RMSE of the target feature: " + str(rmse))
        r2 = r2_score(test_data_actual[:, index], prediction_actual[:, index])
        r2_dict[name] = r2
        mae = mean_absolute_error(test_data_actual[:, index], prediction_actual[:, index])
        mae_dict[name] = mae
    ## For VAR comparison, send last target mse and rmse from above dict
    lstm_var = lstm_var / len(target)
    select_msekey = list(mse_dict.keys())[-1]
    l_mse = list(mse_dict.values())[-1]
    select_rmsekey = list(rmse_dict.keys())[-1]
    l_rmse = list(rmse_dict.values())[-1]
    select_r2key = list(r2_dict.keys())[-1]
    l_r2 = list(r2_dict.values())[-1]
    select_maekey = list(mae_dict.keys())[-1]
    l_mae = list(mae_dict.values())[-1]
    log.info('Selected target feature of LSTM for best model selection: ' + str(select_rmsekey))
    scores = {}
    scores['R2'] = l_r2
    scores['MAE'] = l_mae
    scores['MSE'] = l_mse
    scores['RMSE'] = l_rmse
    scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE'])
    log.info("lstm rmse: "+str(l_rmse))
    log.info("lstm mse: "+str(l_mse))
    log.info("lstm r2: "+str(l_r2))
    log.info("lstm mae: "+str(l_mae))
    return model,look_back,scaler, scores
def train(config, targetPath, log):
    dataLoc = targetPath / IOFiles['inputData']
    if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exists.'}
    status = dict()
    usecase = config['targetPath']
    df = utils.read_data(dataLoc)
    target_feature = config['target_feature']
    dateTimeFeature= config['dateTimeFeature']
    scoreParam = config['scoring_criteria']
    testSize = config['test_ratio']
    lstmConfig = config['algorithms']['LSTM']
    filename = meta_data['transformation']['Status']['Normalization_file']
    if (type(target_feature) is list):
        pass
    else:
        target_feature = list(target_feature.split(","))
    df.set_index(dateTimeFeature, inplace=True)
    log.info('Training LSTM for TimeSeries')
    mlp_model, look_back, scaler, error_matrix = startTraining(df,testSize,lstmConfig,filename,target_feature,scoreParam,log)
    score = error_matrix[scoreParam]
    log.info("LSTM Multivariant all scoring param results: "+str(error_matrix))
    # Training model
    model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name
    model_file_name = str(model_path/'model')
    mlp_model.save(model_file_name)
    meta_data['training'] = {}
    meta_data['training']['model_filename'] = model_file_name
    meta_data['training']['dateTimeFeature'] = dateTimeFeature
    meta_data['training']['target_feature'] = target_feature
    utils.write_json(meta_data, targetPath / IOFiles['metaData'])
    utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics'])
    # return status
    status = {'Status': 'Success', 'errorMatrix': error_matrix,'score':error_matrix[scoreParam]}
    log.info(f'score: {error_matrix[scoreParam]}')
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def addMainCode(self, indent=1):
        """Append the generated script's ``__main__`` entry point to
        ``self.codeText``.

        The template is emitted verbatim; the *indent* parameter is accepted
        for interface symmetry with the other generators but is unused here.
        """
        self.codeText += """
if __name__ == '__main__':
    config = validateConfig()
    targetPath = Path('aion') / config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    meta_data_file = targetPath / IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = utils.read_json(meta_data_file)
    else:
        raise ValueError(f'Configuration file not found: {meta_data_file}')
    log_file = targetPath / IOFiles['log']
    log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    try:
        print(train(config, targetPath, log))
    except Exception as e:
        status = {'Status': 'Failure', 'Message': str(e)}
        print(json.dumps(status))
"""
def add_variable(self, name, value, indent=1):
if isinstance(value, str):
self.codeText += f"\n{self.tab * indent}{name} = '{value}'"
else:
self.codeText += f"\n{self.tab * indent}{name} = {value}"
def addStatement(self, statement, indent=1):
self.codeText += f"\n{self.tab * indent}{statement}"
|
selector.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class selector():
    """Code generator for the feature-selection stage of an AION pipeline.

    Accumulates generated source text in ``codeText`` and standalone helper
    functions in ``function_code``; ``getCode`` joins the two.
    """
    def __init__(self, indent=0, tab_size=4):
        self.tab = " "*tab_size
        self.codeText = ""
        self.pipe = 'pipe'
        self.code_generated = False
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        # Default I/O files read/written by the generated script.
        self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'})
    def addInputFiles(self, files):
        """Merge *files* (logical name -> filename dict) into the input map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v
    def addOutputFiles(self, files):
        """Merge *files* into the output map.

        Bug fix: this previously wrote into ``self.input_files`` (copy/paste
        from addInputFiles), so ``output_files`` was always empty.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
    def getInputFiles(self):
        """Return the ``IOFiles = {...}`` declaration for the generated script."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text
    def getOutputFiles(self):
        """Return the ``output_file = {...}`` declaration for the generated script."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text
    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles declaration, optionally indented.

        NOTE(review): the replace() inserts the indent *before* each newline
        rather than after it; kept unchanged because the sibling generators in
        this code base use the same pattern.
        """
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        # Source for the generated validateConfig() helper; emitted verbatim.
        text = "\n\
        \ndef validateConfig():\
        \n    config_file = Path(__file__).parent/'config.json'\
        \n    if not Path(config_file).exists():\
        \n        raise ValueError(f'Config file is missing: {config_file}')\
        \n    config = read_json(config_file)\
        \n    return config"
        return text
    def addMainCode(self):
        """Append the generated script's ``__main__`` entry point."""
        self.codeText += """
if __name__ == '__main__':
    config = validateConfig()
    targetPath = Path('aion') / config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    meta_data_file = targetPath / IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = read_json(meta_data_file)
    else:
        raise ValueError(f'Configuration file not found: {meta_data_file}')
    log_file = targetPath / IOFiles['log']
    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    try:
        print(featureSelector(config,targetPath, log))
    except Exception as e:
        status = {'Status': 'Failure', 'Message': str(e)}
        print(json.dumps(status))
"""
    def addValidateConfigCode(self, indent=1):
        self.function_code += self.__addValidateConfigCode()
    def addStatement(self, statement, indent=1):
        """Append *statement* on a new line at the given indent level.

        Note: this method was previously defined twice with equivalent
        bodies; the duplicate has been removed.
        """
        self.codeText += f"\n{self.tab * indent}{statement}"
    def getCode(self):
        """Return helper functions followed by the accumulated script body."""
        return self.function_code + '\n' + self.codeText
    def addLocalFunctionsCode(self):
        self.addValidateConfigCode()
    def getPrefixModules(self):
        """Imports required by the generated featureSelector() function."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'pandas', 'mod_as':'pd'}
            ]
        return modules
    def addPrefixCode(self, indent=1):
        """Append the generated featureSelector() function (emitted verbatim)."""
        self.codeText += """
def featureSelector(config, targetPath, log):
    dataLoc = targetPath / IOFiles['inputData']
    if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exists.'}
    status = dict()
    df = pd.read_csv(dataLoc)
    log.log_dataframe(df)
    csv_path = str(targetPath / IOFiles['outputData'])
    write_data(df, csv_path, index=False)
    status = {'Status': 'Success', 'dataFilePath': IOFiles['outputData']}
    log.info(f'Selected data saved at {csv_path}')
    meta_data['featureengineering'] = {}
    meta_data['featureengineering']['Status'] = status
    write_json(meta_data, str(targetPath / IOFiles['metaData']))
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def getSuffixModules(self):
        modules = []
        return modules
    def addSuffixCode(self, indent=1):
        self.codeText += ""
    def getMainCodeModules(self):
        """Imports required by the generated ``__main__`` section."""
        modules = [
            {'module':'json'}
            ,{'module':'logging'}
            ]
        return modules
    def getPipe(self):
        return self.pipe
|
utility.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
# Every pipeline stage currently shares the same helper set; build the map
# from a single canonical list so additions stay consistent across stages.
_COMMON_UTILS = ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class']
utility_functions = {
    stage: list(_COMMON_UTILS)
    for stage in ('load_data', 'transformer', 'selector', 'train', 'register', 'Prediction', 'drift')
}
#TODO convert read and write functions in to class functions
functions_code = {
'read_json':{'imports':[{'mod':'json'}],'code':"\n\
\ndef read_json(file_path):\
\n data = None\
\n with open(file_path,'r') as f:\
\n data = json.load(f)\
\n return data\
\n"},
'write_json':{'imports':[{'mod':'json'}],'code':"\n\
\ndef write_json(data, file_path):\
\n with open(file_path,'w') as f:\
\n json.dump(data, f)\
\n"},
'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef read_data(file_path, encoding='utf-8', sep=','):\
\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\
\n"},
'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef write_data(data, file_path, index=False):\
\n return data.to_csv(file_path, index=index)\
\n\
\n#Uncomment and change below code for google storage\
\n#from google.cloud import storage\
\n#def write_data(data, file_path, index=False):\
\n# file_name= file_path.name\
\n# data.to_csv('output_data.csv')\
\n# storage_client = storage.Client()\
\n# bucket = storage_client.bucket('aion_data')\
\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\
\n# return data\
\n"},
'is_file_name_url':{'imports':[],'code':"\n\
\ndef is_file_name_url(file_name):\
\n supported_urls_starts_with = ('gs://','https://','http://')\
\n return file_name.startswith(supported_urls_starts_with)\
\n"},
'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\n\
\nclass logger():\
\n #setup the logger\
\n def __init__(self, log_file, mode='w', logger_name=None):\
\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\
\n self.log = logging.getLogger(logger_name)\
\n\
\n #get logger\
\n def getLogger(self):\
\n return self.log\
\n\
\n def info(self, msg):\
\n self.log.info(msg)\
\n\
\n def error(self, msg, exc_info=False):\
\n self.log.error(msg,exc_info)\
\n\
\n # format and log dataframe\
\n def log_dataframe(self, df, rows=2, msg=None):\
\n buffer = io.StringIO()\
\n df.info(buf=buffer)\
\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\
\n log_text += '\\n\\t'+str(df.head(rows)).replace('\\n','\\n\\t')\
\n log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\
\n self.log.info(log_text)\
\n"},
}
class utility_function():
    """Assembles the shared-utility source code for one pipeline stage.

    ``module`` must be a key of ``utility_functions``; unknown names leave
    ``module_name`` as None and ``get_code`` then returns an empty string.
    """
    def __init__(self, module):
        # Idiom fix: membership test directly on the dict, not on .keys().
        if module in utility_functions:
            self.module_name = module
        else:
            self.module_name = None
        self.importer = importModule()
        self.codeText = ""
    def get_code(self):
        """Return import statements plus the helper-function source.

        NOTE: repeated calls keep appending to ``self.codeText``; callers are
        expected to invoke this once per instance (existing behaviour kept).
        """
        code = ""
        if self.module_name:
            for function in utility_functions[self.module_name]:
                self.codeText += self.get_function_code(function)
            code = self.importer.getCode()
            code += self.codeText
        return code
    def get_function_code(self, name):
        """Return the template for *name* and register its imports."""
        code = ""
        if name in functions_code:
            code += functions_code[name]['code']
            if self.importer:
                # Register every import the helper template relies on.
                for module in functions_code[name].get('imports', []):
                    self.importer.addModule(module['mod'],
                                            mod_from=module.get('mod_from', None),
                                            mod_as=module.get('mod_as', None))
        return code
    def get_importer(self):
        """Expose the import collector (for merging with other generators)."""
        return self.importer
if __name__ == '__main__':
    obj = utility_function('load_data')
    # Bug fix: the class defines get_code(), not get_utility_code() —
    # the previous call raised AttributeError.
    p = obj.get_code()
print(p) |
drift_analysis.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class drift():
    """Code generator for the monitoring (drift/retrain-decision) stage of an
    AION pipeline. Accumulates generated source in ``codeText`` and helper
    functions in ``function_code``."""
    def __init__(self, indent=0, tab_size=4):
        self.tab = " "*tab_size
        self.codeText = ""
        self.function_code = ""
        self.input_files = {}
        self.output_files = {}
        # Default files the generated monitoring script reads/writes.
        self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json'})
    def addInputFiles(self, files):
        """Merge *files* (logical name -> filename dict) into the input map."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v
    def addOutputFiles(self, files):
        """Merge *files* into the output map.

        Bug fix: this previously populated ``self.input_files`` (copy/paste
        from addInputFiles), so ``output_files`` was always empty.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
    def getInputFiles(self):
        """Return the ``IOFiles = {...}`` declaration for the generated script."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text
    def getOutputFiles(self):
        """Return the ``output_file = {...}`` declaration for the generated script."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text
    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles declaration, optionally indented (same
        indent-before-newline behaviour as the sibling generators)."""
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        # Source for the generated validateConfig() helper; emitted verbatim.
        text = "\n\
        \ndef validateConfig():\
        \n    config_file = Path(__file__).parent/'config.json'\
        \n    if not Path(config_file).exists():\
        \n        raise ValueError(f'Config file is missing: {config_file}')\
        \n    config = utils.read_json(config_file)\
        \n    return config\
        "
        return text
    def addLocalFunctionsCode(self):
        self.function_code += self.__addValidateConfigCode()
    def addPrefixCode(self, smaller_is_better=False, indent=1):
        """Append the generated monitoring() function.

        *smaller_is_better* is accepted for interface compatibility with the
        other stage generators but is unused here.
        """
        self.codeText += """
def monitoring(config, targetPath, log):
    retrain = False
    last_run_id = 0
    retrain_threshold = config.get('retrainThreshold', 100)
    meta_data_file = targetPath / IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = utils.read_json(meta_data_file)
        if not meta_data.get('register', None):
            log.info('Last time Pipeline not executed properly')
            retrain = True
        else:
            last_run_id = meta_data['register']['runId']
            df = utils.read_data(config['dataLocation'])
            df_len = len(df)
            if not meta_data['monitoring'].get('endIndex', None):
                meta_data['monitoring']['endIndex'] = int(meta_data['load_data']['Status']['Records'])
                meta_data['monitoring']['endIndexTemp'] = meta_data['monitoring']['endIndex']
            if meta_data['register'].get('registered', False):
                meta_data['monitoring']['endIndex'] = meta_data['monitoring']['endIndexTemp']
                meta_data['register']['registered'] = False #ack registery
            if (meta_data['monitoring']['endIndex'] + retrain_threshold) < df_len:
                meta_data['monitoring']['endIndexTemp'] = df_len
                retrain = True
    else:
        log.info('Pipeline running first time')
        meta_data = {}
        meta_data['monitoring'] = {}
        retrain = True
    if retrain:
        meta_data['monitoring']['runId'] = last_run_id + 1
    meta_data['monitoring']['retrain'] = retrain
    utils.write_json(meta_data, targetPath/IOFiles['metaData'])
    status = {'Status':'Success','retrain': retrain, 'runId':meta_data['monitoring']['runId']}
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def getMainCodeModules(self):
        """Imports required by the generated ``__main__`` section."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'pandas','mod_as':'pd'}
            ,{'module':'json'}
            ]
        return modules
    def addMainCode(self, indent=1):
        """Append the generated script's ``__main__`` entry point."""
        self.codeText += """
if __name__ == '__main__':
    config = validateConfig()
    targetPath = Path('aion') / config['targetPath']
    targetPath.mkdir(parents=True, exist_ok=True)
    log_file = targetPath / IOFiles['log']
    log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    try:
        print(monitoring(config, targetPath, log))
    except Exception as e:
        status = {'Status': 'Failure', 'Message': str(e)}
        print(json.dumps(status))
"""
    def addStatement(self, statement, indent=1):
        """Append *statement* on a new line at the given indent level."""
        self.codeText += f"\n{self.tab * indent}{statement}"
    def getCode(self, indent=1):
        """Return helper functions followed by the accumulated script body."""
        return self.function_code + '\n' + self.codeText
|
data_reader.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
supported_reader = ['sqlite', 'influx','s3']
functions_code = {
'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""
class dataReader():
def get_reader(self, reader_type, target_path=None, config=None):
if reader_type == 'sqlite':
return sqlite_writer(target_path=target_path)
elif reader_type == 'influx':
return Influx_writer(config=config)
elif reader_type == 'gcs':
return gcs(config=config)
elif reader_type == 'azure':
return azure(config=config)
elif reader_type == 's3':
return s3bucket(config=config)
else:
raise ValueError(reader_type)
"""
},
'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\n\
class sqlite_writer():
def __init__(self, target_path):
self.target_path = Path(target_path)
database_file = self.target_path.stem + '.db'
self.db = sqlite_db(self.target_path, database_file)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
return self.db.read(file)
def write(self, data, file):
self.db.write(data, file)
def close(self):
self.db.close()
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name):
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()
"""
},
'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\n\
class Influx_writer():
def __init__(self, config):
self.db = influx_db(config)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
query = "SELECT * FROM {}".format(file)
if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
query += f" time > now() - {self.db_config['read_time']}"
return self.db.read(query)
def write(self, data, file):
self.db.write(data, file)
def close(self):
pass
class influx_db():
def __init__(self, config):
self.host = config['host']
self.port = config['port']
self.user = config.get('user', None)
self.password = config.get('password', None)
self.token = config.get('token', None)
self.database = config['database']
self.measurement = config['measurement']
self.tags = config['tags']
self.client = self.get_client()
def table_exists(self, name):
query = f"SHOW MEASUREMENTS ON {self.database}"
result = self.client(query)
for measurement in result['measurements']:
if measurement['name'] == name:
return True
return False
def read(self, query)->pd.DataFrame:
cursor = self.client.query(query)
points = cursor.get_points()
my_list=list(points)
df=pd.DataFrame(my_list)
return df
def get_client(self):
headers = None
if self.token:
headers={"Authorization": self.token}
client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers)
databases = client.get_list_database()
databases = [x['name'] for x in databases]
if self.database not in databases:
client.create_database(self.database)
return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers)
def write(self,data, measurement=None):
if isinstance(data, pd.DataFrame):
sorted_col = data.columns.tolist()
sorted_col.sort()
data = data[sorted_col]
data = data.to_dict(orient='records')
if not measurement:
measurement = self.measurement
for row in data:
if 'time' in row.keys():
p = '%Y-%m-%dT%H:%M:%S.%fZ'
time_str = datetime.strptime(row['time'], p)
del row['time']
else:
time_str = None
if 'model_ver' in row.keys():
self.tags['model_ver']= row['model_ver']
del row['model_ver']
json_body = [{
'measurement': measurement,
'time': time_str,
'tags': self.tags,
'fields': row
}]
self.client.write_points(json_body)
def delete(self, name):
pass
def close(self):
self.client.close()
"""
},
's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
class s3bucket():
def __init__(self, config={}):
if 's3' in config.keys():
config = config['s3']
aws_access_key_id = config.get('aws_access_key_id','')
aws_secret_access_key = config.get('aws_secret_access_key','')
bucket_name = config.get('bucket_name','')
if not aws_access_key_id:
raise ValueError('aws_access_key_id can not be empty')
if not aws_secret_access_key:
raise ValueError('aws_secret_access_key can not be empty')
self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key))
self.bucket_name = bucket_name
def read(self, file_name):
try:
response = self.client.get_object(Bucket=self.bucket_name, Key=file_name)
return pd.read_csv(response['Body'])
except ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchBucket':
raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage")
elif ex.response['Error']['Code'] == 'NoSuchKey':
raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'")
else:
raise
"""
},
'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
def azure():
def __init__(self,config={}):
if 'azure' in config.keys():
config = config['azure']
account_name = config.get('account_name','')
account_key = config.get('account_key','')
container_name = config.get('container_name','')
if not account_name:
raise ValueError('Account name can not be empty')
if not account_key:
raise ValueError('Account key can not be empty')
if not container_name:
raise ValueError('Container name can not be empty')
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key)
self.file_system_client = service_client.get_file_system_client(container_name)
def read(self, directory_name):
root_dir = str(directory_name)
file_paths = self.file_system_client.get_paths(path=root_dir)
main_df = pd.DataFrame()
for path in file_paths:
if not path.is_directory:
file_client = file_system_client.get_file_client(path.name)
file_ext = Path(path.name).suffix
if file_ext in [".csv", ".tsv"]:
with open(csv_local, "wb") as my_file:
file_client.download_file().readinto(my_file)
with open(csv_local, 'r') as file:
data = file.read()
row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t'])
processed_df = pd.read_csv(csv_local, sep=row_delimiter)
elif file_ext == ".parquet":
stream = io.BytesIO()
file_client.download_file().readinto(stream)
processed_df = pd.read_parquet(stream, engine='pyarrow')
elif file_ext == ".avro":
with open(avro_local, "wb") as my_file:
file_client.download_file().readinto(my_file)
processed_df = pdx.read_avro(avro_local)
if main_df.empty:
main_df = pd.DataFrame(processed_df)
else:
main_df = main_df.append(processed_df, ignore_index=True)
return main_df
"""
},
'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\
class gcs():
def __init__(self, config={}):
if 'gcs' in config.keys():
config = config['gcs']
account_key = config.get('account_key','')
bucket_name = config.get('bucket_name','')
if not account_key:
raise ValueError('Account key can not be empty')
if not bucket_name:
raise ValueError('bucket name can not be empty')
storage_client = storage.Client.from_service_account_json(account_key)
self.bucket = storage_client.get_bucket(bucket_name)
def read(self, bucket_name, file_name):
data = self.bucket.blob(file_name).download_as_text()
return pd.read_csv(data, encoding = 'utf-8', sep = ',')
"""
}
}
class data_reader():
    """Code generator that emits the source of a ``dataReader`` dispatch
    function plus the reader-class implementations (sqlite, influx, gcs,
    azure, s3) requested by the caller.

    NOTE(review): depends on the module-level names ``supported_reader``,
    ``functions_code`` and ``importModule`` defined elsewhere in this file.
    """
    def __init__(self, reader_type=[]):
        # reader_type may be a single reader name (str), a list of names,
        # or empty/falsy to select every supported reader.
        # NOTE(review): mutable default argument left as-is; it is never
        # mutated here, so the shared-default pitfall does not bite.
        self.supported_readers = supported_reader
        if isinstance(reader_type, str):
            self.readers = [reader_type]
        elif not reader_type:
            self.readers = self.supported_readers
        else:
            self.readers = reader_type
        # Fail fast on any requested reader that has no registered template.
        unsupported_reader = [ x for x in self.readers if x not in self.supported_readers]
        if unsupported_reader:
            raise ValueError(f"reader type '{unsupported_reader}' is not supported\nSupported readers are {self.supported_readers}")
        self.codeText = ""
        self.importer = importModule()
    def get_reader_code(self, readers):
        """Return source for a ``dataReader(reader_type, ...)`` factory that
        dispatches to the given readers; unknown types raise at runtime."""
        # Statement emitted inside each reader's branch of the generated code.
        reader_code = {
            'sqlite': 'return sqlite_writer(target_path=target_path)',
            'influx': 'return Influx_writer(config=config)',
            'gcs': 'return gcs(config=config)',
            'azure': 'return azure(config=config)',
            's3': 'return s3bucket(config=config)'
        }
        code = "\n\ndef dataReader(reader_type, target_path=None, config=None):\n"
        for i, reader in enumerate(readers):
            if not i:
                code += f"    if reader_type == '{reader}':\n"
            else:
                code += f"    elif reader_type == '{reader}':\n"
            code += f"        {reader_code[reader]}\n"
        if readers:
            code += "    else:\n"
            code += f"""        raise ValueError("'{{reader_type}}' not added during code generation")\n"""
        else:
            code += f"""    raise ValueError("'{{reader_type}}' not added during code generation")\n"""
        return code
    def get_code(self):
        """Assemble import statements, the dispatch function and the reader
        class sources into ``self.codeText`` and return it."""
        code = self.get_reader_code(self.readers)
        functions = []
        for reader in self.readers:
            functions.append(reader)
        for function in functions:
            code += self.get_function_code(function)
        # Imports must come first: get_function_code registers them while
        # collecting the reader sources above.
        self.codeText += self.importer.getCode()
        self.codeText += code
        return self.codeText
    def get_function_code(self, name):
        """Return the source template for one reader and register the imports
        that template needs with ``self.importer``."""
        code = ""
        if name in functions_code.keys():
            code += functions_code[name]['code']
            if self.importer:
                if 'imports' in functions_code[name].keys():
                    for module in functions_code[name]['imports']:
                        mod_name = module['mod']
                        mod_from = module.get('mod_from', None)
                        mod_as = module.get('mod_as', None)
                        self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
        return code
    def get_importer(self):
        """Expose the import collector for merging with other generators."""
        return self.importer
|
imports.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\n"
else:
require += f"{mod}=={version(mod)}\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\n\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\n\n#local modules" + local_modules + '\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
|
transformer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class transformer():
    """Code generator for the time-series transformation stage.

    Accumulates (in ``self.codeText``) the source of a standalone script that
    MinMax-scales the configured target feature(s), persists the scaler, and
    writes the transformed data plus metadata.
    """
    def __init__(self, indent=0, tab_size=4):
        # ``indent`` is accepted for interface compatibility; only tab_size
        # is used here.
        self.df_name = 'df'
        self.tab = ' ' * tab_size
        self.codeText = ""
        self.transformers = []
        self.TxCols = []
        self.imputers = {}
        self.input_files = {}
        self.output_files = {}
        self.function_code = ''
        # Default artifacts every generated transformation script uses.
        self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','transformedData' : 'transformedData.dat','normalization' : 'normalization.pkl'})
    def addInputFiles(self, files):
        """Merge ``files`` (name -> filename) into the script's input set."""
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.input_files[k] = v
    def addOutputFiles(self, files):
        """Merge ``files`` (name -> filename) into the script's output set.

        Bug fix: this previously wrote into ``self.input_files``, leaving
        ``self.output_files`` permanently empty so ``getOutputFiles`` never
        rendered anything.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
    def getInputFiles(self):
        """Render the input mapping as an ``IOFiles = {...}`` statement."""
        text = 'IOFiles = '
        if not self.input_files:
            text += '{ }'
        else:
            text += json.dumps(self.input_files, indent=4)
        return text
    def getOutputFiles(self):
        """Render the output mapping as an ``output_file = {...}`` statement."""
        text = 'output_file = '
        if not self.output_files:
            text += '{ }'
        else:
            text += json.dumps(self.output_files, indent=4)
        return text
    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles statement, optionally re-indented for embedding."""
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        # Source for validateConfig(): loads config.json from the script dir.
        text = "\n\
        \ndef validateConfig():\
        \n    config_file = Path(__file__).parent/'config.json'\
        \n    if not Path(config_file).exists():\
        \n        raise ValueError(f'Config file is missing: {config_file}')\
        \n    config = read_json(config_file)\
        \n    return config"
        return text
    def getPrefixModules(self):
        """Modules the generated transformation() function needs imported."""
        modules = [
            {'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'pandas', 'mod_as':'pd'}
            ,{'module':'warnings'}
            ,{'module':'json'}
            ,{'module':'logging'}
            ,{'module':'joblib'}
            ,{'module':'MinMaxScaler', 'mod_from':'sklearn.preprocessing'}
        ]
        return modules
    def addPrefixCode(self, indent=1):
        """Append the transformation() function template to the script body."""
        # NOTE(review): the emitted function reads the module-level
        # ``meta_data`` created by the generated __main__ block.
        self.codeText += """
def transformation(config, targetPath, log):
    dataLoc = targetPath / IOFiles['inputData']
    if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exists.'}
    df = read_data(dataLoc)
    log.log_dataframe(df)
    target_feature = config['target_feature']
    dateTimeFeature=config['dateTimeFeature']
    df.set_index(dateTimeFeature, inplace=True)
    df = df.dropna()
    df=df.fillna(df.mean())
    if len(target_feature) == 1:
        trainX = df[target_feature].to_numpy().reshape(-1,1)
    else:
        trainX = df[target_feature].to_numpy()
    scaler = MinMaxScaler(feature_range=(0, 1))
    trainX = scaler.fit_transform(trainX)
    normalization_file_name = str(targetPath / IOFiles['normalization'])
    joblib.dump(scaler, normalization_file_name)
    df[target_feature] = trainX
    log.log_dataframe(df)
    csv_path = str(targetPath / IOFiles['transformedData'])
    write_data(df, csv_path, index=True)
    status = {'Status': 'Success', 'DataFilePath': IOFiles['transformedData'],
        'target_feature': target_feature,'dateTimeFeature':dateTimeFeature,
        "Normalization_file":normalization_file_name }
    meta_data['transformation'] = {}
    meta_data['transformation']['Status'] = status
    write_json(meta_data, str(targetPath / IOFiles['metaData']))
    log.info(f'Transformed data saved at {csv_path}')
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def getMainCodeModules(self):
        """Modules the generated __main__ driver needs imported."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'sys'}
            ,{'module':'json'}
            ,{'module':'logging'}
            ,{'module':'argparse'}
        ]
        return modules
    def addMainCode(self, indent=1):
        """Append the generated script's __main__ driver template."""
        self.codeText += """
if __name__ == '__main__':
    config = validateConfig()
    targetPath = Path('aion') / config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    meta_data_file = targetPath / IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = read_json(meta_data_file)
    else:
        raise ValueError(f'Configuration file not found: {meta_data_file}')
    log_file = targetPath / IOFiles['log']
    log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    try:
        print(transformation(config, targetPath, log))
    except Exception as e:
        status = {'Status': 'Failure', 'Message': str(e)}
        print(json.dumps(status))
"""
    def addValidateConfigCode(self, indent=1):
        """Append validateConfig() source to the helper-function buffer."""
        self.function_code += self.__addValidateConfigCode()
    def addLocalFunctionsCode(self):
        """Register all local helper functions the generated script needs."""
        self.addValidateConfigCode()
    def addStatement(self, statement, indent=1):
        """Append one indented statement to the generated source."""
        self.codeText += '\n' + self.tab * indent + statement
    def getCode(self, indent=1):
        """Return helper-function source followed by the script body."""
        return self.function_code + '\n' + self.codeText
    def getDFName(self):
        return self.df_name
|
functions.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class global_function():
    """Registry of reusable source-code snippets (outlier helpers, feature
    selection, scoring, logging, mlflow helpers, metrics and IO utilities)
    that the code generators splice into the scripts they emit.

    Each entry of ``available_functions`` maps a key to the emitted function
    or class name (``'name'``), its source text (``'code'``), and optionally
    the imports (``'imports'``) that source requires at runtime.
    """
    def __init__(self, tab_size=4):
        self.tab = ' ' * tab_size
        self.codeText = ""
        # Snippet registry; the template bodies below are emitted verbatim
        # into generated scripts, so their embedded indentation is semantic.
        self.available_functions = {
        'iqr':{'name':'iqrOutlier','code':f"\n\ndef iqrOutlier(df):\
\n{self.tab}Q1 = df.quantile(0.25)\
\n{self.tab}Q3 = df.quantile(0.75)\
\n{self.tab}IQR = Q3 - Q1\
\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\
\n{self.tab}return index"},
        'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\n\ndef zscoreOutlier(df):\
\n{self.tab}z = numpy.abs(stats.zscore(df))\
\n{self.tab}index = (z < 3).all(axis=1)\
\n{self.tab}return index"},
        'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\n\ndef iforestOutlier(df):\
\n{self.tab}from sklearn.ensemble import IsolationForest\
\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\
\n{self.tab}isolation_forest.fit(df)\
\n{self.tab}y_pred_train = isolation_forest.predict(df)\
\n{self.tab}return y_pred_train == 1"},
        'minMaxImputer':{'name':'minMaxImputer','code':f"\n\nclass minMaxImputer(TransformerMixin):\
\n{self.tab}def __init__(self, strategy='max'):\
\n{self.tab}{self.tab}self.strategy = strategy\
\n{self.tab}def fit(self, X, y=None):\
\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\
\n{self.tab}{self.tab}if self.strategy == 'min':\
\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\
\n{self.tab}{self.tab}else:\
\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\
\n{self.tab}{self.tab}return self\
\n{self.tab}def transform(self, X):\
\n{self.tab}{self.tab}import numpy\
\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"},
        'DummyEstimator':{'name':'DummyEstimator','code':f"\n\nclass DummyEstimator(BaseEstimator):\
\n{self.tab}def fit(self): pass\
\n{self.tab}def score(self): pass"},
        'start_reducer':{'name':'start_reducer','code':"\n\
\ndef start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):\
\n    import numpy as np\
\n    import pandas as pd\
\n    import itertools\
\n    from sklearn.feature_selection import VarianceThreshold\
\n\
\n    train_features = df.columns.tolist()\
\n    train_features.remove(target_feature)\
\n    df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature\
\n    numeric_features = df.select_dtypes(include='number').columns.tolist()\
\n    non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()\
\n    if numeric_features and var_threshold:\
\n        qconstantFilter = VarianceThreshold(threshold=var_threshold)\
\n        tempDf=df[numeric_features]\
\n        qconstantFilter.fit(tempDf)\
\n        numeric_features = [x for x,y in zip(numeric_features,qconstantFilter.get_support()) if y]\
\n    if numeric_features:\
\n        numColPairs = list(itertools.product(numeric_features, numeric_features))\
\n        for item in numColPairs:\
\n            if(item[0] == item[1]):\
\n                numColPairs.remove(item)\
\n        tempArray = []\
\n        for item in numColPairs:\
\n            tempCorr = np.abs(df[item[0]].corr(df[item[1]]))\
\n            if(tempCorr > corr_threshold):\
\n                tempArray.append(item[0])\
\n        tempArray = np.unique(tempArray).tolist()\
\n        nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))\
\n        groupedFeatures = []\
\n        if tempArray:\
\n            corrDic = {}\
\n            for feature in tempArray:\
\n                temp = []\
\n                for col in tempArray:\
\n                    tempCorr = np.abs(df[feature].corr(df[col]))\
\n                    temp.append(tempCorr)\
\n                corrDic[feature] = temp\
\n            #Similar correlation df\
\n            corrDF = pd.DataFrame(corrDic,index = tempArray)\
\n            corrDF.loc[:,:] = np.tril(corrDF, k=-1)\
\n            alreadyIn = set()\
\n            similarFeatures = []\
\n            for col in corrDF:\
\n                perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()\
\n                if perfectCorr and col not in alreadyIn:\
\n                    alreadyIn.update(set(perfectCorr))\
\n                    perfectCorr.append(col)\
\n                    similarFeatures.append(perfectCorr)\
\n            updatedSimFeatures = []\
\n            for items in similarFeatures:\
\n                if(target_feature != '' and target_feature in items):\
\n                    for p in items:\
\n                        updatedSimFeatures.append(p)\
\n                else:\
\n                    updatedSimFeatures.append(items[0])\
\n            newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))\
\n            updatedFeatures = list(set(newTempFeatures + non_numeric_features))\
\n        else:\
\n            updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\
\n    else:\
\n        updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\
\n    return updatedFeatures"},
        'feature_importance_class':{'name':'feature_importance_class','code':"\n\
\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\
\n    import pandas as pd\
\n    from sklearn.feature_selection import chi2\
\n    from sklearn.feature_selection import f_classif\
\n    from sklearn.feature_selection import mutual_info_classif\
\n    \
\n    impFeatures = []\
\n    if cat_features:\
\n        categoricalData=df[cat_features]\
\n        chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\
\n        corrSeries=pd.Series(chiSqCategorical, index=cat_features)\
\n        impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\
\n    if numeric_features:\
\n        quantData=df[numeric_features]\
\n        fclassScore=f_classif(quantData,df[target_feature])[1]\
\n        miClassScore=mutual_info_classif(quantData,df[target_feature])\
\n        fClassSeries=pd.Series(fclassScore,index=numeric_features)\
\n        miClassSeries=pd.Series(miClassScore,index=numeric_features)\
\n        impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\
\n        impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\
\n    pearsonScore=df.corr() \
\n    targetPScore=abs(pearsonScore[target_feature])\
\n    impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\
\n    return list(set(sum(impFeatures, [])))"},
        'feature_importance_reg':{'name':'feature_importance_reg','code':"\n\
\ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\
\n    import pandas as pd\
\n    from sklearn.feature_selection import f_regression\
\n    from sklearn.feature_selection import mutual_info_regression\
\n    \
\n    impFeatures = []\
\n    if numeric_features:\
\n        quantData =df[numeric_features]\
\n        fregScore=f_regression(quantData,df[target_feature])[1]\
\n        miregScore=mutual_info_regression(quantData,df[target_feature])\
\n        fregSeries=pd.Series(fregScore,index=numeric_features)\
\n        miregSeries=pd.Series(miregScore,index=numeric_features)\
\n        impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\
\n        impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\
\n    pearsonScore=df.corr()\
\n    targetPScore=abs(pearsonScore[target_feature])\
\n    impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\
\n    return list(set(sum(impFeatures, [])))"},
        'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\n\
\ndef scoring_criteria(score_param, problem_type, class_count):\
\n    if problem_type == 'classification':\
\n        scorer_mapping = {\
\n                    'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\
\n                    'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\
\n                    'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\
\n                    'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\
\n                   }\
\n        if (score_param.lower() == 'roc_auc') and (class_count > 2):\
\n            score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\
\n        else:\
\n            class_type = 'binary_class' if class_count == 2 else 'multi_class'\
\n            if score_param in scorer_mapping.keys():\
\n                score_param = scorer_mapping[score_param][class_type]\
\n            else:\
\n                score_param = 'accuracy'\
\n    return score_param"},
        'log_dataframe':{'name':'log_dataframe','code':f"\n\
\ndef log_dataframe(df, msg=None):\
\n    import io\
\n    buffer = io.StringIO()\
\n    df.info(buf=buffer)\
\n    if msg:\
\n        log_text = f'Data frame after {{msg}}:'\
\n    else:\
\n        log_text = 'Data frame:'\
\n    log_text += '\\n\\t'+str(df.head(2)).replace('\\n','\\n\\t')\
\n    log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\
\n    get_logger().info(log_text)"},
        'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\n\
\nclass BayesSearchCV():\
\n\
\n    def __init__(self, estimator, params, scoring, n_iter, cv):\
\n        self.estimator = estimator\
\n        self.params = params\
\n        self.scoring = scoring\
\n        self.iteration = n_iter\
\n        self.cv = cv\
\n        self.best_estimator_ = None\
\n        self.best_score_ = None\
\n        self.best_params_ = None\
\n\
\n    def __min_fun(self, params):\
\n        score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\
\n        acc = score.mean()\
\n        return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\
\n\
\n    def fit(self, X, y):\
\n        trials = Trials()\
\n        self.X = X\
\n        self.y = y\
\n        best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\
\n        result = sorted(trials.results, key = lambda x: x['loss'])[0]\
\n        self.best_estimator_ = result['model']\
\n        self.best_score_ = result['score']\
\n        self.best_params_ = result['params']\
\n        self.best_estimator_.fit(X, y)\
\n\
\n    def hyperOptParamConversion( paramSpace):\
\n        paramDict = {}\
\n        for j in list(paramSpace.keys()):\
\n            inp = paramSpace[j]\
\n            isLog = False\
\n            isLin = False\
\n            isRan = False\
\n            isList = False\
\n            isString = False\
\n            try:\
\n                # check if functions are given as input and reassign paramspace\
\n                v = paramSpace[j]\
\n                if 'logspace' in paramSpace[j]:\
\n                    paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n                    isLog = True\
\n                elif 'linspace' in paramSpace[j]:\
\n                    paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n                    isLin = True\
\n                elif 'range' in paramSpace[j]:\
\n                    paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n                    isRan = True\
\n                elif 'list' in paramSpace[j]:\
\n                    paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\
\n                    isList = True\
\n                elif '[' and ']' in paramSpace[j]:\
\n                    paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\
\n                    isList = True\
\n                x = paramSpace[j].split(',')\
\n            except:\
\n                x = paramSpace[j]\
\n            str_arg = paramSpace[j]\
\n\
\n            # check if arguments are string\
\n            try:\
\n                test = eval(x[0])\
\n            except:\
\n                isString = True\
\n\
\n            if isString:\
\n                paramDict.update({j: hp.choice(j, x)})\
\n            else:\
\n                res = eval(str_arg)\
\n                if isLin:\
\n                    y = eval('np.linspace' + str(res))\
\n                    paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\
\n                elif isLog:\
\n                    y = eval('np.logspace' + str(res))\
\n                    paramDict.update(\
\n                        {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\
\n                elif isRan:\
\n                    y = eval('np.arange' + str(res))\
\n                    paramDict.update({j: hp.choice(j, y)})\
\n                # check datatype of argument\
\n                elif isinstance(eval(x[0]), bool):\
\n                    y = list(map(lambda i: eval(i), x))\
\n                    paramDict.update({j: hp.choice(j, eval(str(y)))})\
\n                elif isinstance(eval(x[0]), float):\
\n                    res = eval(str_arg)\
\n                    if len(str_arg.split(',')) == 3 and not isList:\
\n                        y = eval('np.linspace' + str(res))\
\n                        #print(y)\
\n                        paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\
\n                    else:\
\n                        y = list(res) if isinstance(res, tuple) else [res]\
\n                        paramDict.update({j: hp.choice(j, y)})\
\n                else:\
\n                    res = eval(str_arg)\
\n                    if len(str_arg.split(',')) == 3 and not isList:\
\n                        y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\
\n                    else:\
\n                        y = list(res) if isinstance(res, tuple) else [res]\
\n                    paramDict.update({j: hp.choice(j, y)})\
\n        return paramDict"},
        's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':"\n\
\ndef s2n(value):\
\n    try:\
\n        x=eval(value)\
\n        return x\
\n    except:\
\n        try:\
\n            return w2n.word_to_num(value)\
\n        except:\
\n            return np.nan"},
        'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\n\
\ndef read_json(file_path):\
\n    data = None\
\n    with open(file_path,'r') as f:\
\n        data = json.load(f)\
\n    return data\
\n\
\ndef write_json(data, file_path):\
\n    with open(file_path,'w') as f:\
\n        json.dump(data, f)\
\n\
\ndef read_data(file_path, encoding='utf-8', sep=','):\
\n    return pd.read_csv(file_path, encoding=encoding, sep=sep)\
\n\
\ndef write_data(data, file_path, index=False):\
\n    return data.to_csv(file_path, index=index)\
\n\
\n#Uncomment and change below code for google storage\
\n#def write_data(data, file_path, index=False):\
\n#    file_name= file_path.name\
\n#    data.to_csv('output_data.csv')\
\n#    storage_client = storage.Client()\
\n#    bucket = storage_client.bucket('aion_data')\
\n#    bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\
\n#    return data\
\n\
\ndef is_file_name_url(file_name):\
\n    supported_urls_starts_with = ('gs://','https://','http://')\
\n    return file_name.startswith(supported_urls_starts_with)\
\n"},
        'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\n\
\nlog = None\
\ndef set_logger(log_file, mode='a'):\
\n    global log\
\n    logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\
\n    log = logging.getLogger(Path(__file__).parent.name)\
\n    return log\
\n\
\ndef get_logger():\
\n    return log\n"},
        'mlflowSetPath':{'name':'mlflowSetPath','code':f"\n\ndef mlflowSetPath(path, name):\
\n{self.tab}db_name = str(Path(path)/'mlruns')\
\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\
\n{self.tab}mlflow.set_experiment(str(Path(path).name))\
\n"},
        'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\n\ndef mlflow_create_experiment(config, path, name):\
\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\
\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\
\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\
\n{self.tab}client = mlflow.tracking.MlflowClient()\
\n{self.tab}experiment = client.get_experiment_by_name(name)\
\n{self.tab}if experiment:\
\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\
\n{self.tab}else:\
\n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\
\n{self.tab}return client, experiment_id\
\n"},
        'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\n\ndef get_mlflow_uris(config, path):\
\n    artifact_uri = None\
\n    tracking_uri_type = config.get('tracking_uri_type',None)\
\n    if tracking_uri_type == 'localDB':\
\n        tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\
\n    elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\
\n        tracking_uri = config['tracking_uri']\
\n        if config.get('artifacts_uri', None):\
\n            if Path(config['artifacts_uri']).exists():\
\n                artifact_uri = 'file:' + config['artifacts_uri']\
\n            else:\
\n                artifact_uri = config['artifacts_uri']\
\n        else:\
\n            artifact_uri = 'file:' + str(path.resolve()/'mlruns')\
\n    else:\
\n        tracking_uri = 'file:' + str(path.resolve()/'mlruns')\
\n        artifact_uri = None\
\n    if config.get('registry_uri', None):\
\n        registry_uri = config['registry_uri']\
\n    else:\
\n        registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\
\n    return tracking_uri, artifact_uri, registry_uri\
\n"},
        'logMlflow':{'name':'logMlflow','code':f"\n\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\
\n{self.tab}run_id = None\
\n{self.tab}for k,v in params.items():\
\n{self.tab}{self.tab}mlflow.log_param(k, v)\
\n{self.tab}for k,v in metrices.items():\
\n{self.tab}{self.tab}mlflow.log_metric(k, v)\
\n{self.tab}if 'CatBoost' in algoName:\
\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\
\n{self.tab}else:\
\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\
\n{self.tab}tags['processed'] = 'no'\
\n{self.tab}tags['registered'] = 'no'\
\n{self.tab}mlflow.set_tags(tags)\
\n{self.tab}if model_info:\
\n{self.tab}{self.tab}run_id = model_info.run_id\
\n{self.tab}return run_id\
\n"},
        'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\ndef get_classification_metrices( actual_values, predicted_values):\
\n    result = {}\
\n    accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\
\n    avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\
\n        average='macro')\
\n    avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\
\n        average='macro')\
\n    avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\
\n        average='macro')\
\n\
\n    result['accuracy'] = math.floor(accuracy_score*10000)/100\
\n    result['precision'] = math.floor(avg_precision*10000)/100\
\n    result['recall'] = math.floor(avg_recall*10000)/100\
\n    result['f1'] = math.floor(avg_f1*10000)/100\
\n    return result\
\n"},
        'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\ndef get_regression_metrices( actual_values, predicted_values):\
\n    result = {}\
\n\
\n    me = np.mean(predicted_values - actual_values)\
\n    sde = np.std(predicted_values - actual_values, ddof = 1)\
\n\
\n    abs_err = np.abs(predicted_values - actual_values)\
\n    mae = np.mean(abs_err)\
\n    sdae = np.std(abs_err, ddof = 1)\
\n\
\n    abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\
\n    mape = np.mean(abs_perc_err)\
\n    sdape = np.std(abs_perc_err, ddof = 1)\
\n\
\n    result['mean_error'] = me\
\n    result['mean_abs_error'] = mae\
\n    result['mean_abs_perc_error'] = mape\
\n    result['error_std'] = sde\
\n    result['abs_error_std'] = sdae\
\n    result['abs_perc_error_std'] = sdape\
\n    return result\
\n"}
        }
    def add_function(self, name, importer=None):
        """Append the named snippet's source to the buffer and, if an importer
        is given, register the imports that snippet needs."""
        if name in self.available_functions.keys():
            self.codeText += self.available_functions[name]['code']
            if importer:
                if 'imports' in self.available_functions[name].keys():
                    for module in self.available_functions[name]['imports']:
                        mod_name = module['mod']
                        mod_from = module.get('mod_from', None)
                        mod_as = module.get('mod_as', None)
                        importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
    def get_function_name(self, name):
        """Return the emitted function/class name for a snippet key, or None
        when the key is not registered."""
        if name in self.available_functions.keys():
            return self.available_functions[name]['name']
        return None
    def getCode(self):
        """Return all snippet source accumulated by ``add_function``."""
        return self.codeText
|