__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
edge_convert.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
import sys
from AionConfigManager import AionConfigManager
import joblib  # sklearn.externals.joblib was removed from scikit-learn; import joblib directly


class edgeformats:

    def __init__(self, deploy_path):
        self.deploy_path = deploy_path
        self.edge_deploy_path = os.path.join(deploy_path, "edge")
        os.mkdir(self.edge_deploy_path)

    def converttoedgedeployment(self, saved_model, edge_format, xtrain, model_type, iterName, iterVersion, features, profiled_data_file):
        # Convert the trained scikit-learn model to ONNX and emit a runtime validation script.
        if edge_format == 'onnx':
            from skl2onnx import convert_sklearn
            from skl2onnx.common.data_types import FloatTensorType
            xtrain = xtrain[features]
            initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))]
            filename = os.path.join(self.deploy_path, saved_model)
            loaded_model = joblib.load(filename)
            onx = convert_sklearn(loaded_model, initial_types=initial_type)
            onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx')
            with open(onnx_filename, "wb") as f:
                f.write(onx.SerializeToString())
            self.createedgeruntimeFile(onnx_filename, profiled_data_file, features)

    def createedgeruntimeFile(self, onnx_filename, datafilepath, features):
        # Generate onnxvalidation.py, which runs the exported ONNX model on a sample of the profiled data.
        runtimefilecontent = ''
        runtimefilecontent += 'import pandas'
        runtimefilecontent += '\n'
        runtimefilecontent += 'import numpy'
        runtimefilecontent += '\n'
        runtimefilecontent += 'import sys'
        runtimefilecontent += '\n'
        runtimefilecontent += 'import onnxruntime as rt'
        runtimefilecontent += '\n'
        runtimefilecontent += 'def onnx_runtime_validation():'
        runtimefilecontent += '\n'
        runtimefilecontent += ' modelfile = r"'+str(onnx_filename)+'"'
        runtimefilecontent += '\n'
        runtimefilecontent += ' datafile = r"'+str(datafilepath)+'"'
        runtimefilecontent += '\n'
        runtimefilecontent += ' dataframe = pandas.read_csv(datafile)'
        runtimefilecontent += '\n'
        runtimefilecontent += ' dataframe = dataframe['+str(features)+']'
        runtimefilecontent += '\n'
        runtimefilecontent += ' df = dataframe.head(8)'
        runtimefilecontent += '\n'
        runtimefilecontent += ' dataset = df.values'
        runtimefilecontent += '\n'
        runtimefilecontent += ' sess = rt.InferenceSession(modelfile)'
        runtimefilecontent += '\n'
        runtimefilecontent += ' input_name = sess.get_inputs()[0].name'
        runtimefilecontent += '\n'
        runtimefilecontent += ' label_name = sess.get_outputs()[0].name'
        runtimefilecontent += '\n'
        runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape'
        runtimefilecontent += '\n'
        runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)'
        runtimefilecontent += '\n'
        runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]'
        runtimefilecontent += '\n'
        runtimefilecontent += ' df[\'predictions\'] = pred_onx'
        runtimefilecontent += '\n'
        runtimefilecontent += ' result = df.to_json(orient="records")'
        runtimefilecontent += '\n'
        runtimefilecontent += ' return(result)'
        runtimefilecontent += '\n'
        runtimefilecontent += 'if __name__ == "__main__":'
        runtimefilecontent += '\n'
        runtimefilecontent += ' output = onnx_runtime_validation()'
        runtimefilecontent += '\n'
        runtimefilecontent += ' print("predictions:",output)'
        filename = os.path.join(self.edge_deploy_path, 'onnxvalidation.py')
        f = open(filename, "w")
        f.write(str(runtimefilecontent))
        f.close()
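A minimal usage sketch of the edge exporter above, assuming a scikit-learn model serialized with joblib and the skl2onnx/onnxruntime packages installed; the folder name, model file, and feature names are illustrative placeholders, not values taken from the AION pipeline.

import pandas as pd
from edge_convert import edgeformats

# Hypothetical inputs: profiled training data and a joblib-saved sklearn model in 'deploy_dir'.
xtrain = pd.read_csv('profiled_train.csv')
exporter = edgeformats('deploy_dir')                  # creates deploy_dir/edge
exporter.converttoedgedeployment(
    saved_model='trained_model.pkl',                  # placeholder model file name
    edge_format='onnx',
    xtrain=xtrain,
    model_type='classification',
    iterName='usecase1',
    iterVersion='1',
    features=['f1', 'f2', 'f3'],                      # placeholder feature columns
    profiled_data_file='profiled_train.csv')
# Writes classification_usecase1_1.onnx plus an onnxvalidation.py runtime check under deploy_dir/edge.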
common.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package import utility from AION.prediction_package.utility import TAB_CHAR from importlib.metadata import version """ This file provide the functionality which is common for most of the problem types deployment. """ def main_code(): return """ class predict(): def __init__(self): self.profiler = inputprofiler() self.selector = selector() self.trainer = trainer() self.formatter = output_format() def run(self, data): try: df = self._parse_data(data) raw_df = df.copy() df = self.profiler.run(df) df = self.selector.run(df) df = self.trainer.run(df) output = self.formatter.run(raw_df, df) print("predictions:",output) return (output) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) def _parse_data(self, data): file_path = Path(data) if file_path.suffix == ".tsv": df = pd.read_csv(data,encoding='utf-8',sep='\\t',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix in [".csv", ".dat"]: df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix in [".gz"] and file_path.stem.endswith('.csv'): df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) df = pd.json_normalize(jsonData) else: jsonData = json.loads(data) df = pd.json_normalize(jsonData) return df import sys if __name__ == "__main__": output = predict().run(sys.argv[1]) """ def profiler_code(params, indent=0): """ This will create the profiler file based on the config file. separated file is created as profiler is required for input drift also. 
""" imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): """ init_code = """ def __init__(self): """ if params.get('text_features'): imported_modules.append({'module':'importlib.util'}) init_code += """ # preprocessing preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if not preprocess_path.exists(): raise ValueError(f'Preprocess model file not found: {preprocess_path}') self.profiler = joblib.load(preprocess_path) """ run_code = """ def run(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if params.get('input_features_type'): imported_modules.append({'module':'dtype','mod_from':'numpy'}) run_code += f""" df = df.astype({params.get('input_features_type')}) """ if params.get('word2num_features'): imported_modules.append({'module':'w2n','mod_from':'word2number'}) run_code += f""" def s2n(value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))""" if params.get('unpreprocessed_columns'): run_code += f""" unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}'] df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if params.get('force_numeric_conv'): run_code += f""" df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')""" if params.get('conversion_method','').lower() == 'glove': code_text, modules = __profiler_glove_code(params) imported_modules.extend( modules) init_code += code_text elif params.get('conversion_method','').lower() == 'fasttext': init_code += __profiler_fasttext_code(params) run_code += __profiler_main_code(params) if params.get('unpreprocessed_columns'): run_code += f""" df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ utility.import_modules(importer, imported_modules) import_code = importer.getCode() return import_code + code + init_code + run_code def __profiler_glove_code(params, indent=2): modules = [] modules.append({'module':'load_pretrained','mod_from':'text.Embedding'}) modules.append({'module':'TextProcessing','mod_from':'text'}) code = """ model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') embed_size, pretrained_model = load_pretrained(model_path) self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) """ return code.replace('\n', '\n'+(indent * TAB_CHAR)), modules def __profiler_fasttext_code(params, indent=2): code = """ def get_pretrained_model_path(): try: from AION.appbe.dataPath import DATA_DIR modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' except: modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' if not modelsPath.exists(): modelsPath.mkdir(parents=True, exist_ok=True) return modelsPath if not importlib.util.find_spec('fasttext'): raise ValueError('fastText not installed') else: import os import fasttext import fasttext.util cwd = os.getcwd() os.chdir(get_pretrained_model_path()) fasttext.util.download_model('en', if_exists='ignore') pretrained_model = fasttext.load_model('cc.en.300.bin') 
os.chdir(cwd) self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary') """ return code.replace('\n', '\n'+(indent * TAB_CHAR)) def __profiler_main_code(params, indent=2): code = f""" df = self.profiler.transform(df) columns = {params['output_features']} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) return df """ return code.replace('\n', '\n'+(indent * TAB_CHAR)) def feature_selector_code( params, indent=0): modules = [ {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} ] code = """ class selector(): # this class def __init__(self): pass def run(self, df):""" code +=f""" return df[{params['output_features']}] """ return code, modules def feature_reducer_code( params, indent=0): modules = [ {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] code = f""" class selector(): def __init__(self): reducer_file = (Path(__file__).parent/"model")/"{params['reducer_file']}" if not reducer_file.exists(): raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}') self.model = joblib.load(reducer_file) def run(self, df): reducer_input = {params['input_features']} reducer_output = {params['output_features']} df = self.model.transform(df[reducer_input]) return pd.DataFrame(df,columns=reducer_output) """ if indent: code = code.replace('\n', '\n'+(indent * TAB_CHAR)) return code, modules def create_feature_list(config=None, target_feature=None, deploy_path=None): featurelist = [] if 'profiler' in config: if 'input_features_type' in config['profiler']: input_features = config['profiler']['input_features_type'] for x in input_features: featurelt={} featurelt['feature'] = x if x == target_feature: featurelt['Type'] = 'Target' else: if input_features[x] in ['int','int64','float','float64']: featurelt['Type'] = 'Numeric' elif input_features[x] == 'object': featurelt['Type'] = 'Text' elif input_features[x] == 'category': featurelt['Type'] = 'Category' else: featurelt['Type'] = 'Unknown' featurelist.append(featurelt) featurefile = f""" import json def getfeatures(): try: features = {featurelist} outputjson = {{"status":"SUCCESS","features":features}} output = json.dumps(outputjson) print("Features:",output) return(output) except Exception as e: output = {{"status":"FAIL","message":str(e).strip(\'"\')}} print("Features:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = getfeatures() """ with open( deploy_path/'featureslist.py', 'wb') as f: f.write( str(featurefile).encode('utf8')) def requirement_file(deploy_path,model,textFeatures,learner_type='ML'): modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] requires = '' for mod in modules: requires += f"{mod}=={version(mod)}\n" if len(textFeatures) > 0: tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] for mod in tmodules: requires += f"{mod}=={version(mod)}\n" if model == 'Extreme Gradient Boosting (XGBoost)': mmodules = ['xgboost'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model == 'Light Gradient Boosting (LightGBM)': mmodules = ['lightgbm'] for mod in mmodules: requires += 
f"{mod}=={version(mod)}\n" if model == 'Categorical Boosting (CatBoost)': mmodules = ['catboost'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'arima': mmodules = ['pmdarima'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'fbprophet': mmodules = ['prophet'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': mmodules = ['tensorflow'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 mmodules = ['lifelines'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'sentencetransformer': #bug 12833 mmodules = ['sentence_transformers'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" with open( deploy_path/'requirements.txt', 'wb') as f: f.write(str(requires).encode('utf8')) def create_readme_file(deploy_path,modelfile,features): data = json.dumps([{x:x+'_value'} for x in features]) backslash_data = data.replace('"', '\\"') content = f""" ========== Files Structures ========== {modelfile} ------ Trained Model aion_prediction.py --> Python package entry point script/inputprofiler.py --> Profiling like FillNA and Category to Numeric ========== How to call the model ========== ============== From Windows Terminal ========== python aion_prediction.py "{backslash_data}" ============== From Linux Terminal ========== python aion_prediction.py "{data}" ============== Output ========== {{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}}]}} ## for single Row/Record {{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}},{{"Data1":"Value","prediction":"Value"}}]}} ## For Multiple Row/Record {{"status":"ERROR","message":"description"}} ## In Case Exception or Error """ filename = deploy_path/'readme.txt' with open(filename, 'w') as f: f.write(content) def create_util_folder(deploy_path): import tarfile ext_path = Path(__file__).parent.parent/'utilities' for x in ext_path.iterdir(): if x.suffix == '.tar': if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: my_tar = tarfile.open(x) my_tar.extractall(deploy_path) my_tar.close()
model_deploy.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import platform import json import shutil import logging from pathlib import Path from prediction_package import production from prediction_package import prediction_transformation as cs class DeploymentManager: def __init__(self): self.requirementfile='' self.modelfile='' self.s2i_environmentfile='' self.selectorfile='' self.profilerfile='' self.readmepackagename='' self.pythonpackage='' self.log = logging.getLogger('eion') def include_import_file(self,learner_type,method,scoreParam,model_type,model): if((learner_type == 'DL') or (learner_type == 'TextDL')): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'): self.modelfile += 'import joblib' self.modelfile += '\n' self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'import pandas as pd' self.modelfile += '\n' self.modelfile += 'import numpy as np' self.modelfile += '\n' self.modelfile += 'from pathlib import Path' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'from keras.models import load_model' self.modelfile += '\n' self.modelfile += 'import warnings' self.modelfile += '\n' self.modelfile += 'from sklearn.preprocessing import StandardScaler' self.modelfile += '\n' self.modelfile += 'warnings.filterwarnings("ignore")' self.modelfile += '\n' if(learner_type == 'ImageClassification'): self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.models import Sequential' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing import image' self.modelfile += '\n' self.modelfile += 'import numpy as np' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.layers import Input' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.models import Model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.optimizers import Adam' self.modelfile += '\n' self.modelfile += 'import cv2' self.modelfile += '\n' if(learner_type == 'objectDetection'): self.modelfile += 'import os\n' self.modelfile += 'from object_detection.utils import label_map_util\n' self.modelfile += 'from object_detection.utils import config_util\n' self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\n' self.modelfile += 'from object_detection.builders import model_builder\n' self.modelfile += 'import tensorflow as tf\n' self.modelfile += 'import numpy as np\n' self.modelfile += 'from PIL import Image\n' self.modelfile += 'import matplotlib.pyplot as plt\n' self.modelfile += 'import 
pandas as pd\n' self.modelfile += 'from pathlib import Path\n' if(learner_type == 'Text Similarity'): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' if(model == 'Neural Architecture Search'): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'import joblib' self.modelfile += '\n' self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'import pandas as pd' self.modelfile += '\n' self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\n' self.modelfile += 'import numpy as np\n' self.modelfile += 'from pathlib import Path\n' if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': self.modelfile += 'from tensorflow import constant' self.modelfile += '\n' self.modelfile += 'from tf_agents.trajectories import time_step' self.modelfile += '\n' self.requirementfile += 'tensorflow==2.5.0' if model.lower() == 'lstm' or model.lower() == 'mlp': self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.requirementfile += 'tensorflow==2.5.0' if(learner_type == 'Text Similarity'): self.modelfile += 'def cosine_distance(vests):' self.modelfile += '\n'; self.modelfile += ' x, y = vests' self.modelfile += '\n'; self.modelfile += ' x = K.l2_normalize(x, axis=-1)' self.modelfile += '\n'; self.modelfile += ' y = K.l2_normalize(y, axis=-1)' self.modelfile += '\n'; self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)' self.modelfile += '\n'; self.modelfile += 'def cos_dist_output_shape(shapes):' self.modelfile += '\n'; self.modelfile += ' shape1, shape2 = shapes' self.modelfile += '\n'; self.modelfile += ' return (shape1[0],1)' self.modelfile += '\n'; if(learner_type == 'TextDL' or learner_type == 'DL'): if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'): self.modelfile += 'def recall_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())' self.modelfile += '\n'; self.modelfile += ' return recall' self.modelfile += '\n'; if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'): self.modelfile += 'def precision_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' precision = true_positives / (predicted_positives + K.epsilon())' self.modelfile += '\n'; self.modelfile += ' return precision' self.modelfile += '\n'; if(scoreParam.lower() == 'f1_score'): self.modelfile += 'def f1_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' precision = 
precision_m(y_true, y_pred)' self.modelfile += '\n'; self.modelfile += ' recall = recall_m(y_true, y_pred)' self.modelfile += '\n'; self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))' self.modelfile += '\n'; if(scoreParam.lower() == 'rmse'): self.modelfile += 'def rmse_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))' self.modelfile += '\n'; if(scoreParam.lower() =='r2'): self.modelfile += 'def r_square(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))' self.modelfile += '\n'; self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))' self.modelfile += '\n'; self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))' self.modelfile += '\n'; if(learner_type.lower() in ['similarityidentification','contextualsearch']): self.modelfile += 'from pathlib import Path\n' if model_type == 'BM25': self.modelfile += 'from rank_bm25 import BM25Okapi\n' elif scoreParam == 'VectorDB Cosine': self.modelfile += 'import chromadb\n' else: self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\n' self.pythonpackage += '========== Python Packags Requires =========' self.pythonpackage += '\n' self.pythonpackage += 'scikit-learn' self.pythonpackage += '\n' self.pythonpackage += 'scipy' self.pythonpackage += '\n' self.pythonpackage += 'numpy' self.pythonpackage += '\n' if((learner_type == 'DL') or (learner_type =='TextDL')): self.modelfile += 'import numpy as np' self.modelfile += '\n' self.requirementfile += 'scikit-learn==0.21.3' self.requirementfile += '\n' self.requirementfile += 'scipy==1.3.3' self.requirementfile += '\n' self.requirementfile += 'numpy==1.17.4' self.requirementfile += '\n' if(learner_type == 'TextML'): self.requirementfile += 'spacy==2.2.3' self.requirementfile += '\n' self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz' self.requirementfile += '\n' if(learner_type == 'DL' or learner_type == 'TextDL'): self.requirementfile += 'keras==2.3.1' self.requirementfile += '\n' self.requirementfile += 'tensorflow==2.0.0b1' self.requirementfile += '\n' if(learner_type == 'RecommenderSystem'): self.requirementfile += 'surprise' self.requirementfile += '\n' if(method == 'package'): self.modelfile += 'import surprise' self.modelfile += '\n' self.modelfile += 'import statsmodels' self.modelfile += '\n' self.requirementfile += 'statsmodels==0.10.2' self.requirementfile += '\n' def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False): self.readme='========== Files Structures ==========' self.readme+='\n' self.readme+=modelfile+' ------ Trained Model' self.readme+='\n' self.readme+='aion_prediction.py --> Python package entry point' self.readme+='\n' if not single_file: self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric' self.readme+='\n' self.readme+='script/selector.py --> Feature Selection' self.readme+='\n' self.readme+='script/trained_model.py --> Read the model file and call the prediction' self.readme+='\n' self.readme+='script/output_format.py --> Output formatter file' self.readme+='\n' self.readme+= self.pythonpackage self.readme+= '========== How to call the model ==========' self.readme+='\n' self.readme+= '============== From Windows Terminal ==========' self.readme+='\n' if method == 'optimus_package': self.readme += 'python aion_prediction.py filename.json' 
self.readme +='\n' self.readme += '========== Embedded Methods ==========' self.readme +='\n' self.readme += 'Function Name: predict_from_json - When input is Json Data' self.readme +='\n' self.readme += 'Function Name: predict_from_file - When input is Json File' self.readme +='\n' else: callpython = 'python aion_prediction.py "[{' for x in features: if(callpython != 'python prediction.py "[{'): callpython += ',' callpython += '\\\"'+str(x)+'\\\"'+':'+'\\\"'+str(x)+'_value'+'\\\"' callpython += '}]"' self.readme += callpython self.readme+='\n' self.readme+= '============== From Linux Terminal ==========' self.readme+='\n' callpython = 'python aion_prediction.py \'[{' temp =callpython for x in features: if(callpython != temp): callpython += ',' callpython += '"'+str(x)+'"'+':'+'"'+str(x)+'_value'+'"' callpython += '}]\'' self.readme += callpython self.readme+='\n' self.readme+= '============== Output ==========' self.readme+='\n' self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"}]}' ## For Single Row/Record' self.readme+='\n' self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"},{"Data1":"Value","prediction":"Value"}]} ## For Multiple Row/Record' self.readme+='\n' self.readme+= '{"status":"ERROR","message":"description"} ## In Case Exception or Error' self.readme+='\n' #print(self.readme) filename = os.path.join(deploy_path,'readme.txt') self.log.info('-------> Readme File Location: '+filename) f = open(filename, "wb") f.write(str(self.readme).encode('utf8')) f.close() def create_class(self,classname): #self.modelfile += 'class '+classname+'(object):' self.modelfile += 'class trained_model(object):' self.modelfile += '\n' def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''): profiler = deploy.get('profiler',{}) if isinstance(features, str): features = features.split(',') code = f""" import scipy import joblib import numpy as np import pandas as pd from pathlib import Path """ if text_feature: code += """ import importlib.util\n""" if wordToNumericFeatures: code += """ from word2number import w2n def s2n(value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan """ if 'code' in deploy.get('preprocess',{}).keys(): code += deploy['preprocess']['code'] if profiler.get('conversion_method','').lower() == 'glove': code += """ class inputprofiler(object): def __init__(self): self.model = None preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) from text.Embedding import load_pretrained from text import TextProcessing model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') embed_size, loaded_model = load_pretrained(model_path) self.model.set_params(text_process__vectorizer__external_model = loaded_model) else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ elif profiler.get('conversion_method','').lower() == 'fasttext': code += """ def get_pretrained_model_path(): try: from AION.appbe.dataPath import DATA_DIR modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' except: modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' if not modelsPath.exists(): modelsPath.mkdir(parents=True, exist_ok=True) return modelsPath class inputprofiler(object): def __init__(self): self.model = None preprocess_path = 
Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) if not importlib.util.find_spec('fasttext'): raise ValueError('fastText not installed') else: import os import fasttext import fasttext.util cwd = os.getcwd() os.chdir(get_pretrained_model_path()) fasttext.util.download_model('en', if_exists='ignore') loaded_model = fasttext.load_model('cc.en.300.bin') os.chdir(cwd) self.model.set_params(text_process__vectorizer__external_model = loaded_model) self.model.set_params(text_process__vectorizer__external_model_type = 'binary') else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ else: code += """ class inputprofiler(object): def __init__(self): self.model = None preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if 'code' in deploy.get('preprocess',{}).keys(): code += " df = preprocess( df)\n" if wordToNumericFeatures: code += f""" df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))""" if profiler.get('unpreprocessed_columns'): code += f""" unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}'] df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if profiler.get('force_numeric_conv'): code += f""" df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce') """ code += f""" if self.model: df = self.model.transform(df)""" code += f""" columns = {output_columns} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) """ ##The below if loop for avoiding unpreprocessed column variable storing which is not used for anomaly detection if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': pass else: if profiler.get('unpreprocessed_columns'): code += f""" df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': ##This below set_index is wrong, because we drop datetimefeature before profiling and doing set_index. So commented now. 
# code += f""" # df.set_index('{datetimeFeature}', inplace=True)""" code += f""" return(df,'{datetimeFeature}')\n""" else: code += f""" return(df)""" return code def no_profiling_code(self, features): if isinstance(features, str): features = features.split(',') return f""" import pandas as pd import numpy as np class inputprofiler(object): def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[{features}] """ def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''): filename = str(Path(deploy_path)/'script'/'inputprofiler.py') if 'profiler' in config: if model_type == 'BM25': code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features']) elif model == 'KaplanMeierFitter': code = self.no_profiling_code(features) elif model.lower() in ['arima', 'fbprophet']: #task 12627 code = self.no_profiling_code('noofforecasts') else: code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature) if code: with open(filename,'w',encoding="utf-8") as f: f.write(code) self.log.info('-------> Profiler File Location :'+filename) return self.profilerfile += 'import pandas as pd' self.profilerfile += '\n' self.profilerfile += 'import joblib' self.profilerfile += '\n' self.profilerfile += 'import os' self.profilerfile += '\n' self.profilerfile += 'from word2number import w2n' self.profilerfile += '\n' self.profilerfile += 'import numpy as np' self.profilerfile += '\nfrom pathlib import Path\n' #print("1") #print(profiler) if(learner_type == 'Text Similarity' or len(text_features) > 0): self.profilerfile += 'from text import TextProcessing' self.profilerfile += '\n' self.profilerfile += 'def textCleaning(textCorpus):' self.profilerfile += '\n' self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()' self.profilerfile += '\n' self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)' self.profilerfile += '\n' self.profilerfile += ' return(textCorpus)' self.profilerfile += '\n' self.profilerfile += 'class inputprofiler(object):' self.profilerfile += '\n' self.profilerfile += ' def s2n(self,value):' self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' x=eval(value)' self.profilerfile += '\n' self.profilerfile += ' return x' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' return w2n.word_to_num(value)' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' return np.nan ' self.profilerfile += '\n' self.profilerfile += ' def apply_profiler(self,df):' self.profilerfile += '\n' if(len(wordToNumericFeatures) > 0): for w2nFeature in wordToNumericFeatures: if w2nFeature not in features: continue self.profilerfile += " df['"+w2nFeature+"']=df['"+w2nFeature+"'].apply(lambda x: self.s2n(x))" self.profilerfile += '\n' self.profilerfile += " df = df.replace(r'^\s*$', np.NaN, regex=True)" self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' 
df.dropna(how="all",axis=1,inplace=True)' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' df.fillna(0)' self.profilerfile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/"model"/"preprocess_pipe.pkl"\n' self.profilerfile += ' if preprocess_path.exists():\n' self.profilerfile += ' model = joblib.load(preprocess_path)\n' if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder': self.profilerfile += f" df[{features}] = model.transform(df[{features}])\n" else: self.profilerfile += f" df = model.transform(df)\n" if 'operation' in profiler: y = profiler['operation'] for action in y: feature = action['feature'] #if feature not in features: # continue operation = action['Action'] if(operation == 'Drop'): self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' self.profilerfile += " df.drop(columns=['"+feature+"'],inplace = True)" self.profilerfile += '\n' if(operation == 'FillValue'): self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' fvalue = action['value'] self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value='"+fvalue+"')" self.profilerfile += '\n' if(operation == 'Encoder'): value = action['value'] value = value.replace("\n", "\\n") self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' self.profilerfile += " le_dict="+str(value) self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].apply(lambda x: le_dict.get(x,-1))" self.profilerfile += '\n' self.profilerfile += " if -1 in df['"+feature+"'].values:" self.profilerfile += '\n' self.profilerfile += " raise Exception('Category value of "+feature+" not present in training data')" self.profilerfile += '\n' if 'conversion' in profiler: catergoryConverton = profiler['conversion'] #print(catergoryConverton) if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton): self.profilerfile += " encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))" self.profilerfile += '\n' self.profilerfile += " CategoryFeatures = "+str(catergoryConverton['features']) self.profilerfile += '\n' if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding': self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures]).toarray()" self.profilerfile += '\n' self.profilerfile += " feature_labels = encoder.get_feature_names(CategoryFeatures)" self.profilerfile += '\n' self.profilerfile += " transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) " self.profilerfile += '\n' else: self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures])" self.profilerfile += '\n' self.profilerfile += " dataColumns=list(df.columns)" self.profilerfile += '\n' self.profilerfile += " nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))" self.profilerfile += '\n' self.profilerfile += " dataArray=df[nonNormFeatures]" self.profilerfile += '\n' self.profilerfile += " df = pd.concat([dataArray, transformed_data],axis=1)" self.profilerfile += '\n' y = json.loads(numericToLabel_json) for feature_details in y: feature = feature_details['feature'] if feature not in features: continue label = feature_details['Labels'] bins = feature_details['Bins'] self.profilerfile += " if '"+feature+"' in df.columns:" 
self.profilerfile += '\n' self.profilerfile += " cut_bins="+str(bins) self.profilerfile += '\n' self.profilerfile += " cut_labels="+str(label) self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = pd.cut(df['"+feature+"'],bins=cut_bins,labels=cut_labels)" self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value=0)" self.profilerfile += '\n' if(len(text_features) > 0): if(len(text_features) > 1): self.profilerfile += ' merge_features = '+str(text_features) self.profilerfile += '\n' self.profilerfile += ' df[\'combined\'] = df[merge_features].apply(lambda row: \' \'.join(row.values.astype(str)), axis=1)' self.profilerfile += '\n' self.profilerfile += ' features = [\'combined\']' self.profilerfile += '\n' else: self.profilerfile += " features = "+str(text_features) self.profilerfile += '\n' if model_type == 'BM25': self.profilerfile += """\ df_text = df[features[0]] pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) df['tokenize'] = pipe.transform(df_text)\n""".format(preprocessing_pipe=preprocessing_pipe) elif conversion_method == 'sentenceTransformer': self.profilerfile += """\ df_text = df[features[0]] from sentence_transformers import SentenceTransformer model = SentenceTransformer(\'sentence-transformers/msmarco-distilroberta-base-v2\') df_vect = model.encode(df_text) for empCol in {text_features}: df = df.drop(columns=[empCol]) if isinstance(df_vect, np.ndarray): df1 = pd.DataFrame(df_vect) else: df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names()) df1 = df1.add_suffix(\'_vect\') df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) else: self.profilerfile += """\ df_text = df[features[0]] pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) df_vect=pipe.transform(df_text) for empCol in {text_features}: df = df.drop(columns=[empCol]) if isinstance(df_vect, np.ndarray): df1 = pd.DataFrame(df_vect) else: df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names()) df1 = df1.add_suffix(\'_vect\') df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) if(learner_type == 'Text Similarity'): self.profilerfile += ' df[\''+firstDocFeature+'\'] = textCleaning(df[\''+firstDocFeature+'\'])' self.profilerfile += '\n' self.profilerfile += ' df[\''+secondDocFeature+'\'] = textCleaning(df[\''+secondDocFeature+'\'])' self.profilerfile += '\n' if len(normFeatures) > 0 and normalizer != '': self.profilerfile += " normFeatures = "+str(normFeatures) self.profilerfile += '\n' self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\'..\',\'model\',\''+normalizer+'\'))' self.profilerfile += '\n' self.profilerfile += ' dataColumns=list(df.columns)' self.profilerfile += '\n' self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))' self.profilerfile += '\n' self.profilerfile += ' dataframe=df[normFeatures]' self.profilerfile += '\n' self.profilerfile += ' transDf = normalizepipe.transform(dataframe)' self.profilerfile += '\n' self.profilerfile += ' nontransDF=df[nonNormFeatures].values' self.profilerfile += '\n' self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures' self.profilerfile += '\n' self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, 
nontransDF)),columns=dataColumns)' self.profilerfile += '\n' self.profilerfile += ' df=scaledDf' self.profilerfile += '\n' else: self.profilerfile += ' df=df.dropna()\n' self.profilerfile += ' return(df)' filename = os.path.join(deploy_path,'script','inputprofiler.py') self.log.info('-------> Profiler File Location :'+filename) f = open(filename, "w",encoding="utf-8") f.write(str(self.profilerfile)) f.close() def isEnglish(self, s): try: s.encode(encoding='utf-8').decode('ascii') except UnicodeDecodeError: return False else: return True def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): cs.create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config) def create_init_function_for_regression(self,modelfile): self.modelfile += ' def __init__(self):' self.modelfile += '\n' self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): cs.create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig) def create_predict_proba(self,learner_type,method): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' self.modelfile += ' return self.model.predict_proba(X)' def create_forcast(self,method,no_of_prediction): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' self.modelfile += ' no_of_prediction = '+str(no_of_prediction) self.modelfile += '\n' self.modelfile += ' lag_order = self.model.k_ar' self.modelfile += '\n' self.modelfile += ' return self.model.forecast(X.values[-lag_order:],steps=no_of_prediction)' def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): scorePrm = scoreParam cs.create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scorePrm) def save_model_deploy(self,outputfolder,modelname): #filename = outputfolder+modelname+'.py' filename = os.path.join(outputfolder,'script','trained_model.py') self.log.info('-------> Model File Location :'+filename) f = open(filename, "w",encoding="utf-8") f.write(str(self.modelfile)) f.close() def create_TextCleaner(self,outputfolder): profilerPath = os.path.join(outputfolder,'profiler') try: os.makedirs(profilerPath) except OSError: self.log.info("ProfilePath Folder Already Exists") try: textprofileFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','textDataProfiler.py') initFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','__init__.py') shutil.copy2(textprofileFileLocation,profilerPath) shutil.copy2(initFileLocation,profilerPath) ''' if(platform.system() == 'Windows'): 
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\textDataProfiler.py',profilerPath) shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\__init__.py',profilerPath) else: shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/textDataProfiler.py',profilerPath) shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/__init__.py',profilerPath) ''' except OSError: self.log.info("Copy to Profiler Path Failed") def listToString(self,s): str1='[' for feature in s: if(str1 != '['): str1 += ',' str1 += '"'+feature+'"' str1+=']' return str1 def print_files(self): self.log.info(self.modelfile) def create_util_folder(self, deploy_path,learner_type): import tarfile ext_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..','utilities')) for x in os.listdir(ext_path): if x.endswith('.tar'): if x not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: tarPackage = os.path.join(ext_path, x) my_tar = tarfile.open(tarPackage) my_tar.extractall(deploy_path) my_tar.close() else: if learner_type == 'RecommenderSystem': tarPackage = os.path.join(ext_path, x) my_tar = tarfile.open(tarPackage) my_tar.extractall(deploy_path) my_tar.close() def deploy_model(self,deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deploy_path,features,profiler,datalocation,output_label,column_merge_flag,textFeatures,numericalFeatures,nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,optimizationmethod,deployFolder,iterName,iterVersion,wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,no_of_prediction,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,config=None): try: serviceName = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion) self.log.info('-------> Deploy Location :'+deploy_path) if production.is_supported(model_type.lower()): if learner_type == 'Text Similarity': coder = production.get_deployer(learner_type) coder.create_file(deploy_path, preprocessing_pipe, saved_model, firstDocFeature, secondDocFeature) elif model_type.lower() in ['classification', 'regression','clustering','timeseriesforecasting']: params = {} params['usecase_name']= iterName params['usecase_ver']= iterVersion params['features']={} params['features']['input_feat'] = config['profiler']['input_features'] params['features']['target_feat'] = targetFeature params['features']['text_feat'] = textFeatures params['paths']={} params['paths']['deploy'] = Path(deploy_path) params['paths']['usecase'] = params['paths']['deploy'].parent params['profiler']=config['profiler'] if 'code' in config.get('preprocess',{}).keys(): params['profiler']['preprocess']=config['preprocess'] params['selector']={} params['selector']['reducer']=True if pcaModel_pickle_file else False params['selector']['reducer_file']=pcaModel_pickle_file if pcaModel_pickle_file: params['selector']['input_features']=bpca_features params['selector']['output_features']=apca_features else: params['selector']['input_features']=config['profiler']['input_features'] params['selector']['output_features']=features params['training']={} params['training']['algo']= model 
params['training']['model_file']=saved_model if model_type.lower() == 'timeseriesforecasting': if params['training']['algo'] in ['LSTM','MLP','ENCODER_DECODER_LSTM_MVI_UVO']: params['training']['lag_order'] = int(lag_order) params['training']['scaler_file'] = Path(scalertransformationFile).name elif params['training']['algo'] == 'VAR': params['training']['dictDiffCount'] = dictDiffCount params['training']['no_of_prediction'] = no_of_prediction elif params['training']['algo'] == 'FBPROPHET': params['training']['sessonal_freq'] = sessonal_freq params['training']['additional_regressors'] = additional_regressors self.log.info(params) deployer = production.get_deployer(model_type.lower(), params=params) deployer.run( ) self.log.info('Status:- |... Model deployment files created') self.log.info('Status:- |... Model deployment completed') return else: # for output_formatter.py from prediction_package.output_formatter import outputFormatter outputObj = outputFormatter() outputObj.crate_output_format_file(deploy_path, learner_type, model_type, model, output_label, threshold, trained_data_file, dictDiffCount, targetFeature, features,datetimeFeature) #for aion_predict.py from prediction_package.aion_prediction import aionPrediction predictionObj = aionPrediction() # print(deploy_path) predictionObj.create_prediction_file(deploy_name, deploy_path, learner_type, grouperbyjson,rowfilterexpression,model_type,datetimeFeature) # for aion_service.py predictionObj.create_model_service(deploy_path, serviceName, model_type) # for aion_publish.py predictionObj.create_publish_service(usecaseLocation, iterName, iterVersion, model_type) if learner_type.lower()=="recommendersystem": # Task 11190--- #For recommender system from prediction_package.recommender_code import generate_recommender_code generate_recommender_code(deploy_path) return #self.create_TextCleaner(deploy_path) if(len(textFeatures) > 0): self.create_TextCleaner(deploy_path) self.include_import_file(learner_type,deployJson['method'],scoreParam, model_type,model) if((learner_type == 'TS' and model.lower() not in ['lstm','mlp','var']) or learner_type == 'RecommenderSystem'): features=[] self.create_class(deploy_name) if len(bpca_features) != 0: self.create_profiler_file(learner_type,deploy_path,profiler,bpca_features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature) else: self.create_profiler_file(learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature) self.create_selector_file(deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature,model_type, model,config) self.create_init_function_for_classification(saved_model,'classes',learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,model,model_type,imageconfig) except Exception as e: print(e) import traceback exception_type, exception_object, exception_traceback = sys.exc_info() filename = exception_traceback.tb_frame.f_code.co_filename line_number = exception_traceback.tb_lineno self.log.info("Exception type: ", exception_type) self.log.info("File name: 
", filename) self.log.info("Line number: ", line_number) self.log.info("multivariate model build error traceback: \n"+str(traceback.print_exc())) raise Exception(e) #print(model) if(model.lower() == 'var'): self.log.info("Create Forecast Function") self.create_forcast(deployJson['method'],no_of_prediction) else: self.create_predict(learner_type,deployJson['method'],model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,features,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam) self.save_model_deploy(deploy_path,deploy_name) if(len(textFeatures) > 0): if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': predictionObj.create_text_drift_file(deploy_path,textFeatures,targetFeature,model_type) if model_type.lower() == 'classification': predictionObj.create_classification_text_performance_file(deploy_path,textFeatures,targetFeature) elif model_type.lower() == 'regression': predictionObj.create_regression_text_performance_file(deploy_path,textFeatures,targetFeature) else: if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': #task 11997 predictionObj.create_drift_file(deploy_path,features,targetFeature,model_type) if model_type.lower() == 'classification': predictionObj.create_classification_performance_file(deploy_path,features,targetFeature) elif model_type.lower() == 'regression': predictionObj.create_regression_performance_file(deploy_path,features,targetFeature) self.log.info('Status:- |... Model deployment files created') self.crate_readme_file(deploy_path,saved_model,features,deployJson['method']) from prediction_package.requirements import requirementfile requirementfile(deploy_path,model,textFeatures,learner_type) os.chdir(deploy_path) textdata = False if(learner_type == 'Text Similarity' or len(textFeatures) > 0): textdata = True self.create_util_folder(deploy_path,learner_type) self.log.info('Status:- |... 
Model deployment completed') def deployTSum(self,deploy_path,preTrainedModellocation): def create_predict(preTrainedModellocation): text = f""" import sys import json def predict(data): try: import pandas as pd import numpy as np from pathlib import Path keywordsFile =Path(__file__).parent/'data'/'keywordDataBase.csv' outputSumFile =Path(__file__).parent/'data'/'summarizedOutput.csv' fileName=data #print("fileName---",fileName) inputDataFileFrame = pd.DataFrame() inputDataFileFrame['Sentences']="" rowIndex=0 if fileName.endswith(".pdf"): from pypdf import PdfReader reader = PdfReader(fileName) number_of_pages = len(reader.pages) text="" textOutputForFile="" OrgTextOutputForFile="" for i in range(number_of_pages) : page = reader.pages[i] text1 = page.extract_text() text=text+text1 import nltk tokens = nltk.sent_tokenize(text) for sentence in tokens: sentence=sentence.replace("\\n", " ") if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) : continue inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 if fileName.endswith(".txt"): data=[] with open(fileName, "r",encoding="utf-8") as f: data.append(f.read()) str1 = "" for ele in data: str1 += ele sentences=str1.split(".") count=0 for sentence in sentences: count += 1 inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 inputDataFileFrame['LabelByKw']=0 #print(inputDataFileFrame) keywordsFileFrame=pd.read_csv(keywordsFile,encoding='utf-8') Keyword_list = keywordsFileFrame['Keyword'].tolist() for i in inputDataFileFrame.index: for x in Keyword_list: if (str(inputDataFileFrame["Sentences"][i])).lower().find(x) != -1: inputDataFileFrame['LabelByKw'][i]=1 break import pickle from sklearn.preprocessing import LabelEncoder pkl_filename='classificationModel.sav' pkl_filename =Path(__file__).parent/'model'/'classificationModel.sav' with open(pkl_filename, 'rb') as file: pickle_model = pickle.load(file) testsample=inputDataFileFrame[["Sentences"]] labelencoder = LabelEncoder() testsample["Sentences"] = labelencoder.fit_transform(testsample["Sentences"]) y_predicted = pickle_model.predict_proba(testsample) df=pd.DataFrame({{"SectionName":np.nan,"Sentences":np.nan, "Predicted_Prob":y_predicted[:,1]}}) df['LabelByModel']=df['Predicted_Prob'].apply(lambda x: 0 if x <= 0.5 else 1 ) inputDataFileFrame['LabelByModel']= df['LabelByModel'] textToSum="" for i in inputDataFileFrame.index: if (inputDataFileFrame['LabelByModel'][i] or inputDataFileFrame['LabelByKw'][i]) : textToSum=textToSum+" "+inputDataFileFrame["Sentences"][i] stdir=r"{preTrainedModellocation}" stdir = stdir.replace('\\\\', '\\\\\\\\') from transformers import AutoTokenizer, AutoModelForSeq2SeqLM modelbert = AutoModelForSeq2SeqLM.from_pretrained(stdir,local_files_only=True) tokenizer = AutoTokenizer.from_pretrained(stdir,local_files_only=True) inputs = tokenizer("summarize: " + textToSum, return_tensors="pt", max_length=512, truncation=True) outputs = modelbert.generate(inputs["input_ids"], max_length=512, min_length=140, length_penalty=2.0, num_beams=4, early_stopping=True) summarizedOutputOfSection= tokenizer.decode(outputs[0]) summarizedOutputOfSection=summarizedOutputOfSection.replace("</s>","") summarizedOutputOfSection=summarizedOutputOfSection.replace("<s>","") sumDatadata = [summarizedOutputOfSection] df = pd.DataFrame(sumDatadata, columns=['textSum']) df.to_csv(outputSumFile,encoding='utf-8') outputjson = {{"status":"SUCCESS","msg":"Press Download 
button to download summarized output","data":summarizedOutputOfSection}} print("predictions:",json.dumps(outputjson)) return (json.dumps(outputjson)) except KeyError as e: output = {{"status":"FAIL","message":str(e).strip('"')}} print("predictions:",json.dumps(output)) return (json.dumps(output)) except Exception as e: output = {{"status":"FAIL","message":str(e).strip('"')}} print("predictions:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = predict(sys.argv[1]) """ return text deploy_path = Path(deploy_path) aion_prediction = deploy_path/'aion_predict.py' with open(aion_prediction, 'w') as f: f.write(create_predict(preTrainedModellocation))
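For reference, a minimal sketch of how the file written by deployTSum is typically exercised; the deployment folder and input document below are assumptions, not values taken from this code:

# Illustrative sketch (assumed paths): once deployTSum has written aion_predict.py,
# the generated predict() takes a single .pdf or .txt path as sys.argv[1] and
# prints a JSON payload prefixed with "predictions:".
import subprocess
import sys
from pathlib import Path

deploy_path = Path('/tmp/aion_deploy')          # assumed deployment folder
script = deploy_path / 'aion_predict.py'
result = subprocess.run([sys.executable, str(script), '/tmp/report.pdf'],
                        capture_output=True, text=True)
print(result.stdout)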
recommender_code.py
#task 11190: Item based Recommender system---Usnish import os def generate_recommender_code(deployPath): code = """ import pandas as pd import numpy as np import os ITEMID = 'itemId' DATA_FOLDER = 'data' USER_ITEM_MATRIX = 'user_item_matrix.csv' ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv' RATING = 'rating' SIMILARITY_SCORE = 'similarity_score' class collaborative_filter(object): def __init__(self): self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0) self.matrix.index.name = ITEMID self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX)) self.item_similarity_cosine.index.name = ITEMID self.item_similarity_cosine.columns.name = ITEMID def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5): import operator if not isinstance(picked_userid,str): picked_userid = str(picked_userid) if picked_userid not in self.matrix.columns: raise KeyError("UserID Does Not Exist") # Movies that the target user has not watched try: picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index() picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist() # Movies that the target user has watched picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \ .sort_values(ascending=False)) \ .reset_index() \ .rename(columns={picked_userid: 'rating'}) # Dictionary to save the unwatched movie and predicted rating pair rating_prediction = {} # Loop through unwatched movies for picked_movie in picked_userid_unwatched: if not isinstance(picked_movie,str): picked_movie = str(picked_movie) # Calculate the similarity score of the picked movie with other movies try: picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename( columns={picked_movie: SIMILARITY_SCORE}) # Rank the similarities between the picked user watched movie and the picked unwatched movie. picked_userid_watched_similarity = pd.merge(left=picked_userid_watched, right=picked_movie_similarity_score, on=ITEMID, how='inner') \ .sort_values(SIMILARITY_SCORE, ascending=False)[ :number_of_similar_items] # Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user try: predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6) except Exception as e: predicted_rating = 0 # Save the predicted rating in the dictionary rating_prediction[picked_movie] = predicted_rating except Exception as e: rating_prediction[picked_movie] = 0 # Return the top recommended movies return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations] except Exception as e: print(e) raise KeyError(str(e)) def predict(self,X): predictions = [] for index,row in X.iterrows(): score = self.item_based_rec(int(row["uid"]),int(row["numberOfRecommendation"])) df = pd.DataFrame(score,columns=['ItemId','Ratings']) predictions.append(df) return predictions""" filename = os.path.join(deployPath, 'script', 'item_recommendation.py') # print(deploy_path) f = open(filename, "wb") f.write(str(code).encode('utf8')) f.close()
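As a usage illustration (the request values are assumptions), the generated script/item_recommendation.py reads the user-item matrix and the item-similarity matrix from the deployment's data folder, and its predict() expects a DataFrame with uid and numberOfRecommendation columns:

# Illustrative sketch of driving the generated collaborative_filter class.
# Assumes generate_recommender_code() has written script/item_recommendation.py and
# that data/user_item_matrix.csv and data/item_similarity_matrix.csv are in place.
import pandas as pd
from script.item_recommendation import collaborative_filter

recommender = collaborative_filter()
requests = pd.DataFrame([{'uid': 1, 'numberOfRecommendation': 5}])
for recommendations in recommender.predict(requests):
    print(recommendations)        # DataFrame with ItemId and Ratings columns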
aion_prediction.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import json import shutil import logging class aionPrediction: def __init__(self): self.log = logging.getLogger('eion') def create_optimus_prediction_file (self,classname,deploy_path,learner_type): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' self.predictionFile += 'from importlib import import_module' self.predictionFile += '\n' self.predictionFile += 'import importlib.util' self.predictionFile += '\n' self.predictionFile += 'class prediction:' self.predictionFile += '\n' self.predictionFile += ' def predict_from_json(self,json_data):' self.predictionFile += '\n' self.predictionFile += ' data = json.loads(json_data)' self.predictionFile += '\n' self.predictionFile += ' output=self.predict(data)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += ' def predict_from_file(self,filename):' self.predictionFile += '\n' self.predictionFile += ' with open(filename,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' data = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' output=self.predict(data)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += ' def predict(self,json_data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' #self.predictionFile += ' jsonData = json.loads(json_data)' self.predictionFile += ' jsonData=json_data' self.predictionFile += '\n' self.predictionFile += ' model_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/trained_model.py")' self.predictionFile += '\n' self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)' self.predictionFile += '\n' self.predictionFile += ' model_obj.loader.exec_module(model)' self.predictionFile += '\n' #if(learner_type != 'TextML'): self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/inputprofiler.py")' self.predictionFile += '\n' self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)' self.predictionFile += '\n' self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)' self.predictionFile += '\n' self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location("module.name", 
os.path.dirname(os.path.abspath(__file__))+"/selector.py")' self.predictionFile += '\n' self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)' self.predictionFile += '\n' self.predictionFile += ' selector_obj.loader.exec_module(selector)' self.predictionFile += '\n' self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/output_format.py")' self.predictionFile += '\n' self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)' self.predictionFile += '\n' self.predictionFile += ' output_format_obj.loader.exec_module(output_format)' self.predictionFile += '\n' self.predictionFile += ' df = json_normalize(jsonData)' self.predictionFile += '\n' self.predictionFile += ' df0 = df.copy()' self.predictionFile += '\n' #if(learner_type != 'TextML'): self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()' self.predictionFile += '\n' self.predictionFile += ' df = profilerobj.apply_profiler(df)' self.predictionFile += '\n' self.predictionFile += ' selectobj = selector.selector()' self.predictionFile += '\n' self.predictionFile += ' df = selectobj.apply_selector(df)' self.predictionFile += '\n' self.predictionFile += ' output = model.trained_model().predict(df,"")' self.predictionFile += '\n' self.predictionFile += ' outputobj = output_format.output_format()' self.predictionFile += '\n' self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' #self.predictionFile += '\n' #self.predictionFile += ' print(output)' self.predictionFile += '\n' self.predictionFile += ' return output' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' return json.dumps(output)' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' return json.dumps(output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' predictobj = prediction()' self.predictionFile += '\n' self.predictionFile += ' predictobj.predict_from_file(sys.argv[1])' self.predictionFile += '\n' filename = os.path.join(deploy_path,'prediction.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_text_drift_file(self,deploy_path,features,target,model_type): #task-14549 self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from monitoring import check_drift' self.predictionFile += '\n' self.predictionFile += 'def drift(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' 
self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'target\'] = \''+target+'\'' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' self.predictionFile += '\n' else: self.predictionFile += ' htmlfilepath=\'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.dumps(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = check_drift(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = json.loads(output)' self.predictionFile += '\n' self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)' self.predictionFile += '\n' self.predictionFile += ' print("drift:", json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += 'def evidently_details(deployJson):' self.predictionFile += '\n' self.predictionFile += ' features = deployJson[\'features\'].split(\',\')' self.predictionFile += '\n' self.predictionFile += ' target = deployJson[\'target\']' self.predictionFile += '\n' self.predictionFile += """\ try: from evidently.report import Report from evidently.metrics import TextDescriptorsDriftMetric, ColumnDriftMetric from evidently.pipeline.column_mapping import ColumnMapping from sklearn.preprocessing import LabelEncoder historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) historicaldataFrame.columns = historicaldataFrame.columns.str.strip() currentdataFrame.columns = currentdataFrame.columns.str.strip() hdf = historicaldataFrame.dropna(subset=features) cdf = currentdataFrame.dropna(subset=features) hdf['Text_Features'] = hdf[features].apply("-".join, axis=1) cdf['Text_Features'] = cdf[features].apply("-".join, axis=1) hdf['target'] = historicaldataFrame[target] cdf['target'] = currentdataFrame[target] le = LabelEncoder() le.fit(hdf['target']) hdf['target'] = le.transform(hdf['target']) le.fit(cdf['target']) cdf['target'] = le.transform(cdf['target']) hd = hdf[['Text_Features', 'target']] cd = cdf[['Text_Features', 'target']] column_mapping = ColumnMapping() column_mapping.target = 'target' column_mapping.prediction = 'target' column_mapping.text_features = 
['Text_Features'] column_mapping.numerical_features = [] column_mapping.categorical_features = [] performance_report = Report(metrics=[ColumnDriftMetric('target'),TextDescriptorsDriftMetric(column_name='Text_Features')]) performance_report.run(reference_data=hd, current_data=cd,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),"log","My_report.html") performance_report.save_html(report) return(report) except Exception as e: print('Error: ', e) return('NA')""" self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = drift(sys.argv[1])' filename = os.path.join(deploy_path,'aion_ipdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_drift_file(self,deploy_path,features,target,model_type): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from monitoring import check_drift' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' self.predictionFile += 'from script.inputprofiler import inputprofiler' self.predictionFile += '\n' self.predictionFile += 'def drift(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'target\'] = \''+target+'\'' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' self.predictionFile += '\n' else: self.predictionFile += ' htmlfilepath=\'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.dumps(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = check_drift(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = json.loads(output)' self.predictionFile += '\n' self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)' self.predictionFile += '\n' self.predictionFile += ' output = json.dumps(output)' self.predictionFile += '\n' self.predictionFile += ' print("drift:",output)' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception 
as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += 'def evidently_details(deployJson):' self.predictionFile += '\n' self.predictionFile += ' features = deployJson[\'features\'].split(\',\')' self.predictionFile += '\n' self.predictionFile += ' target = deployJson[\'target\']' self.predictionFile += '\n' self.predictionFile += """\ try: from evidently.report import Report from evidently.metric_preset import DataDriftPreset historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) historicaldataFrame.columns = historicaldataFrame.columns.str.strip() currentdataFrame.columns = currentdataFrame.columns.str.strip() profilerobj = inputprofiler() historicaldataFramep = profilerobj.run(historicaldataFrame) currentdataFramep = profilerobj.run(currentdataFrame) hdf = historicaldataFramep[features] cdf = currentdataFramep[features] hdf['target'] = historicaldataFrame[target] cdf['target'] = currentdataFrame[target] data_drift_report = Report(metrics = [DataDriftPreset()]) data_drift_report.run(reference_data=hdf,current_data=cdf,column_mapping = None) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','my_report.html') data_drift_report.save_html(report) return(report) except Exception as e: print('Error') return('NA')""" self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = drift(sys.argv[1])' filename = os.path.join(deploy_path,'aion_ipdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' if(learner_type.lower() != 'recommendersystem'): #task 11190 self.predictionFile += 'from script.selector import selector' self.predictionFile += '\n' self.predictionFile += 'from script.inputprofiler import inputprofiler' self.predictionFile += '\n' #self.predictionFile += 'from '+classname+' import '+classname self.predictionFile += 'from script.trained_model import trained_model' self.predictionFile += '\n' else: self.predictionFile += 'from script.item_recommendation import collaborative_filter' self.predictionFile += '\n' self.predictionFile += 'from script.output_format import output_format' self.predictionFile += '\n' if (learner_type != 'RecommenderSystem'): #task 11190 self.predictionFile += 'profilerobj = inputprofiler()' self.predictionFile += '\n' self.predictionFile += 'selectobj = selector()' self.predictionFile += '\n' 
self.predictionFile += 'modelobj = trained_model()' self.predictionFile += '\n' else: self.predictionFile += 'colabobj = collaborative_filter()' self.predictionFile += '\n' self.predictionFile += 'outputobj = output_format()' self.predictionFile += '\n' self.predictionFile += 'def predict(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".tsv":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',sep=\'\\t\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' elif os.path.splitext(data)[1] == ".csv":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' elif os.path.splitext(data)[1] == ".dat":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' df = json_normalize(jsonData)' self.predictionFile += '\n' self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)' self.predictionFile += '\n' if str(rowfilterexpression) != '': self.predictionFile += ' filterexpression = "'+rowfilterexpression+'"' self.predictionFile += '\n' self.predictionFile += ' df = df.query(filterexpression)' self.predictionFile += '\n' #print(grouperbyjson) if str(grouperbyjson) != '': datetime = grouperbyjson['datetime'] unit = grouperbyjson['unit'] if unit == '': self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'])' self.predictionFile += '\n' else: self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'],unit=\''+unit+'\')' self.predictionFile += '\n' self.predictionFile += ' df = df.reset_index()' self.predictionFile += '\n' self.predictionFile += ' df.set_index(\'date\',inplace=True)' self.predictionFile += '\n' self.predictionFile += ' df = df.'+grouperbyjson['groupbystring'] self.predictionFile += '\n' self.predictionFile += ' df.columns = df.columns.droplevel(0)' self.predictionFile += '\n' self.predictionFile += ' df = df.reset_index()' self.predictionFile += '\n' self.predictionFile += ' df0 = df.copy()' self.predictionFile += '\n' if(learner_type != 'RecommenderSystem'): #task 11190 if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)' self.predictionFile += '\n' else: self.predictionFile += ' df = profilerobj.apply_profiler(df)' self.predictionFile += '\n' self.predictionFile += ' df = selectobj.apply_selector(df)' self.predictionFile += '\n' #self.predictionFile += ' modelobj = '+classname+'()' self.predictionFile += ' output = modelobj.predict(df,"")' self.predictionFile += '\n' else: self.predictionFile += ' output = colabobj.predict(df)' self.predictionFile += '\n' if 
model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)' self.predictionFile += '\n' else: self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = predict(sys.argv[1])' filename = os.path.join(deploy_path,'aion_predict.py') f = open(filename, "w") f.write(str(self.predictionFile)) f.close() def create_classification_text_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize # from evidently.dashboard import Dashboard # from evidently.tabs import ClassificationPerformanceTab from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict from evidently.report import Report from evidently.pipeline.column_mapping import ColumnMapping from evidently.metric_preset import ClassificationPreset def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.text_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()]) iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() 
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_classification_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from evidently.report import Report from evidently.metric_preset import ClassificationPreset from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') model_performance_dashboard = Report(metrics = [ClassificationPreset()]) model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') model_performance_dashboard.save_html(report) metrics_output = model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_model_service(self,deploy_path,serviceName,problemType): filedata = """ from flask import Flask, jsonify, request from flask_restful import Resource, Api from aion_predict import predict""" if problemType.lower() == 
'classification' or problemType.lower() == 'regression': filedata += """ from aion_xai import local_analysis from aion_ipdrift import drift from aion_opdrift import odrift""" filedata += """ import json import os import pandas as pd import io import argparse from pathlib import Path from flask_cors import CORS, cross_origin app = Flask(__name__) #cross origin resource from system arguments parser = argparse.ArgumentParser() parser.add_argument('-ip', '--ipaddress', help='IP Address') parser.add_argument('-p', '--port', help='Port Number') parser.add_argument("-cors", type=str, required=False) d = vars(parser.parse_args()) modelPath = Path(__file__).parent try: with open( (modelPath/'etc')/'display.json', 'r') as f: disp_data = json.load(f) is_explainable = not disp_data.get('textFeatures') except: disp_data = {} is_explainable = True if "cors" in d.keys(): if d["cors"] != '' and d["cors"] != None: d["cors"] = [s.strip() for s in d["cors"].split(",")] #cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}}) cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}}) api = Api(app) class predictapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() output = predict().run(json.dumps(data)) return jsonify(json.loads(output)) class predictfileapi(Resource): def post(self): if 'file' in request.files: file = request.files['file'] urlData = file.read() rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) data = rawData.to_json(orient='records') output = predict().run(data) return jsonify(json.loads(output)) else: displaymsg='File is mising' return jsonify(displaymsg) def get(self): msg=\""" RequestType: POST Body:send file content in body\""" return jsonify(msg) """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ class explainapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() if is_explainable: output = local_analysis(json.dumps(data)) else: output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"}) return jsonify(json.loads(output)) class monitoringapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): data = request.get_json() output = drift(json.dumps(data)) return jsonify(json.loads(output)) class performanceapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): data = request.get_json() output = odrift(json.dumps(data)) return jsonify(json.loads(output)) """ filedata += """ api.add_resource(predictapi, '/AION/{serviceName}/predict')""".format(serviceName=serviceName) filedata += """ api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')""".format(serviceName=serviceName) if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ 
api.add_resource(explainapi, '/AION/{serviceName}/explain') api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') api.add_resource(performanceapi, '/AION/{serviceName}/performance')""".format(serviceName=serviceName) filedata += """ if __name__ == '__main__': args = parser.parse_args() app.run(args.ipaddress,port = args.port,debug = True)""" filename = os.path.join(deploy_path,'aion_service.py') f = open(filename, "wb") f.write(str(filedata).encode('utf8')) f.close() def create_regression_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from evidently.report import Report from evidently.metric_preset import RegressionPreset from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_regression_text_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from aion_predict import predict from evidently.report import Report from 
evidently.pipeline.column_mapping import ColumnMapping from evidently.metric_preset import RegressionPreset def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_publish_service(self,datalocation,usecaseid,version,problemType): filename = os.path.join(datalocation,'aion_publish_service.py') if not os.path.exists(filename): filedata = """ import sys import json import time import sqlite3 import argparse import pandas as pd import io from pathlib import Path from datetime import datetime filename = Path(__file__).parent/'config.json' with open (filename, "r") as f: data = json.loads(f.read()) modelVersion = str(data['version']) modelPath = Path(__file__).parent/modelVersion sys.path.append(str(modelPath)) try: with open( (modelPath/'etc')/'display.json', 'r') as f: disp_data = json.load(f) is_explainable = not disp_data.get('textFeatures') except: disp_data = {} is_explainable = True from flask import Flask, jsonify, request from flask_restful import Resource, Api from flask_cors import CORS, cross_origin from flask import Response from aion_predict import predict """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ from aion_ipdrift import drift from aion_opdrift import odrift if is_explainable: from aion_xai import local_analysis """ filedata += """ dataPath = Path(__file__).parent/'data' dataPath.mkdir(parents=True, 
exist_ok=True) app = Flask(__name__) #cross origin resource from system arguments parser = argparse.ArgumentParser() parser.add_argument('-ip', '--ipaddress', help='IP Address') parser.add_argument('-p', '--port', help='Port Number') parser.add_argument("-cors", type=str, required=False) d = vars(parser.parse_args()) if "cors" in d.keys(): if d["cors"] != '' and d["cors"] != None: d["cors"] = [s.strip() for s in d["cors"].split(",")] #cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}}) cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}}) api = Api(app) class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name,condition=''): if condition == '': return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) else: return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def update(self,table_name,updates,condition): update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close()""" filedata += """ app = Flask(__name__) api = Api(app) class predictapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('metrices'): data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) data = request.get_json() output = predict().run(json.dumps(data)) outputobj = json.loads(output) if outputobj['status'] == 'SUCCESS': try: df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') if not sqlite_dbObj.table_exists('prodData'): sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) sqlite_dbObj.write(df2,'prodData') except: pass try: data = sqlite_dbObj.read('metrices') #print(data) if len(data) == 0: data = 
[{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}] data = pd.read_json(json.dumps(data), orient ='records') sqlite_dbObj.write(data,'metrices') else: noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0") except Exception as e: print(e) pass return jsonify(json.loads(output)) class predictfileapi(Resource): def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('metrices'): data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) if 'file' in request.files: file = request.files['file'] urlData = file.read() rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) data = rawData.to_json(orient='records') output = predict().run(data) outputobj = json.loads(output) if outputobj['status'] == 'SUCCESS': try: df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') if not sqlite_dbObj.table_exists('prodData'): sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) sqlite_dbObj.write(df2,'prodData') except: pass try: data = sqlite_dbObj.read('metrices') #print(data) if len(data) == 0: data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}] data = pd.read_json(json.dumps(data), orient ='records') sqlite_dbObj.write(data,'metrices') else: noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0") except Exception as e: print(e) pass return jsonify(json.loads(output)) else: output = {'status':'error','msg':'File is missing'} return jsonify(output) """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ class explainapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() if is_explainable: output = local_analysis(json.dumps(data)) else: output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"}) return jsonify(json.loads(output)) class monitoringapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('monitoring'): data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' if not sqlite_dbObj.table_exists('prodData'): return jsonify({'status':'Error','msg':'Prod data not available'}) data = sqlite_dbObj.read('prodData') filetimestamp = str(int(time.time())) dataFile = dataPath/('AION_' + filetimestamp+'.csv') data.to_csv(dataFile, index=False) data = request.get_json() data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} output = drift(json.dumps(data)) outputData = json.loads(output) status = outputData['status'] if status == 'SUCCESS': Msg = str(outputData['data']) else: Msg = 'Error during drift analysis' now 
= datetime.now() # current date and time date_time = now.strftime("%m/%d/%Y, %H:%M:%S") data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.write(data,'monitoring') return jsonify(json.loads(output))""" filedata += """ class matricesapi(Resource): def get(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if sqlite_dbObj.table_exists('metrices'): df1 = sqlite_dbObj.read('metrices') else: df1 = pd.DataFrame() #print(df1) if sqlite_dbObj.table_exists('monitoring'): df2 = sqlite_dbObj.read('monitoring') else: df2 = pd.DataFrame() msg = {'Deployed Version':str(modelVersion)} if df1.shape[0] > 0: msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])}) else: msg.update({'noOfPredictCalls':'0'}) driftDetails = [] for idx in reversed(df2.index): driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])} driftDetails.append(driftd) msg.update({'driftDetails':driftDetails}) return jsonify(msg) class performanceapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('monitoring'): data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' if not sqlite_dbObj.table_exists('prodData'): return jsonify({'status':'Error','msg':'Prod data not available'}) data = sqlite_dbObj.read('prodData') filetimestamp = str(int(time.time())) dataFile = dataPath/('AION_' + filetimestamp+'.csv') data.to_csv(dataFile, index=False) data = request.get_json() data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} output = odrift(json.dumps(data)) return jsonify(json.loads(output)) """ filedata += """ api.add_resource(predictapi, '/AION/{serviceName}/predict') api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file') api.add_resource(matricesapi, '/AION/{serviceName}/metrices')""".format(serviceName=usecaseid) if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ api.add_resource(explainapi, '/AION/{serviceName}/explain') api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') api.add_resource(performanceapi, '/AION/{serviceName}/performance') """.format(serviceName=usecaseid) filedata += """ if __name__ == '__main__': args = parser.parse_args() app.run(args.ipaddress,port = args.port,debug = True)""" f = open(filename, "wb") f.write(str(filedata).encode('utf8')) f.close() data = {'version':version} filename = os.path.join(datalocation,'config.json') with open(filename, "w") as outfile: json.dump(data, outfile) outfile.close()
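A minimal sketch of how these generator methods are typically chained for a tabular classification deployment; the paths, feature names and use-case id below are assumptions for illustration only:

# Illustrative sketch (assumed arguments); deploy_path must already exist.
predictionObj = aionPrediction()
deploy_path = '/tmp/aion_deploy'
features = ['age', 'income']
target = 'label'
predictionObj.create_prediction_file('trained_model', deploy_path, 'ML', '', '',
                                     'classification', '')
predictionObj.create_drift_file(deploy_path, features, target, 'classification')
predictionObj.create_classification_performance_file(deploy_path, features, target)
predictionObj.create_model_service(deploy_path, 'usecase_1', 'classification')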
utility.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' TAB_CHAR = ' ' * 4 def import_modules(importer, modules_list): for module in modules_list: mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
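A small sketch of how import_modules is meant to be driven together with the importModule helper defined in imports.py below; the module list is an assumption:

# Illustrative sketch, assuming utility.py and imports.py sit on the import path.
from imports import importModule
from utility import import_modules

importer = importModule()
modules_list = [
    {'module': 'pandas', 'mod_as': 'pd'},
    {'module': 'Path', 'mod_from': 'pathlib'},
]
import_modules(importer, modules_list)
print(importer.getCode())      # grouped import statements for the listed modules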
imports.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from importlib.metadata import version import sys class importModule(): def __init__(self): self.importModule = {} self.stdlibModule = [] self.localModule = {} def addLocalModule(self,module, mod_from=None, mod_as=None): if module == '*': if module not in self.localModule.keys(): self.localModule[module]= [mod_from] else: self.localModule[module].append(mod_from) elif module not in self.localModule.keys(): self.localModule[module] = {'from':mod_from, 'as':mod_as} def addModule(self, module, mod_from=None, mod_as=None): if module not in self.importModule.keys(): self.importModule[module] = {'from':mod_from, 'as':mod_as} if module in sys.stdlib_module_names: self.stdlibModule.append(module) elif isinstance(self.importModule[module], list): if mod_as not in [x['as'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as not in [x['from'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as != self.importModule[module]['as']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list elif mod_from != self.importModule[module]['from']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list def getModules(self): return (self.importModule, self.stdlibModule) def getBaseModule(self, extra_importers=[]): modules_alias = { 'sklearn':'scikit-learn', 'genetic_selection':'sklearn-genetic', 'google': 'google-cloud-storage', 'azure':'azure-storage-file-datalake'} local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} modules = [] require = "" if extra_importers: extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] importers_module = [self.importModule] + extra_importers for importer_module in importers_module: for k,v in importer_module.items(): if v['from']: mod = v['from'].split('.')[0] else: mod = k if mod in modules_alias.keys(): mod = modules_alias[mod] modules.append(mod) modules = list(set(modules)) for mod in modules: try: if mod in local_modules.keys(): require += f"{local_modules[mod]}\n" else: require += f"{mod}=={version(mod)}\n" except : if mod not in sys.stdlib_module_names: raise return require def getCode(self): def to_string(k, v): mod = '' if v['from']: mod += 'from {} '.format(v['from']) mod += 'import {}'.format(k) if v['as']: mod += ' as {} '.format(v['as']) return mod modules = "" local_modules = "" std_lib_modules = "" third_party_modules = "" for k,v in self.importModule.items(): if k in self.stdlibModule: std_lib_modules = std_lib_modules + '\n' + to_string(k, v) elif isinstance(v, dict): third_party_modules = third_party_modules + '\n' + to_string(k, v) elif isinstance(v, list): for alias in v: third_party_modules = third_party_modules + '\n' + to_string(k, alias) for k,v in self.localModule.items(): if k != '*': local_modules = local_modules + '\n' + 
to_string(k, v) else: for mod_from in v: local_modules = local_modules + '\n' + f'from {mod_from} import {k}' if std_lib_modules: modules = modules + "\n#Standard Library modules" + std_lib_modules if third_party_modules: modules = modules + "\n\n#Third Party modules" + third_party_modules if local_modules: modules = modules + "\n\n#local modules" + local_modules + '\n' return modules def copyCode(self, importer): self.importModule, self.stdlibModule = importer.getModules()
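A minimal usage sketch for the importModule helper above, assuming the file is importable as `imports`, Python 3.10+ (for sys.stdlib_module_names) and an environment where pandas is installed; the module names added here are purely illustrative.

from imports import importModule

importer = importModule()
importer.addModule('json')                      # standard library -> grouped under "#Standard Library modules"
importer.addModule('pandas', mod_as='pd')       # third-party import with an alias
importer.addModule('Path', mod_from='pathlib')  # from-import
importer.addLocalModule('aionPrediction', mod_from='prediction_package.aion_prediction')

print(importer.getCode())        # rendered import statements, grouped by origin
print(importer.getBaseModule())  # requirements-style listing pinned to installed versions (stdlib entries skipped)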
EncryptPythonSourceCode.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import subprocess import os import glob import sys import python_minifier def encrypt_files(path): cwd = os.getcwd() secure_path = os.path.join(path,'SecuredScripts') try: if not os.path.exists(secure_path): os.mkdir(secure_path) files = [f for f in glob.glob(path + "/*.py")] for file in files: #encrypted_file_details[0] = file #file = files[0] #print(file) #filename_w_dir = os.path.splitext(file) filename_w_ext = os.path.basename(file) filename, file_extension = os.path.splitext(filename_w_ext) file_folder_path = os.path.join(secure_path,filename) #print(file_folder_path) if not os.path.exists(file_folder_path): os.mkdir(file_folder_path) # Minify python source code minify_file = os.path.join(file_folder_path,filename+'_minify.py') pythonfolder,_ = os.path.split(sys.executable) pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe') minify_command = "\""+sys.executable+"\" \""+pyminify_script+ "\" \"" + file + "\" > \"" + minify_file+"\"" subprocess.call(minify_command, shell=True) # Change directory to folder path os.chdir(file_folder_path) # Obfuscate minified file pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe') obfusc_commmand = "\""+sys.executable+"\" \""+pyarmor_script+"\" obfuscate \"" + minify_file+"\"" #print(obfusc_commmand) subprocess.call(obfusc_commmand, shell=True) # Change directory to dist path obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py') #print(obfusc_file) chdirpath = os.path.join(file_folder_path,'dist') os.chdir(chdirpath) # Compress obfuscated file compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py') #print(compressed_file) pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe') compress_command = "\""+sys.executable+"\" \""+pyminifier_script+"\" --gzip -o \"" +compressed_file + "\" \"" + obfusc_file+"\"" #print(compress_command) subprocess.call(compress_command, shell=True) #compile_command = sys.executable+'-m py_compile "' + compressed_file+'"' #print(compile_command) #subprocess.call(compile_command , shell=True) #encrypted_file_details['compiled_file'] = file #compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc') #encrypted_file_details[1] = compiled_file #encrypted_file_list.append(encrypted_file_details) #encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc' #encrypt_command = "python " + cwd + "\\Encrypt_Key_Dcrypt.py " + compiled_file + ' ' + encrypted_file + " --g -e" #print(encrypt_command) #subprocess.call(encrypt_command, shell=True) #encrypted_file_list += ']' #return(encrypted_file_list) os.chdir(path) except OSError as err: print ("Creation of the directory %s failed "+str(err)) # Driver function if __name__=="__main__": path = sys.argv[1] encrypt_files(path) #(base) C:\Himanshu\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py #Obfuscate #(base) C:\Himanshu\DataPreprocessing>pyarmor obfuscate 
C:\Himanshu\DataPreprocessing\DataPreprocessing_minify.py #Compression #(base) C:\Himanshu\DataPreprocessing>pyminifier --gzip -o C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_compressed.py C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_minify.py #(base) C:\Himanshu\DataPreprocessing>cd dist #(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.py "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv" #Compiling compressed .py to .pyc file #(base) C:\Himanshu\DataPreprocessing\dist>python -m py_compile DataPreprocessing_compressed.py #Encrypt .pyc file #(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py C:\Himanshu\DataPreprocessing\dist\__pycache__\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e #Decrypt file #(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d #Run decrypted file #(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv"
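An illustrative invocation of encrypt_files() above; the target folder is hypothetical. The helper resolves pyminify.exe, pyarmor.exe and pyminifier.exe from the active interpreter's Scripts directory, so it presumes a Windows environment with python_minifier, pyarmor and pyminifier installed.

from EncryptPythonSourceCode import encrypt_files

# Every *.py file in the folder is minified, obfuscated and gzip-compressed into
# <folder>/SecuredScripts/<name>/dist/<name>_compressed.py
encrypt_files(r"C:\Users\demo\AION\target\AI0110\1")  # hypothetical deployment folder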
create_docker.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import shutil import subprocess from os.path import expanduser import platform deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target') modelname='AION_12' version='1' def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata): modelPath = os.path.join(deploymentfolder) filename = os.path.join(deploymentfolder,'docker_image') modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py') shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh') aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl') drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl') sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages') model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version) docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version) model_sitepackage = os.path.join(model_dockerSetup,'site-packages') model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py') model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh') model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl') model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl') try: os.mkdir(model_dockerSetup) except Exception as e: print("Error in creating Setup directpry "+str(e)) pass shutil.copytree(modelPath, docker_setup) if textdata: shutil.copytree(sitepackage, model_sitepackage) modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing') ''' try: os.makedirs(modelpretrainpath, exist_ok=True) except Exception as e: print("Error in creating Setup directpry "+str(e)) pass ''' home = expanduser("~") if platform.system() == 'Windows': hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing') else: hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing') shutil.copytree(hostpretrainpath, modelpretrainpath) shutil.copyfile(modelservice, model_dockerSetupservicefile) shutil.copyfile(shellscript, model_dockershellscript) shutil.copyfile(aix, model_aix) shutil.copyfile(drift,model_drift) try: os.mkdir(filename) except: pass requirementfilename = os.path.join(model_dockerSetup,'requirements.txt') installfilename = os.path.join(model_dockerSetup,'install.py') dockerfile = os.path.join(model_dockerSetup,'Dockerfile') dockerdata='FROM python:3.8-slim-buster' dockerdata+='\n' if textdata: dockerdata+='WORKDIR /root' dockerdata+='\n' dockerdata+='COPY HCLT HCLT' dockerdata+='\n' dockerdata+='WORKDIR /app' dockerdata+='\n' 
dockerdata+='COPY requirements.txt requirements.txt' dockerdata+='\n' dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version dockerdata+='\n' if textdata: dockerdata+='COPY site-packages site-packages' dockerdata+='\n' dockerdata+='COPY install.py install.py' dockerdata+='\n' dockerdata+='COPY run_modelService.py run_modelService.py' dockerdata+='\n' dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl' dockerdata+='\n' dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl' dockerdata+='\n' dockerdata+='COPY start_modelservice.sh start_modelservice.sh' dockerdata+='\n' if textdata: dockerdata+='''RUN apt-get update \ && apt-get install -y build-essential manpages-dev \ && python -m pip install --no-cache-dir --upgrade pip \ && python -m pip install --no-cache-dir pandas==1.2.4 \ && python -m pip install --no-cache-dir numpy==1.19.5 \ && python -m pip install --no-cache-dir joblib==1.0.1 \ && python -m pip install --no-cache-dir Cython==0.29.23 \ && mv site-packages/* /usr/local/lib/python3.8/site-packages \ && python -m pip install --no-cache-dir scipy==1.6.3 \ && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir scikit-learn==0.24.2 \ && python -m pip install --no-cache-dir spacy==2.2.3 \ && python -m pip install --no-cache-dir nltk==3.6.2 \ && python -m pip install --no-cache-dir textblob==0.15.3 \ && python -m pip install --no-cache-dir gensim==3.8.3 \ && python -m pip install --no-cache-dir demoji==1.1.0 \ && python -m pip install --no-cache-dir lxml==4.6.3 \ && python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \ && python -m pip install --no-cache-dir Unidecode==1.2.0 \ && python -m pip install --no-cache-dir pyspellchecker==0.6.2 \ && python -m pip install --no-cache-dir pycontractions==2.0.1 \ && python -m pip install --no-cache-dir tensorflow==2.4.1 \ && python -m pip install --no-cache-dir nltk==3.6.2 \ && python -m pip install --no-cache-dir -r requirements.txt \ && python install.py \ && chmod +x start_modelservice.sh ENTRYPOINT ["./start_modelservice.sh"] ''' else: dockerdata+='''RUN apt-get update \ && apt-get install -y build-essential manpages-dev \ && python -m pip install --no-cache-dir --upgrade pip \ && python -m pip install --no-cache-dir pandas==1.2.4 \ && python -m pip install --no-cache-dir numpy==1.19.5 \ && python -m pip install --no-cache-dir joblib==1.0.1 \ && python -m pip install --no-cache-dir Cython==0.29.23 \ && python -m pip install --no-cache-dir scipy==1.6.3 \ && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir scikit-learn==0.24.2 \ && python -m pip install --no-cache-dir -r requirements.txt \ && chmod +x start_modelservice.sh ENTRYPOINT ["./start_modelservice.sh"] ''' f = open(dockerfile, "w") f.write(str(dockerdata)) f.close() requirementdata='' requirementdata+='word2number==1.1' if learner_type == 'DL': requirementdata+='\n' requirementdata+='tensorflow==2.5.0' f = open(requirementfilename, "w") f.write(str(requirementdata)) f.close() if textdata: installfile=''' import nltk import ssl try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context nltk.download('punkt') nltk.download('wordnet') nltk.download('stopwords') 
nltk.download('averaged_perceptron_tagger')''' f = open(installfilename, "w") f.write(str(installfile)) f.close() try: command = 'docker pull python:3.8-slim-buster' os.system(command); #subprocess.check_call(["chmod", "+x", "start_modelservice.sh"], cwd=model_dockerSetup) subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+version,"."], cwd=model_dockerSetup) subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+version+".tar",modelname.lower()+":"+version], cwd=model_dockerSetup) dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+"_"+version+".tar") shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+"_"+version+".tar")) shutil.rmtree(model_dockerSetup) return 'Success','SUCCESSFULLY' except Exception as e: print("Error: "+str(e)) shutil.rmtree(model_dockerSetup) return 'Error',str(e) #createDockerImage(deploymentfolder,modelname,version)
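A sketch of how createDockerImage() above might be invoked; the folder, use-case name and flags are hypothetical, and Docker plus the referenced extension files must be present on the host.

# Builds the image <modelname>:<version> and saves it as a tar under <deploymentfolder>/docker_image
status, detail = createDockerImage(
    deploymentfolder=r"C:\Users\demo\HCLT\AION\target\AI0110\1",  # trained use-case folder (hypothetical)
    modelname='AI0110',
    version='1',
    learner_type='ML',   # 'DL' additionally pins tensorflow in requirements.txt
    textdata=False)      # True also copies site-packages and the pretrained text-processing models
print(status, detail)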
requirements.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
from importlib.metadata import version
import sys
import os

def requirementfile(deploy_path, model, textFeatures, learner_type):
    modules = ['pandas', 'numpy', 'alibi', 'matplotlib', 'joblib', 'shap', 'ipython', 'category_encoders',
               'scikit-learn', 'word2number', 'flask_restful', 'evidently', 'Flask-Cors']
    requires = ''
    for mod in modules:
        requires += f"{mod}=={version(mod)}\n"
    if len(textFeatures) > 0:
        tmodules = ['spacy', 'nltk', 'textblob', 'demoji', 'beautifulsoup4', 'text-unidecode',
                    'pyspellchecker', 'contractions', 'protobuf']
        for mod in tmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model == 'Extreme Gradient Boosting (XGBoost)':
        mmodules = ['xgboost']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model == 'Light Gradient Boosting (LightGBM)':
        mmodules = ['lightgbm']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model == 'Categorical Boosting (CatBoost)':
        mmodules = ['catboost']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'arima':
        mmodules = ['pmdarima']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'fbprophet':
        mmodules = ['prophet']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type == 'DL':
        mmodules = ['tensorflow']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() in ['cox', 'kaplanmeierfitter']:  # bug 12833
        mmodules = ['lifelines']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'sentencetransformer':  # bug 12833
        mmodules = ['sentence_transformers']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    filename = os.path.join(deploy_path, 'requirements.txt')
    f = open(filename, "wb")
    f.write(str(requires).encode('utf8'))
    f.close()
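An illustrative call to requirementfile() above; the deploy path is hypothetical, and every listed package must already be installed in the current environment because the pinned versions are read via importlib.metadata.

requirementfile(
    deploy_path=r"C:\Users\demo\AION\target\AI0110\1",  # hypothetical deployment folder
    model='Extreme Gradient Boosting (XGBoost)',        # adds a pinned xgboost entry
    textFeatures=[],                                    # a non-empty list adds the NLP extras (spacy, nltk, ...)
    learner_type='ML')                                  # 'DL' forces a pinned tensorflow entry
# Writes <deploy_path>/requirements.txt with each package pinned to its installed version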
eion_compress.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import sys import subprocess import glob import shutil import time from aion_deployment.EncryptPythonSourceCode import encrypt_files import json def encrypt(alldirs): for dir in alldirs: try: encrypt_files(dir) except Exception as error_obj: print("Exception in encrypting", error_obj) print("-"*50) def replace_by_compressed(alldirs): for dir in alldirs: try: #print("Processing dir", dir) files = [f for f in glob.glob(dir + "/*.py")] secure_path = os.path.join(dir, 'SecuredScripts') time.sleep(6) for file in files: try: filename_w_ext = os.path.basename(file) filename, file_extension = os.path.splitext(filename_w_ext) if filename == "__init__": continue #print("Processing file", file) file_folder_path = os.path.join(secure_path, filename, 'dist') compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py') shutil.copy(compressed_file_path, dir) os.remove(file) new_compressed_file_path = os.path.join(dir, filename+'_compressed.py') target_file_path = os.path.join(dir, filename_w_ext) os.rename(new_compressed_file_path, target_file_path) if filename == 'aion_prediction': shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform')) except Exception as error_obj: print("Exception in file ", error_obj) shutil.rmtree(secure_path) except Exception as error_obj: print("Exception in dir ", error_obj) def start_Obfuscate(path): project_path = path subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] alldirs = [ project_path, ] for subdir in subdirs: if(subdir != 'pytransform'): alldirs.append(os.path.join(project_path, subdir)) encrypt(alldirs) replace_by_compressed(alldirs) if __name__=="__main__": project_path = sys.argv[1] print("project_path", project_path) subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] alldirs = [ project_path, ] for subdir in subdirs: alldirs.append(os.path.join(project_path, subdir)) encrypt(alldirs) print("*"*50) replace_by_compressed(alldirs) # python eion_compress.py "C:\Users\ashwani.s\Desktop\22April\22April\Mohita" "C:\Users\ashwani.s\Desktop\eion\eion" > logfile.log
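An illustrative run of the obfuscation driver above; the project path is hypothetical. start_Obfuscate() encrypts every script in the folder and its first-level sub-folders (skipping 'pytransform') and then replaces each original with its compressed equivalent.

from eion_compress import start_Obfuscate

start_Obfuscate(r"C:\Users\demo\AION\target\AI0110\1")  # hypothetical target folder produced by deployment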
production.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package.aion_prediction import aionPrediction from AION.prediction_package.utility import TAB_CHAR from AION.prediction_package import utility from AION.prediction_package import common from AION.prediction_package.base import deployer def is_supported(problem_type, algo=None): """ Return True if problem_type supported otherwise False """ supported = ['classification','regression','clustering','timeseriesforecasting','Text Similarity'] return problem_type in supported def get_deployer(problem_type, algo=None, params={}): """ Return deployer class object based on problem type Raise error if no class is associated with problem type """ params['problem_type'] = problem_type if problem_type == 'classification': return classification( params) elif problem_type == 'regression': return regression( params) elif problem_type == 'clustering': return clustering( params) elif problem_type == 'timeseriesforecasting': from AION.prediction_package.time_series import forecasting return forecasting.get_deployer( params) elif problem_type == 'Text Similarity': return textSimilarity( params) else: raise ValueError('deployment is not supported') class classification( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'classification' def create_idrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) else: obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) def create_odrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_classification_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) else: obj.create_classification_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}')""" run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] in ['Neural Network']: self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) """ elif self.params['training']['algo'] in ['Neural Architecture Search']: 
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') self.importer.addModule(module='autokeras',mod_as='ak') init_code += f""" self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(self.model.predict(df)) """ elif self.params['training']['algo'] in ['Deep Q Network','Dueling Deep Q Network']: self.importer.addModule('joblib') self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='constant',mod_from='tensorflow') self.importer.addModule(module='time_step',mod_from='tf_agents.trajectories') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) q, _ = self.model(np.array(df), step_type=constant([time_step.StepType.FIRST] * np.array(df).shape[0]), training=False) return pd.DataFrame(q.numpy()) """ elif self.params['training']['algo'] in ['Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = np.expand_dims(df, axis=2) df = df.astype(np.float32) return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) """ else: self.importer.addModule(module='joblib') self.importer.addModule(module='numpy',mod_as='np') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_) """ return init_code, run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('joblib') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): output = round(output,2) encoder_file = (Path(__file__).parent/"model")/"label_encoder.pkl" if encoder_file.exists(): encoder = joblib.load(encoder_file) output.rename(columns=dict(zip(output.columns, encoder.inverse_transform(list(output.columns)))), inplace=True) raw_df['prediction'] = output.idxmax(axis=1) raw_df['probability'] = output.max(axis=1).round(2) raw_df['remarks'] = output.apply(lambda x: x.to_json(double_precision=2), axis=1) outputjson = raw_df.to_json(orient='records',double_precision=5) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ class regression( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'regression' def create_idrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) else: obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) def create_odrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) else: obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code = f""" def 
__init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') """ run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] in ['Neural Architecture Search']: self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') self.importer.addModule(module='autokeras',mod_as='ak') init_code += f""" self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) """ run_code += """ df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = np.expand_dims(df, axis=2) df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ else: self.importer.addModule('joblib') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ return code + init_code + run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): raw_df['prediction'] = output[0] raw_df['prediction'] = raw_df['prediction'].round(2) outputjson = raw_df.to_json(orient='records',double_precision=5) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ class clustering( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'clustering' def training_code( self): self.importer.addModule('joblib') self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') """ run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] == 'DBSCAN': init_code += f""" self.model = joblib.load(model_file) """ run_code += """ return self.model.fit_predict(df) """ else: init_code += f""" self.model = joblib.load(model_file) """ run_code += """ return self.model.predict(df).reshape(1, -1) """ return code + init_code + run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): raw_df['prediction'] = output[0] raw_df['prediction'] = raw_df['prediction'].round(2) outputjson = raw_df.to_json(orient='records',double_precision=2) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ return code if __name__ == '__main__': config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 
'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 
'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 
'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 
'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 
'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 
'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 
'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 
'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 
'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 
'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 
'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 
'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 
'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 
'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 
'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 
'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 
'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 
'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 
'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 
'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 
'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 
'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 
'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 
'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 
'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}} deployer = get_deployer('classification',params=config) deployer.run( )
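# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original pipeline): the '*_vect'
# feature names listed in the config above follow the pattern produced by a
# TF-IDF vectorizer fitted on the raw 'v2' text column, with each vocabulary
# term suffixed by '_vect'. The helper below shows one way such names could be
# generated with scikit-learn. The function name, the 'profiled_data.csv' path
# and the use of get_feature_names_out() (scikit-learn >= 1.0; older releases
# expose get_feature_names()) are assumptions, not taken from this repository.
# ---------------------------------------------------------------------------
def _tfidf_feature_name_sketch(csv_path='profiled_data.csv'):
    import pandas as pd
    from sklearn.feature_extraction.text import TfidfVectorizer
    raw = pd.read_csv(csv_path)
    vectorizer = TfidfVectorizer()
    # fit on the text column so the learned vocabulary matches the data
    vectorizer.fit(raw['v2'].astype(str))
    # mirror the '<term>_vect' naming convention used in the config above
    return [f'{term}_vect' for term in vectorizer.get_feature_names_out()]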
output_formatter.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import json import shutil import logging class outputFormatter: def __init__(self): self.log = logging.getLogger('eion') self.log.info('========> Inside Output Formatter') def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature): self.output_formatfile = 'import json' self.output_formatfile += '\n' self.output_formatfile += 'import numpy as np' self.output_formatfile += '\n' self.output_formatfile += 'import pandas as pd' self.output_formatfile += '\n' self.output_formatfile += 'import os' self.output_formatfile += '\n' self.output_formatfile += 'from pathlib import Path' self.output_formatfile += '\n' if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity' self.output_formatfile += '\n' self.output_formatfile += 'class output_format(object):' self.output_formatfile += '\n' if(model == 'VAR'): self.output_formatfile += ' def invertTransformation(self,predictions):' self.output_formatfile += '\n' self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","data","trainingdata.csv"))' self.output_formatfile += '\n' self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount) self.output_formatfile += '\n' self.output_formatfile += ' targetFeature = "'+str(targetFeature)+'"' self.output_formatfile += '\n' self.output_formatfile += ' columns = targetFeature.split(",")' self.output_formatfile += '\n' self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)' self.output_formatfile += '\n' self.output_formatfile += ' for j in range(0,len(columns)):' self.output_formatfile += '\n' self.output_formatfile += ' for i in range(0, len(predictions)):' self.output_formatfile += '\n' self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)' self.output_formatfile += '\n' self.output_formatfile += ' prediction = pred' self.output_formatfile += '\n' self.output_formatfile += ' for col in columns:' self.output_formatfile += '\n' self.output_formatfile += ' if col in dictDiffCount:' self.output_formatfile += '\n' self.output_formatfile += ' if dictDiffCount[col]==2:' self.output_formatfile += '\n' self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()' self.output_formatfile += '\n' self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()' self.output_formatfile += '\n' self.output_formatfile += ' prediction = pred' self.output_formatfile += '\n' self.output_formatfile += ' return(prediction)' self.output_formatfile += '\n' self.log.info("op:modelType: \n"+str(modelType)) if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): # if modelType == 'anomaly_detection': 
self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):' self.output_formatfile += '\n' self.output_formatfile += ' try:' self.output_formatfile += '\n' self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) ' self.output_formatfile += '\n' self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) ' self.output_formatfile += '\n' self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() ' self.output_formatfile += '\n' self.output_formatfile += ' except Exception as e:' self.output_formatfile += '\n' self.output_formatfile += ' print(f"find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} ")\n' self.output_formatfile += ' return anomaly_info_df' self.output_formatfile += '\n' if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): if (datetimeFeature!='' and datetimeFeature!='NA'): self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):' self.output_formatfile += '\n' else: self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' self.output_formatfile += '\n' else: self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' self.output_formatfile += '\n' if modelType.lower() == 'classification': self.output_formatfile += ' modeloutput = round(modeloutput,2)' self.output_formatfile += '\n' if(learner_type == 'ImageClassification'): if(str(output_label) != '{}'): inv_mapping_dict = {v: k for k, v in output_label.items()} self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) self.output_formatfile += '\n' self.output_formatfile += ' predictions = []' self.output_formatfile += '\n' self.output_formatfile += ' for x in modeloutput:' self.output_formatfile += '\n' self.output_formatfile += ' x = le_dict[x]' self.output_formatfile += '\n' self.output_formatfile += ' predictions.append(x)' self.output_formatfile += '\n' else: self.output_formatfile += ' predictions=modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = predictions' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\')' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' elif(learner_type == 'Text Similarity'): self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput > '+str(threshold)+',1,0)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' elif(learner_type == 'TS'): if(model == 'VAR'): self.output_formatfile += ' modeloutput = self.invertTransformation(modeloutput)' self.output_formatfile += '\n' self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}' elif(model.lower() == 'fbprophet'): self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = 
{"status":"SUCCESS","data":json.loads(modeloutput)}' elif((model.lower() == 'lstm' or model.lower() == 'mlp') and len(features) >= 1): self.output_formatfile += ' modeloutput = modeloutput.round(2)\n' self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}\n' else: self.output_formatfile += ' modeloutput = modeloutput.round(2)' self.output_formatfile += '\n' self.output_formatfile += ' modeloutput = json.dumps(modeloutput.tolist())' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":eval(modeloutput)}' self.output_formatfile += '\n' elif(learner_type in ['RecommenderSystem','similarityIdentification','contextualSearch']): self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' else: if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): if(str(output_label) != '{}'): inv_mapping_dict = {v: k for k, v in output_label.items()} self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) self.output_formatfile += '\n' ''' if(model in ['SGDClassifier']): self.output_formatfile += ' modeloutput = modeloutput.replace({"predict_class": le_dict})' else: self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' ''' if modelType != 'anomaly_detection': self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' self.output_formatfile += '\n' if(threshold != -1): ''' if(model in ['SGDClassifier']): self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput[\'probability\'] > '+str(threshold)+',1,0)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput[\'probability\']' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = ""' self.output_formatfile += '\n' else: self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' ''' self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' else: ''' if(model in 
['SGDClassifier']): self.output_formatfile += ' df[\'prediction\'] = modeloutput[\'predict_class\']' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = ""' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = "NA"' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' ''' if modelType == 'anomaly_detection': # if (model.lower()=='autoencoder'): if model.lower() in ['autoencoder']: if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n' self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n' self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n' self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n' self.output_formatfile += ' anomaly_prediction_df.to_csv(f"{new_dir}/anomaly_data.csv")\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n' self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n' self.output_formatfile += ' anomaly_prediction_df.drop("Time_diff",axis=1,inplace=True)\n' self.output_formatfile += ' except:\n' self.output_formatfile += ' pass\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\'left\')\n' self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out[\'anomalyType\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str) \n' self.output_formatfile += ' df_out.drop("time_diff",axis=1,inplace=True)\n' self.output_formatfile += ' except Exception as e:\n' self.output_formatfile += ' print("anomaly data updated issue",e)\n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' df=df_out \n' else: self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n' elif (model.lower()=='dbscan'): if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n' self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n' self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n' self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n' self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n' self.output_formatfile += ' except:\n' self.output_formatfile += ' pass\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, 
on=df.columns.values.tolist(), how=\'left\')\n' self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' except Exception as e:\n' self.output_formatfile += ' print("anomaly data updated.")\n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' df=df_out \n' else: self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n' self.output_formatfile += ' df.sort_values(by=[\'anomaly\'], ascending=False, inplace=True)\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)' self.output_formatfile += '\n' if learner_type != 'DL': self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1).round(2)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' else: if model == 'COX': self.output_formatfile += '\n' self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)' self.output_formatfile += '\n' #self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\'records\',double_precision=2)' #self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput[0]' if(learner_type == 'objectDetection'): self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\']' else: self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\'].round(2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' self.output_formatfile += ' return(json.dumps(outputjson))' filename = os.path.join(deploy_path,'script','output_format.py') #print(deploy_path) f = open(filename, "wb") self.log.info('-------> Output Mapping File Location :'+filename) f.write(str(self.output_formatfile).encode('utf8')) f.close()
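A hedged usage sketch of the writer above (argument values are placeholders for illustration; in AION they come from the training run): with a plain classification model and threshold set to -1, crate_output_format_file emits script/output_format.py whose apply_output_format labels each row with prediction, probability and remarks and returns the JSON envelope {"status":"SUCCESS","data":[...]}.

# Illustrative invocation only: every argument value below is a placeholder.
import logging
logging.basicConfig(level=logging.INFO)

formatter = outputFormatter()
formatter.crate_output_format_file(
    deploy_path='AI0110/1',               # must already contain a 'script' sub-folder
    learner_type='ML',
    modelType='Classification',
    model='Logistic Regression',
    output_label={'ham': 0, 'spam': 1},   # assumed label-to-code mapping from training
    threshold=-1,                         # -1 skips the probability cut-off branch
    trained_data_file='trainingdata.csv',
    dictDiffCount={},
    targetFeature='spam',
    features=['message_vect'],
    datetimeFeature='NA')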
inputdrift.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np import scipy import warnings import scipy.stats as st import logging import json class inputdrift(): def __init__(self,conf): self.log = logging.getLogger('eion') def get_input_drift(self,ndf,hdf,outputfolder): selectedColumns = self.features.split(',') dataalertcount=0 distributionChangeColumns="" distributionChangeMessage=[] for i in range(0,len(selectedColumns)): data1=hdf[selectedColumns[i]] data2=ndf[selectedColumns[i]] if(data1.dtype !="str" and data2.dtype !="str" ): cumulativeData=data1.append(data2) teststaticValue=teststatic(self,data1,data2) if (teststaticValue < 0.05): distributionName1,sse1=DistributionFinder(self,data1) distributionName2,sse2=DistributionFinder(self,data2) if(distributionName1 == distributionName2): dataalertcount = dataalertcount else: dataalertcount = dataalertcount+1 distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+"," changedColumn = {} changedColumn['Feature'] = selectedColumns[i] changedColumn['KS_Training'] = teststaticValue changedColumn['Training_Distribution'] = distributionName1 changedColumn['New_Distribution'] = distributionName2 distributionChangeMessage.append(changedColumn) else : dataalertcount = dataalertcount else : response ="Selected Columns should be Numerical Values" if(dataalertcount == 0): resultStatus="Model is working as expected" else : resultStatus=json.dumps(distributionChangeMessage) return(dataalertcount,resultStatus) def DistributionFinder(self,data): try: distributionName ="" sse =0.0 KStestStatic=0.0 dataType="" if(data.dtype == "float64"): dataType ="Continuous" elif(data.dtype =="int"): dataType="Discrete" elif(data.dtype =="int64"): dataType="Discrete" if(dataType == "Discrete"): distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson] index, counts = np.unique(data.astype(int),return_counts=True) if(len(index)>=2): best_sse = np.inf y1=[] total=sum(counts) mean=float(sum(index*counts))/total variance=float((sum(index**2*counts) -total*mean**2))/(total-1) dispersion=mean/float(variance) theta=1/float(dispersion) r=mean*(float(theta)/1-theta) for j in counts: y1.append(float(j)/total) pmf1=st.bernoulli.pmf(index,mean) pmf2=st.binom.pmf(index,len(index),p=mean/len(index)) pmf3=st.geom.pmf(index,1/float(1+mean)) pmf4=st.nbinom.pmf(index,mean,r) pmf5=st.poisson.pmf(index,mean) sse1 = np.sum(np.power(y1 - pmf1, 2.0)) sse2 = np.sum(np.power(y1 - pmf2, 2.0)) sse3 = np.sum(np.power(y1 - pmf3, 2.0)) sse4 = np.sum(np.power(y1 - pmf4, 2.0)) sse5 = np.sum(np.power(y1- pmf5, 2.0)) sselist=[sse1,sse2,sse3,sse4,sse5] for i in range(0,len(sselist)): if best_sse > sselist[i] > 0: best_distribution = distributions[i].name best_sse = sselist[i] elif (len(index) == 1): best_distribution = "Constant Data-No Distribution" best_sse = 0.0 distributionName =best_distribution sse=best_sse elif(dataType == "Continuous"): distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] best_distribution 
= st.norm.name best_sse = np.inf datamin=data.min() datamax=data.max() nrange=datamax-datamin y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: with warnings.catch_warnings(): warnings.filterwarnings('ignore') params = distribution.fit(data.astype(float)) # Separate parts of parameters arg = params[:-2] loc = params[-2] scale = params[-1] # Calculate fitted PDF and error with fit in distribution pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if(best_sse >sse > 0): best_distribution = distribution.name best_sse = sse distributionName =best_distribution sse=best_sse except: response = str(sys.exc_info()[0]) message='Job has Failed'+response print(message) return distributionName,sse ##KStestStatic -pvalue finding def teststatic(self,data1,data2): try: teststatic =st.ks_2samp(data1,data2) pValue=0.0 scipyVersion =scipy.__version__ if(scipyVersion <= "0.14.1"): pValue =teststatic[1] else: pValue =teststatic.pvalue except: response = str(sys.exc_info()[0]) print("Input Drift Job Failed "+response) return pValue
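The drift check above pairs a two-sample Kolmogorov-Smirnov test with a best-fit distribution comparison per numeric feature. A minimal, self-contained sketch of the same idea on synthetic data (the column values, the trimmed candidate list and the 0.05 cut-off are illustrative, not AION configuration):

# Standalone sketch: KS test plus a coarse distribution-fit comparison on synthetic data.
import numpy as np
import pandas as pd
import scipy.stats as st

def best_fit(data, candidates=(st.norm, st.expon, st.uniform)):
    # Return the candidate distribution whose fitted pdf has the lowest SSE against a histogram.
    y, x = np.histogram(data, bins='auto', density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0
    best_name, best_sse = None, np.inf
    for dist in candidates:
        params = dist.fit(data)
        pdf = dist.pdf(x, *params[:-2], loc=params[-2], scale=params[-1])
        sse = np.sum((y - pdf) ** 2)
        if sse < best_sse:
            best_name, best_sse = dist.name, sse
    return best_name

rng = np.random.default_rng(0)
train = pd.Series(rng.normal(0.0, 1.0, 1000))   # historical feature values
new = pd.Series(rng.normal(0.8, 1.5, 1000))     # incoming feature values with a shifted mean

pvalue = st.ks_2samp(train, new).pvalue
if pvalue < 0.05 and best_fit(train) != best_fit(new):
    print(f"drift detected (KS p-value={pvalue:.4f})")
else:
    print(f"no drift detected (KS p-value={pvalue:.4f})")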
prediction_transformation.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import platform import json import shutil import logging from pathlib import Path def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): self.selectorfile += 'import pandas as pd' self.selectorfile += '\n' self.selectorfile += 'import joblib' self.selectorfile += '\n' self.selectorfile += 'import os' self.selectorfile += '\n' self.selectorfile += 'import numpy as np' self.selectorfile += '\n' self.selectorfile += 'class selector(object):' self.selectorfile += '\n' self.selectorfile += ' def apply_selector(self,df):' self.selectorfile += '\n' if pcaModel_pickle_file != '': self.selectorfile += " pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+pcaModel_pickle_file+"'))" self.selectorfile += '\n' self.selectorfile += ' bpca_features = '+str(bpca_features) self.selectorfile += '\n' self.selectorfile += ' apca_features = '+str(apca_features) self.selectorfile += '\n' self.selectorfile += ' df = pcaModel.transform(df[bpca_features])' self.selectorfile += '\n' self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)' self.selectorfile += '\n' if(len(features) != 0) and model_type != 'BM25': if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder': self.selectorfile += ' df = df['+str(features)+']' self.selectorfile += '\n' self.selectorfile += ' return(df)' filename = os.path.join(deploy_path,'script','selector.py') f = open(filename, "wb") self.log.info('-------> Feature Selector File Location :'+filename) f.write(str(self.selectorfile).encode('utf8')) f.close() featurefile = 'import json' featurefile +='\n' featurefile += 'def getfeatures():' featurefile +='\n' featurefile +=' try:' featurefile +='\n' featurelist = [] if 'profiler' in config: if 'input_features_type' in config['profiler']: inputfeatures = config['profiler']['input_features_type'] for x in inputfeatures: featurelt={} featurelt['feature'] = x print(x,inputfeatures[x]) if x == targetFeature: featurelt['Type'] = 'Target' else: if inputfeatures[x] in ['int','int64','float','float64']: featurelt['Type'] = 'Numeric' elif inputfeatures[x] == 'object': featurelt['Type'] = 'Text' elif inputfeatures[x] == 'category': featurelt['Type'] = 'Category' else: featurelt['Type'] = 'Unknown' featurelist.append(featurelt) featurefile +=' features = '+str(featurelist) featurefile +='\n' featurefile +=' outputjson = {"status":"SUCCESS","features":features}' featurefile +='\n' featurefile +=' output = json.dumps(outputjson)' featurefile +='\n' featurefile +=' print("Features:",output)' featurefile +='\n' featurefile +=' return(output)' featurefile +='\n' featurefile +=' except Exception as e:' featurefile +='\n' featurefile +=' output = {"status":"FAIL","message":str(e).strip(\'"\')}' featurefile +='\n' featurefile +=' print("Features:",json.dumps(output))' featurefile +='\n' featurefile +=' return 
(json.dumps(output))' featurefile +='\n' featurefile +='if __name__ == "__main__":' featurefile +='\n' featurefile +=' output = getfeatures()' filename = os.path.join(deploy_path,'featureslist.py') f = open(filename, "wb") f.write(str(featurefile).encode('utf8')) f.close() def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): self.modelfile += ' def __init__(self):' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()=="autoencoder"): modelfile=modelfile.replace('.sav','') self.modelfile+=" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'TextDL' or learner_type == 'DL'): if modelName.lower() == 'googlemodelsearch': self.modelfile += ' import autokeras as ak' self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))" self.modelfile += '\n' else: if scoreParam == 'recall': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'recall': recall_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[recall_m])' self.modelfile += '\n' elif scoreParam == 'precision': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'precision': precision_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[precision_m])' self.modelfile += '\n' elif scoreParam == 'roc_auc': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[tf.keras.metrics.AUC()])' self.modelfile += '\n' elif scoreParam == 'f1_score': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'f1_score': f1_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[f1_m])' self.modelfile += '\n' elif scoreParam == 'r2': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'r2': r_square},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[r_square])' self.modelfile += '\n' elif scoreParam == 'rmse': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'rmse': rmse_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[rmse_m])' self.modelfile += '\n' elif scoreParam == 'mse': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif scoreParam == 'mae': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" 
self.modelfile += '\n' elif scoreParam == 'accuracy': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' else: self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'Text Similarity'): self.modelfile += " self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+preprocessing_pipe+"'))" self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})" self.modelfile += '\n' elif(learner_type in ['similarityIdentification','contextualSearch']): if scoreParam == 'VectorDB Cosine': vectorfiledbname = 'trainingdataVecDB' self.modelfile += f"\ \n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\ \n client = chromadb.PersistentClient(path=persist_directory)\ \n self.collection_name = '{vectorfiledbname}'\ \n self.collection = client.get_collection(self.collection_name)\n" else: self.modelfile += " self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\n\n" elif(learner_type == 'ImageClassification'): self.modelfile += ' self.config='+str(imageconfig) self.modelfile += '\n' if(modelName.lower() == 'densenet'): self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))' else: self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))' self.modelfile += '\n' self.modelfile += ' headModel = baseModel.output' self.modelfile += '\n' self.modelfile += ' headModel = Flatten(name="flatten")(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dense(1024, activation=\'relu\')(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dropout(0.5)(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dense(2, activation=\'sigmoid\')(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)' self.modelfile += '\n' self.modelfile += ' opt = Adam(lr=self.config[\'lr\'])' self.modelfile += '\n' self.modelfile += ' self.model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])' self.modelfile += '\n' self.modelfile += " self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'objectDetection'): self.modelfile += " self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\n" self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+"/export/pipeline.config"\n' self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+"/export/checkpoint/"\n' self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+"/export/label_map.pbtxt"\n' self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\n' self.modelfile += ' self.detection_model = model_builder.build(model_config=configs["model"], is_training=False)\n' self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\n' 
self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, "ckpt-0")).expect_partial()\n' self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\ use_display_name=True)\n' elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'): self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif modelName.lower() == 'neural architecture search': self.modelfile += ' import autokeras as ak' self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects=ak.CUSTOM_OBJECTS)" self.modelfile += '\n' else: self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()=="autoencoder"): self.modelfile += f" X=X[{feature}]\n" self.modelfile += f" X = np.asarray(X).astype('float32')\n" self.modelfile += f" reconstructed = self.model.predict(X)\n" self.modelfile += f" predict_loss = tf.keras.losses.mae(reconstructed,X)\n" self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\n' self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\n' self.modelfile += ' prediction_df = pd.DataFrame()\n' self.modelfile += ' prediction_df["loss"] = predict_loss\n' self.modelfile += ' prediction_df["max_threshold"] = max_threshold\n' self.modelfile += ' prediction_df["min_threshold"] = min_threshold\n' self.modelfile += ' prediction_df["anomaly"] = np.where((prediction_df["loss"] > prediction_df["max_threshold"]) | (prediction_df["loss"] <= prediction_df["min_threshold"]), True, False)\n' self.modelfile += ' return prediction_df\n' elif(learner_type == 'RecommenderSystem'): self.modelfile += ' predictions = []' self.modelfile += '\n' self.modelfile += ' for index,row in X.iterrows():' self.modelfile += '\n' self.modelfile += ' score = self.model.predict(int(row["uid"]),int(row["iid"]))' self.modelfile += '\n' self.modelfile += ' predictions.append(score.est)' self.modelfile += '\n' self.modelfile += ' return predictions' elif(learner_type in ['similarityIdentification','contextualSearch']): tfeatures = list(modelFeatures.split(",")) if indexFeature != '' and indexFeature != 'NA': ifeatures = indexFeature.split(",") for ifes in ifeatures: if ifes not in tfeatures: tfeatures.append(ifes) if model_type == 'BM25': self.modelfile += f"\n\ tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\n\ bm25 = BM25Okapi(tokenized_corpus)\n\ tokenized_query = [doc.split(' ') for doc in X.tokenize]\n\ logcnt = 5\n\ output = []\n\ for query in tokenized_query:\n\ doc_scores = bm25.get_scores(query)\n\ related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\n\ x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\n\ x['Score'] = doc_scores[related_docs_indices]\n\ x['Score'] = round(x['Score'],2).astype(str)+'%'\n\ output.append(x)\n\ return output\n" elif scoreParam == 
'VectorDB Cosine': featuresVecDB = modelFeatures.split(",") self.modelfile += ' logcnt = 5\n' self.modelfile += f" columns = {featuresVecDB}\n" self.modelfile += f"\ \n output = []\ \n for rowindex, row in X.iterrows():\ \n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\ \n results = self.collection.query(\ \n query_embeddings=queryembedding.tolist(),\ \n n_results=logcnt\ \n )\ \n x = pd.DataFrame(columns=columns)\ \n for i in range(0, len(results['ids'][0])):\ \n documentAry = results['documents'][0][i]\ \n documentAry = documentAry.split(' ~&~ ')\ \n for j in range(0, len(documentAry)):\ \n x.at[i,columns[j]] = documentAry[j]\ \n x.at[i,'Score'] = results['distances'][0][i]\ \n output.append(x)\ \n return output" else: self.modelfile += ' columns = self.train_input.columns.tolist()\n' self.modelfile += ' logcnt = 5\n' self.modelfile += f" train_input = self.train_input[{tfeatures}]\n" for tf in tfeatures: self.modelfile += f" columns.remove('{tf}')\n" self.modelfile += f"\ \n results = cosine_similarity(self.train_input[columns],X)\ \n output = []\ \n for i in range(results.shape[1]):\ \n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\ \n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\ \n scores = []\ \n for j in range(0,logcnt):\ \n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\ \n x['Score'] = scores\ \n output.append(x)\ \n return output" elif(learner_type == 'Text Similarity'): self.modelfile += ' X["'+firstDocFeature+'"] = X["'+firstDocFeature+'"].astype(str)' self.modelfile += '\n' self.modelfile += ' X["'+secondDocFeature+'"] = X["'+secondDocFeature+'"].astype(str)' self.modelfile += '\n' self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X["'+firstDocFeature+'"].values)' self.modelfile += '\n' self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X["'+secondDocFeature+'"].values)' self.modelfile += '\n' self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\'post\')' self.modelfile += '\n' self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\'post\')' self.modelfile += '\n' self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])' self.modelfile += '\n' self.modelfile += ' return(prediction)' self.modelfile += '\n' elif(learner_type == 'ImageClassification'): self.modelfile += ' predictions = []' self.modelfile += '\n' self.modelfile += ' for index, row in X.iterrows(): ' self.modelfile += '\n' self.modelfile += ' img = cv2.imread(row[\'imagepath\'])' self.modelfile += '\n' self.modelfile += ' img = cv2.resize(img, (self.config[\'img_width\'],self.config[\'img_height\']))' self.modelfile += '\n' self.modelfile += ' img = image.img_to_array(img)' self.modelfile += '\n' self.modelfile += ' img = np.expand_dims(img, axis=0)' self.modelfile += '\n' self.modelfile += ' img = img/255' self.modelfile += '\n' self.modelfile += ' prediction = self.model.predict(img)' self.modelfile += '\n' self.modelfile += ' prediction = np.argmax(prediction,axis=1)' self.modelfile += '\n' self.modelfile += ' predictions.append(prediction[0])' self.modelfile += '\n' self.modelfile += ' return(predictions)' self.modelfile += '\n' elif(learner_type == 'objectDetection'): self.modelfile += ' @tf.function\n' self.modelfile += ' def detect_fn(image):\n' self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\n' 
self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\n' self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\n' self.modelfile += ' return detections\n' self.modelfile += ' def load_image_into_numpy_array(path):\n' self.modelfile += ' return np.array(Image.open(path))\n' self.modelfile += ' imageLocation = []\n' self.modelfile += ' for i, row in X.iterrows():\n' self.modelfile += ' if ("confidance" in row) and row["confidance"] <= 1.0:\n' self.modelfile += ' confidance = row["confidance"]\n' self.modelfile += ' else:\n' self.modelfile += ' confidance = 0.8\n' self.modelfile += ' imageName = str(Path(row["imagepath"]).stem)+"_output"+str(Path(row["imagepath"]).suffix)\n' self.modelfile += ' image_np = load_image_into_numpy_array(row["imagepath"])\n' self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n' self.modelfile += ' detections = detect_fn(input_tensor)\n' self.modelfile += ' num_detections = int(detections.pop("num_detections"))\n' self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\n\ for key, value in detections.items()}\n' self.modelfile += ' detections["num_detections"] = num_detections\n' self.modelfile += ' detections["detection_classes"] = detections["detection_classes"].astype(np.int64)\n' self.modelfile += ' label_id_offset = 1\n' self.modelfile += ' image_np_with_detections = image_np.copy()\n' self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\n\ image_np_with_detections,\n\ detections["detection_boxes"],\n\ detections["detection_classes"]+label_id_offset,\n\ detections["detection_scores"],\n\ self.category_index,\n\ use_normalized_coordinates=True,\n\ max_boxes_to_draw=200,\n\ min_score_thresh=confidance,\n\ agnostic_mode=False)\n' self.modelfile += ' plt.figure()\n' self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\n' self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\n' self.modelfile += ' plt.show()\n' self.modelfile += ' return imageLocation\n' else: if(learner_type == 'DL' and model != 'Neural Network'): self.modelfile += ' X = np.expand_dims(X, axis=2)' self.modelfile += '\n' if(learner_type == 'TextDL'): self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' self.modelfile += '\n' elif(learner_type == 'TextML'): self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)' self.modelfile += '\n' elif(learner_type == 'DL' and model_type == 'Classification'): self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' self.modelfile += '\n' else: if(model_type == 'Classification' or model_type == 'TLClassification'): if model == 'Neural Architecture Search': self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict(X))' self.modelfile += '\n' else: if optimizationmethod == 'genetic': self.modelfile += '\n' self.modelfile += ' try:' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))' self.modelfile += '\n' self.modelfile += ' except:' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict(X))' else: self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' if model.lower() == 'deep q network' or model.lower() == 'dueling deep q 
network': self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(q.numpy())' else: self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)' self.modelfile += '\n' elif model_type == 'Regression' and model == 'NAS': self.modelfile += \ """ X = X.astype(np.float32) return self.model.predict(X) """ elif(learner_type == 'TS'): if model.lower() == 'fbprophet': self.modelfile += ' sessonal_freq="'+str(sessonal_freq)+'"' self.modelfile += '\n' self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=int(X["noofforecasts"][0]),freq=sessonal_freq,include_history = False)' self.modelfile += '\n' if (additional_regressors): self.modelfile += '\n' self.modelfile += ' additional_regressors='+str(additional_regressors) self.modelfile += '\n' self.modelfile += ' ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]' self.modelfile += '\n' self.modelfile += ' ts_prophet_future.reset_index(drop=True)' self.modelfile += '\n' self.modelfile += ' ts_prophet_future=ts_prophet_future.dropna()' self.modelfile += '\n' self.modelfile += ' train_forecast = self.model.predict(ts_prophet_future)' self.modelfile += '\n' self.modelfile += ' prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(X["noofforecasts"][0]))' self.modelfile += '\n' self.modelfile += ' return(prophet_forecast_tail)' elif model.lower() == 'lstm' or model.lower() == 'mlp': self.modelfile += ' lag_order='+str(lag_order) self.modelfile += '\n' self.modelfile += ' xt = X.values' self.modelfile += '\n' scalertransformationFile = scalertransformationFile.split('\\')[-1] self.modelfile += ' loaded_scaler_model = joblib.load(os.path.join(os.path.dirname(__file__),\'..\',\'model\',\''+scalertransformationFile+'\'))' self.modelfile += '\n' self.modelfile += ' xt = xt.astype(\'float32\')' self.modelfile += '\n' self.modelfile += ' xt = loaded_scaler_model.transform(xt)' self.modelfile += '\n' self.modelfile += ' noOfPredictions = 10' self.modelfile += '\n' self.modelfile += ' pred_data = xt' self.modelfile += '\n' self.modelfile += ' y_future = []' self.modelfile += '\n' self.modelfile += ' for i in range(noOfPredictions):' self.modelfile += '\n' if len(feature) == 1: self.modelfile += ' pred_data = pred_data[-lag_order:]' self.modelfile += '\n' if model.lower() == 'mlp': self.modelfile += ' pred_data = pred_data.reshape((1,lag_order))' else: self.modelfile += ' pred_data = pred_data.reshape((1,lag_order,1))' self.modelfile += '\n' self.modelfile += ' pred = self.model.predict(pred_data)' self.modelfile += '\n' self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) ' self.modelfile += '\n' self.modelfile += ' y_future.append(predoutput.flatten()[-1])' self.modelfile += '\n' self.modelfile += ' pred_data = np.append(pred_data,pred)' self.modelfile += '\n' self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')' self.modelfile += '\n' self.modelfile += ' for i in range(0, len(y_future)):' self.modelfile += '\n' self.modelfile += ' pred.iloc[i] = y_future[i]' self.modelfile += '\n' self.modelfile += ' return pred' else: self.modelfile += ' pdata = pred_data[-lag_order:]' self.modelfile += '\n' self.modelfile += ' pdata = pdata.reshape((1,lag_order,'+str(len(feature))+'))' self.modelfile += '\n' self.modelfile += ' pred = 
self.model.predict(pdata)' self.modelfile += '\n' self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) ' self.modelfile += '\n' self.modelfile += ' y_future.append(predoutput)' self.modelfile += '\n' self.modelfile += ' pred_data = np.append(pred_data,pred,axis=0)' self.modelfile += '\n' self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')' self.modelfile += '\n' self.modelfile += ' for i in range(0, len(y_future)):' self.modelfile += '\n' self.modelfile += ' pred.iloc[i] = y_future[i]' self.modelfile += '\n' self.modelfile += ' return pred' else: self.modelfile += ' return self.model.predict(n_periods=int(X["noofforecasts"][0]))' else: if model == 'KaplanMeierFitter': self.modelfile += '\n' self.modelfile += ' res = self.model.predict(X[\''+feature[0]+'\'].astype(int))' self.modelfile += '\n' self.modelfile += ' if isinstance(res, pd.DataFrame):\n' self.modelfile += ' return res.values.reshape(1,-1)\n' self.modelfile += ' else:\n' self.modelfile += ' return np.array([res])\n' elif model == 'COX': self.modelfile += ' res = []\n' self.modelfile += ' for idx,row in X.iterrows():\n' self.modelfile += ' res.append(self.model.predict_survival_function(X, times=row[self.model.duration_col])[idx].values[0])\n' self.modelfile += ' return pd.DataFrame(res)' #self.modelfile += ' return self.model.predict_survival_function(X, times=X[self.model.duration_col])' self.modelfile += '\n' elif(learner_type == 'DL' and model_type in ['Classification','Regression']): self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return self.model.predict(X).reshape(1, -1)' self.modelfile += '\n' elif (model_type == 'Clustering' and model == 'DBSCAN'): self.modelfile += ' return self.model.fit_predict(X)' elif(model_type.lower() == 'anomaly_detection' and model.lower() == 'dbscan'): self.modelfile += " pred=self.model.fit_predict(X)\n" self.modelfile += " X.loc[:,'cluster'] = self.model.labels_ \n" self.modelfile += ' return X\n' elif model_type.lower() == 'anomaly_detection': self.modelfile += ' X = X.astype(np.float32)\n' self.modelfile += ' return self.model.predict(X)' else: if model_type != 'Clustering': self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' #self.modelfile += ' return self.model.predict(X).reshape(1, -1)' self.modelfile += \ """ if isinstance(self.model, LatentDirichletAllocation): output = np.matrix(self.model.transform(X)).argmax(axis=1) return output.flatten().tolist() return self.model.predict(X).reshape(1, -1) """
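For orientation, this is a reconstruction of what create_selector_file above writes to script/selector.py in the simplest case: no PCA model, a plain feature subset and a non-anomaly model type. The feature names are placeholders; everything else follows the template strings verbatim.

# Shape of the generated script/selector.py for the simple, non-PCA case (placeholder feature names).
import pandas as pd
import joblib
import os
import numpy as np
class selector(object):
    def apply_selector(self,df):
        df = df[['feature1', 'feature2']]
        return(df)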
base.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package.aion_prediction import aionPrediction from AION.prediction_package.utility import TAB_CHAR from AION.prediction_package import utility from AION.prediction_package import common def file_header( usecase=''): return '' class deployer(): """ Base deployer class used to generate the deployment code. Problem-type-specific deployers inherit from this class. """ def __init__(self, params={}): if not params['paths']['deploy']: raise ValueError('Deploy path is not provided') self.deploy_path = Path(params['paths']['deploy']) if not self.deploy_path.exists(): self.deploy_path.mkdir(parents=True, exist_ok=True) self.name = params.get('problem_type', '') self.params = params self.importer = importModule() self.feature_reducer = False def profiler_code(self): return common.profiler_code(self.params['profiler']) def feature_engg_code(self): if self.params['selector'].get('reducer',False): code, modules = common.feature_reducer_code(self.params['selector']) else: code, modules = common.feature_selector_code(self.params['selector']) utility.import_modules(self.importer, modules) return code def training_code(self): return common.training_code(self.params['training']) def formatter_code(self): return '' def run(self): """ The run function is called to start the deployment process. 
It creates the following files: inputprofiler.py for preprocessing the input, aion_predict.py for prediction, and the model service file. """ code = self.predict_code( ) with open(self.deploy_path/'aion_predict.py', 'w') as f: f.write(code) profiler_code = self.profiler_code() with open(self.deploy_path/'script'/'inputprofiler.py', 'w') as f: f.write(profiler_code) self.create_model_service( ) self.create_publish_service() self.create_idrift() self.create_odrift() common.create_feature_list(self.params, self.params['features']['target_feat'], self.deploy_path) common.requirement_file(self.deploy_path,self.params['training']['algo'],self.params['features']['text_feat']) common.create_readme_file(self.deploy_path, self.params['training']['model_file'], self.params['features']['input_feat']) self.create_utils_folder() def predict_code(self): imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] utility.import_modules(self.importer, imported_modules) self.importer.addLocalModule(module='inputprofiler',mod_from='script.inputprofiler') code_text = "" code_text += self.feature_engg_code() code_text += self.training_code() code_text += self.formatter_code() code_text += common.main_code() code = file_header() code += self.importer.getCode() return code + code_text def create_model_service(self): service_name = '{}{}{}'.format(self.params['usecase_name'], '_' if self.params['usecase_ver'] != '' else '', self.params['usecase_ver']) obj = aionPrediction() obj.create_model_service(self.deploy_path, service_name, self.name) def create_publish_service(self): obj = aionPrediction() obj.create_publish_service(self.params['paths']['usecase'], self.params['usecase_name'],self.params['usecase_ver'], self.name) def create_idrift(self): pass def create_odrift(self): pass def create_utils_folder(self): common.create_util_folder(self.deploy_path)
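The profiler_code/feature_engg_code/training_code/formatter_code hooks above are the extension points: a subclass swaps in different source text and run() stitches the pieces, together with the collected imports, into aion_predict.py under the deploy path. Below is a minimal illustrative subclass, not part of AION, only to make that contract concrete; the forecasting deployers that follow use the same pattern.

# Illustrative subclass only: demonstrates the hook contract of the base deployer.
from AION.prediction_package.base import deployer

class echo_deployer(deployer):
    def formatter_code(self):
        # Emit an output_format class that wraps the trainer output in the standard JSON envelope.
        self.importer.addModule('json')
        return """
class output_format():
    def __init__(self):
        pass
    def run(self, raw_df, df):
        outputjson = {"status": "SUCCESS", "data": df.tolist()}
        return json.dumps(outputjson)
"""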
forecasting.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package.aion_prediction import aionPrediction from AION.prediction_package.utility import TAB_CHAR from AION.prediction_package import utility from AION.prediction_package.base import deployer from AION.prediction_package import common import numpy as np def get_deployer( params): if params['training']['algo'] == 'ARIMA': return arima(params) elif params['training']['algo'] == 'LSTM': return lstm(params) elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO': return lstmencdec_mviuvo(params) elif params['training']['algo'] == 'MLP': return mlp(params) elif params['training']['algo'] == 'VAR': return var(params) elif params['training']['algo'] == 'FBPROPHET': return fbprophet(params) else: raise ValueError(f"Algorithm {params['training']['algo']} for time series forecasting is not supported") def _profiler_code(params, importer): """ This will create the profiler file based on the config file. separated file is created as profiler is required for input drift also. """ imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] utility.import_modules(importer, imported_modules) if 'code' in params['profiler'].get('preprocess',{}).keys(): code = params['profiler']['preprocess']['code'] else: code = "" code += """ class inputprofiler(): """ init_code = """ def __init__(self): """ init_code += """ # preprocessing preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if not preprocess_path.exists(): raise ValueError(f'Preprocess model file not found: {preprocess_path}') self.profiler = joblib.load(preprocess_path) """ run_code = """ def run(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if 'code' in params['profiler'].get('preprocess',{}).keys(): run_code += """ df = preprocess( df)""" if params['profiler'].get('unpreprocessed_columns'): run_code += f""" unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}'] df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if params['profiler'].get('force_numeric_conv'): run_code += f""" df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')""" run_code += _profiler_main_code(params) if params['profiler'].get('unpreprocessed_columns'): run_code += f""" df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ run_code += """ return df """ utility.import_modules(importer, imported_modules) import_code = importer.getCode() return import_code + code + init_code + run_code def _profiler_main_code(params): code = f""" df = 
self.profiler.transform(df) columns = {params['profiler']['output_features']} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) """ return code class arima( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code( self): imported_modules = [ {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): def __init__(self): pass def run( self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[['noofforecasts']] """ return importer.getCode() + code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df """ def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='joblib') return f""" class trainer(): def __init__(self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) def run(self,df): return self.model.predict(n_periods=int(df["noofforecasts"][0])) """ def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = json.dumps(df.tolist()) outputjson = {"status":"SUCCESS","data":eval(df)} return(json.dumps(outputjson)) """ class lstm( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code(self): importer = importModule() return _profiler_code( self.params, importer) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file) """ run_code = f""" def run(self, df): lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] for i in range(noOfPredictions): """ if len(self.params['selector']['output_features']) == 1: run_code += f""" pred_data = pred_data[-lag_order:] pred_data = pred_data.reshape((1,lag_order,1)) pred = self.model.predict(pred_data) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput.flatten()[-1]) pred_data = np.append(pred_data,pred) pred = 
pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ else: run_code += f""" pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput) pred_data = np.append(pred_data,pred,axis=0) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ return init_code, run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """ class lstmencdec_mviuvo( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' outputFeatrues = params['profiler']['output_features'] self.targetColIndx = outputFeatrues.index(params['features']['target_feat']) selectedColDict = params['selector']['output_features'] self.selectedCols = list() for col in selectedColDict: self.selectedCols.append(col) def profiler_code(self): importer = importModule() return _profiler_code( self.params, importer) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file) """ run_code = f""" def run(self, df): targetColIndx = {self.targetColIndx} lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) pred_1d = pred.ravel() pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, {len(self.params['selector']['output_features'])}) pdata_2d[:,targetColIndx] = pred_1d pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d) predout = pred_2d_inv[:, targetColIndx] predout = predout.reshape(len(pred_1d),1) pred = pd.DataFrame(index=range(0,len(predout)),columns=['{self.params['features']['target_feat']}']) for i in range(0, len(predout)): pred.iloc[i] = predout[i] return pred """ return init_code, run_code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df[{self.selectedCols}] """ def formatter_code(self): self.importer.addModule('json') 
self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """ class mlp( lstm): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file)""" run_code = f""" def run(self, df): lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] for i in range(noOfPredictions): """ if len(self.params['selector']['output_features']) == 1: run_code += f""" pred_data = pred_data[-lag_order:] pred_data = pred_data.reshape((1,lag_order)) pred = self.model.predict(pred_data) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput.flatten()[-1]) pred_data = np.append(pred_data,pred) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ else: run_code += f""" pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput) pred_data = np.append(pred_data,pred,axis=0) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ return init_code, run_code class var( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code(self): importer = importModule() code = _profiler_code( self.params, importer) return code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df[{self.params['selector']['output_features']}] """ def training_code( self): self.importer.addModule(module='joblib') return f""" class trainer(): def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) def run(self,df): lag_order = self.model.k_ar return self.model.forecast(df.values[-lag_order:],steps={self.params['training']['no_of_prediction']}) """ def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return f""" class 
output_format(): def __init__( self): pass def invertTransformation(self,predictions): datasetdf = pd.read_csv((Path(__file__).parent/"data")/"trainingdata.csv") dictDiffCount = {self.params['training']['dictDiffCount']} target_features = "{self.params['features']['target_feat']}" columns = target_features.split(',') pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns) for j in range(0,len(columns)): for i in range(0, len(predictions)): pred.iloc[i][j] = round(predictions[i][j],2) prediction = pred for col in columns: if col in dictDiffCount: if dictDiffCount[col]==2: prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum() prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum() prediction = pred return(prediction) def run(self,raw_df,df): df = self.invertTransformation(df) df = df.to_json(orient='records',double_precision=2) outputjson = {{"status":"SUCCESS","data":json.loads(df)}} return(json.dumps(outputjson)) """ class fbprophet( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code( self): imported_modules = [ {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): def __init__(self): pass def run( self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[['noofforecasts']] """ return importer.getCode() + code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df """ def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') self.importer.addModule(module='joblib') code = f""" class trainer(): def __init__(self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) """ code += f""" def run(self,df): sessonal_freq = '{self.params['training']['sessonal_freq']}' ts_prophet_future = self.model.make_future_dataframe(periods=int(df["noofforecasts"][0]),freq=sessonal_freq,include_history = False) """ if (self.params['training']['additional_regressors']): code += f""" additional_regressors={self.params['training']['additional_regressors']} ts_prophet_future[additional_regressors] = dataFrame[additional_regressors] ts_prophet_future.reset_index(drop=True) ts_prophet_future=ts_prophet_future.dropna() """ code += """ train_forecast = self.model.predict(ts_prophet_future) prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(df["noofforecasts"][0])) return(prophet_forecast_tail)""" return code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """
local_pipeline.py
import docker
import json
import logging

def read_json(file_path):
    data = None
    with open(file_path, 'r') as f:
        data = json.load(f)
    return data

def run_pipeline(inputconfig):
    inputconfig = json.loads(inputconfig)
    logfilepath = inputconfig['logfilepath']
    logging.basicConfig(level=logging.INFO, filename=logfilepath)
    usecasename = inputconfig['usecase']
    logging.info("UseCaseName :" + str(usecasename))
    version = inputconfig['version']
    logging.info("version :" + str(version))
    config = inputconfig['dockerlist']
    persistancevolume = inputconfig['persistancevolume']
    logging.info("PersistanceVolume :" + str(persistancevolume))
    datasetpath = inputconfig['datasetpath']
    logging.info("DataSet Path :" + str(datasetpath))
    config = read_json(config)
    client = docker.from_env()
    inputconfig = {'modelName': usecasename, 'modelVersion': str(version), 'dataLocation': datasetpath}
    inputconfig = json.dumps(inputconfig)
    inputconfig = inputconfig.replace('"', '\\"')
    logging.info("===== Model Monitoring Container Start =====")
    outputStr = client.containers.run(config['ModelMonitoring'], 'python code.py -i'+datasetpath, volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('ModelMonitoring: ' + str(outputStr))
    print('ModelMonitoring: ' + str(outputStr))
    logging.info("===== ModelMonitoring Stop =====")
    logging.info("===== Data Ingestion Container Start =====")
    outputStr = client.containers.run(config['DataIngestion'], 'python code.py', volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('DataIngestion: ' + str(outputStr))
    print('DataIngestion: ' + str(outputStr))
    logging.info("===== Data Ingestion Container Stop =====")
    outputStr = outputStr.strip()
    decoded_data = json.loads(outputStr)
    status = decoded_data['Status']
    if status != 'Success':
        # stop the pipeline instead of continuing with a failed stage
        output = {'Status': 'Error', 'Msg': 'Data Ingestion Fails'}
        logging.info(str(output))
        return json.dumps(output)
    logging.info("===== Transformation Container Start =====")
    outputStr = client.containers.run(config['DataTransformation'], 'python code.py', volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('Data Transformations: ' + str(outputStr))
    print('Data Transformations: ' + str(outputStr))
    logging.info("===== Transformation Container Done =====")
    outputStr = outputStr.strip()
    decoded_data = json.loads(outputStr)
    status = decoded_data['Status']
    if status != 'Success':
        output = {'Status': 'Error', 'Msg': 'Data Transformations Fails'}
        logging.info(str(output))
        return json.dumps(output)
    logging.info("===== Feature Engineering Container Start =====")
    outputStr = client.containers.run(config['FeatureEngineering'], 'python code.py', volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('FeatureEngineering: ' + str(outputStr))
    print('FeatureEngineering: ' + str(outputStr))
    logging.info("===== Feature Engineering Container Done =====")
    outputStr = outputStr.strip()
    decoded_data = json.loads(outputStr)
    status = decoded_data['Status']
    modeltraining = config['ModelTraining']
    for mt in modeltraining:
        logging.info("===== Training Container Start =====")
        outputStr = client.containers.run(mt['Training'], 'python code.py', volumes=[persistancevolume+':/aion'])
        outputStr = outputStr.decode('utf-8')
        logging.info('ModelTraining: ' + str(outputStr))
        print('ModelTraining: ' + str(outputStr))
        logging.info("===== Training Container Done =====")
        outputStr = outputStr.strip()
        try:
            decoded_data = json.loads(outputStr)
            status = decoded_data['Status']
        except Exception as inst:
            logging.info(inst)
    logging.info("===== Model Registry Start =====")
    outputStr = client.containers.run(config['ModelRegistry'], 'python code.py', volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('ModelRegistry: ' + str(outputStr))
    print('ModelRegistry: ' + str(outputStr))
    logging.info("===== ModelRegistry Done =====")
    logging.info("===== ModelServing Start =====")
    outputStr = client.containers.run(config['ModelServing'], 'python code.py', volumes=[persistancevolume+':/aion'])
    outputStr = outputStr.decode('utf-8')
    logging.info('Prediction: ' + str(outputStr))
    print('Prediction: ' + str(outputStr))
    logging.info("===== ModelServing Done =====")
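For orientation, a minimal sketch of the JSON payload run_pipeline expects, based on the keys read above; all paths are hypothetical placeholders and the module is assumed to be importable as local_pipeline. The 'dockerlist' value should point at the dockerlist.json produced by build_container.py below.

import json
from local_pipeline import run_pipeline

# hypothetical values only
sample_config = {
    "logfilepath": "/aion/logs/pipeline.log",
    "usecase": "demo_usecase",
    "version": "1",
    "dockerlist": "/aion/mlac/dockerlist.json",
    "persistancevolume": "/aion/data",
    "datasetpath": "/aion/data/input.csv"
}
run_pipeline(json.dumps(sample_config))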
build_container.py
import os
import shutil
import sys
import subprocess
from os.path import expanduser
import platform
import json

def createDockerImage(model_name, model_version, module, folderpath):
    command = 'docker pull python:3.8-slim-buster'
    os.system(command)
    subprocess.check_call(["docker", "build", "-t", module+'_'+model_name.lower()+":"+model_version, "."], cwd=folderpath)

def local_docker_build(config):
    print(config)
    config = json.loads(config)
    model_name = config['usecase']
    model_version = config['version']
    mlaac__code_path = config['mlacPath']
    docker_images = {}
    docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version
    dataset_addr = os.path.join(mlaac__code_path, 'ModelMonitoring')
    createDockerImage(model_name, model_version, 'modelmonitoring', dataset_addr)
    docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version
    dataset_addr = os.path.join(mlaac__code_path, 'DataIngestion')
    createDockerImage(model_name, model_version, 'dataingestion', dataset_addr)
    transformer_addr = os.path.join(mlaac__code_path, 'DataTransformation')
    docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version
    createDockerImage(model_name, model_version, 'datatransformation', transformer_addr)
    featureengineering_addr = os.path.join(mlaac__code_path, 'FeatureEngineering')
    docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version
    createDockerImage(model_name, model_version, 'featureengineering', featureengineering_addr)
    arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith("ModelTraining")]
    docker_training_images = []
    for x in arr:
        dockertraing = {}
        dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version
        docker_training_images.append(dockertraing)
        training_addri = os.path.join(mlaac__code_path, x)
        createDockerImage(model_name, model_version, str(x).lower(), training_addri)
    docker_images['ModelTraining'] = docker_training_images
    docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version
    deploy_addr = os.path.join(mlaac__code_path, 'ModelRegistry')
    createDockerImage(model_name, model_version, 'modelregistry', deploy_addr)
    docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version
    deploy_addr = os.path.join(mlaac__code_path, 'ModelServing')
    createDockerImage(model_name, model_version, 'modelserving', deploy_addr)
    outputjsonFile = os.path.join(mlaac__code_path, 'dockerlist.json')
    with open(outputjsonFile, 'w') as f:
        json.dump(docker_images, f)
    output = {'Status': 'Success', 'Msg': outputjsonFile}
    output = json.dumps(output)
    print("aion_build_container:", output)
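A minimal invocation sketch for local_docker_build, assuming the module is importable as build_container and using a hypothetical MLaC code directory; the function expects a JSON string with 'usecase', 'version' and 'mlacPath'.

import json
from build_container import local_docker_build

# hypothetical config; mlacPath should contain the ModelMonitoring, DataIngestion, ... stage folders
config = json.dumps({"usecase": "demo_usecase", "version": "1", "mlacPath": "/aion/mlac_code"})
local_docker_build(config)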
git_upload.py
import os import sys import json from pathlib import Path import subprocess import shutil import argparse def create_and_save_yaml(git_storage_path, container_label,usecasepath): file_name_prefix = 'gh-acr-' yaml_file = f"""\ name: gh-acr-{container_label} on: push: branches: main paths: {container_label}/** workflow_dispatch: jobs: gh-acr-build-push: runs-on: ubuntu-latest steps: - name: 'checkout action' uses: actions/checkout@main - name: 'azure login' uses: azure/login@v1 with: creds: ${{{{ secrets.AZURE_CREDENTIALS }}}} - name: 'build and push image' uses: azure/docker-login@v1 with: login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}} username: ${{{{ secrets.REGISTRY_USERNAME }}}} password: ${{{{ secrets.REGISTRY_PASSWORD }}}} - run: | docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} """ arr = [filename for filename in os.listdir(usecasepath) if filename.startswith("ModelTraining")] for x in arr: yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label yaml_file+='\n' yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label yaml_file+='\n' with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f: f.write(yaml_file) def run_cmd(cmd): try: subprocess.check_output(cmd, stderr=subprocess.PIPE) except subprocess.CalledProcessError as e: if e.stderr: if isinstance(e.stderr, bytes): err_msg = e.stderr.decode(sys.getfilesystemencoding()) else: err_msg = e.stderr elif e.output: if isinstance(e.output, bytes): err_msg = e.output.decode(sys.getfilesystemencoding()) else: err_msg = e.output else: err_msg = str(e) return False, err_msg return True, "" def validate_config(config): non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName'] missing_keys = [k for k in non_null_keys if k not in config.keys()] if missing_keys: raise ValueError(f"following fields are missing in config file: {missing_keys}") for k,v in config.items(): if k in non_null_keys and not v: raise ValueError(f"Please provide value for '{k}' in config file.") def upload(config): validate_config(config) url_type = config.get('url_type','https') if url_type == 'https': https_str = "https://" url = https_str + config['username'] + ":" + 
config['token'] + "@" + config['url'][len(https_str):] else: url = config['url'] model_location = Path(config['location']) git_folder_location = Path(config['gitFolderLocation']) git_folder_location.mkdir(parents=True, exist_ok=True) (git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True) if not model_location.exists(): raise ValueError('Trained model data not found') os.chdir(str(git_folder_location)) (git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True) shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True) create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location']) if (Path(git_folder_location)/'.git').exists(): first_upload = False else: first_upload = True if first_upload: cmd = ['git','init'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','config','user.name',config['username']] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','config','user.email',config['email']] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','add', '-A'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','commit','-m',f"commit {config['modelName']}"] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','branch','-M','main'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) if first_upload: cmd = ['git','remote','add','origin', url] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','push','-f','-u','origin', 'main'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) else: cmd = ['git','push'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) return json.dumps({'Status':'SUCCESS'}) if __name__ == '__main__': try: if shutil.which('git') is None: raise ValueError("git is not installed on this system") parser = argparse.ArgumentParser() parser.add_argument('-c', '--config', help='Config file location or as a string') args = parser.parse_args() if Path(args.config).is_file() and Path(args.config).suffix == '.json': with open(args.config,'r') as f: config = json.load(f) else: config = json.loads(args.config) print(upload(config)) except Exception as e: status = {'Status':'Failure','msg':str(e)} print(json.dumps(status))
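A hedged sketch of the config consumed by upload(); every value below is a placeholder, 'url_type' defaults to 'https' when omitted, and the required keys mirror validate_config() above.

from git_upload import upload

# placeholder values only
config = {
    "url": "https://github.com/<org>/<repo>.git",
    "username": "<git-user>",
    "token": "<personal-access-token>",
    "email": "<git-user@example.com>",
    "modelName": "demo_usecase_1",
    "location": "/aion/target/demo_usecase_1",
    "gitFolderLocation": "/aion/git_upload"
}
print(upload(config))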
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
kafka_consumer.py
from kafka import KafkaConsumer from json import loads import pandas as pd import json import os,sys import time import multiprocessing from os.path import expanduser import platform import datetime modelDetails = {} class Process(multiprocessing.Process): def __init__(self, modelSignature,jsonData,predictedData,modelpath): super(Process, self).__init__() self.config = jsonData self.modelSignature = modelSignature self.data = predictedData self.modelpath = modelpath def run(self): #data = pd.json_normalize(self.data) minotoringService = self.config['minotoringService']['url'] trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature] #filetimestamp = 'AION_'+str(int(time.time()))+'.csv' #data.to_csv(dataFile, index=False) inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":self.data} inputFieldsJson = json.dumps(inputFieldsJson) ser_url = minotoringService+self.modelSignature+'/monitoring' driftTime = datetime.datetime.now() import requests try: response = requests.post(ser_url, data=inputFieldsJson,headers={"Content-Type":"application/json",}) outputStr=response.content outputStr = outputStr.decode('utf-8') outputStr = outputStr.strip() decoded_data = json.loads(outputStr) print(decoded_data) status = decoded_data['status'] msg = decoded_data['data'] except Exception as inst: if 'Failed to establish a new connection' in str(inst): status = 'Fail' msg = 'AION Service needs to be started' else: status = 'Fail' msg = 'Error during Drift Analysis' statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv') df = pd.DataFrame(columns = ['dateTime', 'status', 'msg']) df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True) print(df) if (os.path.exists(statusFile)): df.to_csv(statusFile, mode='a', header=False,index=False) else: df.to_csv(statusFile, header=True,index=False) def launch_kafka_consumer(): from appbe.dataPath import DATA_DIR configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf') with open(configfile,'r',encoding='utf-8') as f: jsonData = json.load(f) f.close() kafkaIP=jsonData['kafkaCluster']['ip'] kafkaport = jsonData['kafkaCluster']['port'] topic = jsonData['kafkaCluster']['topic'] kafkaurl = kafkaIP+':'+kafkaport if jsonData['database']['csv'] == 'True': database = 'csv' elif jsonData['database']['mySql'] == 'True': database = 'mySql' else: database = 'csv' kafkaPath = os.path.join(DATA_DIR,'kafka') if not (os.path.exists(kafkaPath)): try: os.makedirs(kafkaPath) except OSError as e: pass consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8'))) for message in consumer: message = message.value data = message['data'] data = pd.json_normalize(data) modelname = message['usecasename'] version = message['version'] modelSignature = modelname+'_'+str(version) modelpath = os.path.join(kafkaPath,modelSignature) try: os.makedirs(modelpath) except OSError as e: pass secondsSinceEpoch = time.time() if modelSignature not in modelDetails: modelDetails[modelSignature] = {} modelDetails[modelSignature]['startTime'] = secondsSinceEpoch if database == 'csv': csvfile = os.path.join(modelpath,modelSignature+'.csv') if (os.path.exists(csvfile)): data.to_csv(csvfile, mode='a', header=False,index=False) else: data.to_csv(csvfile, header=True,index=False) modelTimeFrame = jsonData['timeFrame'][modelSignature] currentseconds = time.time() 
print(currentseconds - modelDetails[modelSignature]['startTime']) if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame): csv_path = os.path.join(modelpath,modelSignature+'.csv') #predictedData = pd.read_csv(csv_path) ##predictedData = predictedData.to_json(orient="records") index = Process(modelSignature,jsonData,csv_path,modelpath) index.start() modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
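For clarity, a sketch of the message shape the consumer loop expects on the topic; the broker address, topic name and feature values below are hypothetical, and the producer must emit UTF-8 JSON to match the consumer's value_deserializer.

from json import dumps
from kafka import KafkaProducer

# hypothetical payload matching the fields read in launch_kafka_consumer()
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                         value_serializer=lambda v: dumps(v).encode('utf-8'))
producer.send('aion_topic', {
    "usecasename": "demo_usecase",
    "version": 1,
    "data": [{"feature_a": 1.2, "feature_b": 0.4}]
})
producer.flush()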
pattern.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np import sys import math import markov_clustering as mc import os import networkx as nx import logging import json ## How far you'd like your random-walkers to go (bigger number -> more walking) EXPANSION_POWER = 2 ## How tightly clustered you'd like your final picture to be (bigger number -> more clusters) INFLATION_POWER = 2 ## If you can manage 100 iterations then do so - otherwise, check you've hit a stable end-point. ITERATION_COUNT = 100 def normalize(matrix): return matrix/np.sum(matrix, axis=0) def expand(matrix, power): return np.linalg.matrix_power(matrix, power) def inflate(matrix, power): for entry in np.nditer(matrix, op_flags=['readwrite']): entry[...] = math.pow(entry, power) return matrix class pattern: def __init__(self,modelFeatures,targetFeature): self.modelFeatures = modelFeatures.split(',') self.targetFeature = targetFeature self.log = logging.getLogger('eion') def training(self,df,outputLocation): df["code"] = df[self.targetFeature].astype("category") df['code'] = df.code.cat.codes df2 = df[[self.targetFeature,'code']] df2 = df2.drop_duplicates() code_book = df2.to_dict('records') size = len(code_book) if self.targetFeature in self.modelFeatures: self.modelFeatures.remove(self.targetFeature) df['prev_code'] = df.groupby(self.modelFeatures)['code'].shift() df['prev_activity'] = df.groupby(self.modelFeatures)[self.targetFeature].shift() print(self.modelFeatures) df = df.dropna(axis=0, subset=['prev_code']) df['prev_code'] = df['prev_code'].astype('int32') matrix = np.zeros((size, size),float) np.set_printoptions(suppress=True) for index, row in df.iterrows(): matrix[int(row['prev_code'])][int(row['code'])] += 1 np.fill_diagonal(matrix, 1) matrix = normalize(matrix) pmatrix = matrix i = 0 records = [] for row in matrix: j = 0 for val in row: for event in code_book: if event['code'] == i: page = event[self.targetFeature] if event['code'] == j: nextpage = event[self.targetFeature] record = {} record['State'] = page record['NextState'] = nextpage record['Probability'] = round(val,2) records.append(record) j = j+1 i = i+1 df_probability = pd.DataFrame(records) self.log.info('Status:- |... StateTransition Probability Matrix') for _ in range(ITERATION_COUNT): matrix = normalize(inflate(expand(matrix, EXPANSION_POWER), INFLATION_POWER)) result = mc.run_mcl(matrix) # run MCL with default parameters c = 0 clusters = mc.get_clusters(matrix) # get clusters self.log.info('Status:- |... 
StateTransition Algorithm applied: MarkovClustering') clusterrecords = [] for cluster in clusters: clusterid = c clusterlist = '' for pageid in cluster: for event in code_book: if event['code'] == pageid: page = event[self.targetFeature] if clusterlist != '': clusterlist = clusterlist+',' clusterlist = clusterlist+page record = {} record['clusterid'] = c record['clusterlist'] = clusterlist clusterrecords.append(record) c = c+1 df_cluster = pd.DataFrame(clusterrecords) probabilityoutputfile = os.path.join(outputLocation, 'stateTransitionProbability.csv') self.log.info('-------> State Transition Probability Matrix:' + probabilityoutputfile) df_probability.to_csv(probabilityoutputfile,index=False) clusteringoutputfile = os.path.join(outputLocation, 'stateClustering.csv') self.log.info('-------> State Transition Probability Grouping:' + clusteringoutputfile) df_cluster.to_csv(clusteringoutputfile,index=False) datadetailsfile = os.path.join(outputLocation, 'datadetails.json') dataanalytics = {} dataanalytics['activity'] = self.targetFeature dataanalytics['sessionid'] = self.modelFeatures[0] updatedConfig = json.dumps(dataanalytics) with open(datadetailsfile, "w") as fpWrite: fpWrite.write(updatedConfig) fpWrite.close() evaulatemodel = '{"Model":"MarkovClustering","Score":0}' return(evaulatemodel,probabilityoutputfile,clusteringoutputfile)
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
runtime_utility.py
class aionRunTimeUtility: # def __init__(self): # print("AI.ON ConversionUtility function init...") def executeOnRuntime(self,inputModelName,inputDataSet): # print("AI.ON ConversionUtility function starts...") RuntimeType = inputModelName.rsplit('.', 1)[1] inputDataType = inputDataSet.rsplit('.', 1)[1] if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType.lower()=='json')): # print("Inference through ONNX Runtime started [ML]") import pandas import json with open(inputDataSet) as datafile: data = json.load(datafile) dataframe = pandas.DataFrame(data,index=[0]) import numpy import onnxruntime as rt sess = rt.InferenceSession(inputModelName) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name inputsize=sess.get_inputs()[0].shape first_n_column = dataframe.iloc[: , :inputsize[1]] dataset = first_n_column.values if(inputsize[1]!=len(dataframe.columns)): print("Error : Input Data size does not match") return 0 pred_onx = sess.run([label_name], {input_name: dataset.astype(numpy.float32)[0:1]})[0] # for i in range(0, 1): #print("ONNX Runtime Prediction [csv]: ",pred_onx) output = numpy.squeeze(pred_onx) predictions = numpy.squeeze(output) prediction = numpy.argmax(predictions) return(prediction) # print("Inference through ONNX modelcompleted ") if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType!='json')): import numpy as np import onnxruntime as rt from tensorflow.keras.preprocessing import image sess = rt.InferenceSession(inputModelName) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name inputsize=sess.get_inputs()[0].shape img = image.load_img(inputDataSet, target_size=(inputsize[1], inputsize[2])) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) pred_onx = sess.run([label_name], {input_name: x.astype(np.float32)[0:1]})[0] output = np.squeeze(pred_onx) predictions = np.squeeze(output) return(pred_onx) if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite')and (inputDataType=='json')): import numpy as np import tensorflow as tf import pandas from numpy import asarray interpreter = tf.lite.Interpreter(model_path=inputModelName) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]['shape'] import pandas import json with open(inputDataSet) as datafile: data = json.load(datafile) dataframe = pandas.DataFrame(data,index=[0]) dataset = dataframe.values XYZ = dataset[:,0:input_shape[1]].astype(float) input_data = asarray(XYZ[0]).reshape((1, input_shape[1])) for i in range(0, 1): input_data = asarray(XYZ[i]).reshape((1,input_shape[1])) interpreter.set_tensor(input_details[0]['index'], input_data.astype(np.float32)[0:1]) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) predictions = np.squeeze(output_data) prediction = np.argmax(predictions) return(prediction) if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite') and (inputDataType!='json')): import numpy as np from tensorflow.keras.preprocessing import image import os import tensorflow as tf import pandas from numpy import asarray interpreter = tf.lite.Interpreter(model_path=inputModelName) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]['shape'] img = image.load_img(inputDataSet, target_size=(input_shape[1], input_shape[2])) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) 
interpreter.set_tensor(input_details[0]['index'], x.astype(np.float32)[0:1]) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) predictions = np.squeeze(output_data) prediction = np.argmax(predictions) return(prediction) def runTimeTesting(inputModelName,inputDataSet): objRunTimeUtility=aionRunTimeUtility() return(objRunTimeUtility.executeOnRuntime(inputModelName,inputDataSet))
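A small usage sketch for the runtime utility, assuming the module is importable as runtime_utility and using hypothetical file names; the runtime (ONNX vs TFLite) and input type are inferred from the file extensions.

from runtime_utility import aionRunTimeUtility

# hypothetical files: an ONNX model plus a single-record JSON payload
utility = aionRunTimeUtility()
prediction = utility.executeOnRuntime("model.onnx", "record.json")
print(prediction)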
model_convertions.py
import os import sys import logging import json import joblib from pathlib import Path import platform from datetime import datetime as dt import time import argparse log = None def get_true_option(d, default_value=None): if isinstance(d, dict): for k,v in d.items(): if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): return k return default_value def convert_keras2onnx(input_model, output_file): import tensorflow as tf import tf2onnx from tensorflow.keras.models import load_model model = load_model(input_model) config = model.get_config() modelInputShape=config["layers"][0]["config"]["batch_input_shape"] spec = (tf.TensorSpec(modelInputShape, tf.float32, name="input"),) model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13, output_path=output_file) def convert_sklearn2onnx(input_model, file_path, input_shape=1): #print('Instead convert_sklearn2onnx') from skl2onnx import convert_sklearn #print('Instead convert_sklearn2onnx') from skl2onnx.common.data_types import FloatTensorType sklearn_model = joblib.load(input_model) sklearn_model_name= str(type(sklearn_model)).split(".")[-1][:-2] initial_type = [('float_input', FloatTensorType([None, input_shape]))] model = convert_sklearn(sklearn_model, initial_types=initial_type) with open(file_path, "wb") as f: f.write(model.SerializeToString()) def convert_xgboost2onnx(input_model, file_path, input_shape=1): from onnxmltools.convert import convert_xgboost from onnxmltools.convert.common.data_types import FloatTensorType xgBoost_model = joblib.load(input_model) if not xgBoost_model.n_estimators: xgBoost_model.n_estimators = xgBoost_model.get_num_boosting_rounds() n_features = xgBoost_model.n_features_in_ xgBoost_model.get_booster().feature_names = [f'f{x}' for x in range(n_features)] initial_type = [('float_input', FloatTensorType([None, xgBoost_model.n_features_in_]))] model = convert_xgboost(xgBoost_model, initial_types=initial_type) with open(file_path, "wb") as f: f.write(model.SerializeToString()) def convert_lightgbm2onnx(input_model, file_path): from onnxmltools.convert import convert_lightgbm from onnxmltools.convert.common.data_types import FloatTensorType lightGBM_model = joblib.load(input_model) initial_type = [('float_input', FloatTensorType([None, lightGBM_model.n_features_in_]))] model = convert_lightgbm(lightGBM_model, initial_types=initial_type, zipmap=False) with open(file_path, "wb") as f: f.write(model.SerializeToString()) def convert_coreml2onnx(input_model, file_path): import coremltools import onnxmltools coreml_model = coremltools.utils.load_spec(input_model) onnx_model = onnxmltools.convert_coreml(coreml_model) onnxmltools.utils.save_model(onnx_model, file_path) def convert_tflite2onnx(input_model, file_path): cmd = f"{sys.executable} -m tf2onnx.convert --opset 13 --tflite {str(input_model)} --output {str(file_path)}" os.system(cmd) def convert_tensorflow2onnx(input_model, file_path): import subprocess cmd = [sys.executable, '-m','tf2onnx.convert','--saved-model',str(input_model),'--output',str(file_path)] result = subprocess.check_output(cmd) result = result.decode('utf-8') def convert_libsvm2onnx(input_model, file_path): import onnxmltools import libsvm.svmutil as svmutil from onnxmltools.convert.libsvm import convert from onnxmltools.convert.common.data_types import FloatTensorType loaded_model=svmutil.svm_load_model(str(input_model)) model = convert(loaded_model, "node", [('input', FloatTensorType())]) onnxmltools.utils.save_model(model, file_path) def 
optimize_onnx(onnx_model_file, output_file_path): from onnxruntime.quantization import quantize_dynamic, QuantType quantize_dynamic(onnx_model_file, output_file_path, weight_type=QuantType.QUInt8) return True def convert_keras2tflite(input_model, file_path, optimized=False): import tensorflow as tf converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(input_model) if optimized: converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_types = [tf.float16] model = converter.convert() with open(file_path, 'wb') as f: f.write(model) def convert_tensorflow2tflite(input_model, file_path, optimized=False): import tensorflow as tf modelpath=str(input_model) #converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(input_model) converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(modelpath) if optimized: converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_types = [tf.float16] model = converter.convert() with open(file_path, 'wb') as f: f.write(model) class model_converter(): def __init__(self, model_path, output_path,input_format,output_format, shape=None): if not self.is_conversion_supported(input_format,output_format): raise ValueError(f"{input_format} to {output_format} is not supported") if not Path(model_path).exists(): raise ValueError(f"Model doen't exists at: {model_path}") self.model_path = Path(model_path) self.output_path = Path(output_path) self.output_path.mkdir(parents=True, exist_ok=True) self.input_format = input_format self.output_format = output_format self.shape = shape def is_conversion_supported(self, model_format, output_format): onnx_formats = ['onnx_standard','onnx_optimized'] tflite_formats = ['tflite_standard','tflite_optimized'] sagemaker_formats = ['sagemaker'] all_formats = onnx_formats + tflite_formats + sagemaker_formats formats = {'sklearn':onnx_formats + sagemaker_formats, 'keras':onnx_formats + tflite_formats, 'tensorflow':onnx_formats + tflite_formats, 'tflite':onnx_formats, 'lightgbm':onnx_formats, 'xgboost':onnx_formats, 'libsvm':onnx_formats,'coreml':['onnx_standard'] } if model_format in list(formats.keys()) and output_format in all_formats: if output_format in formats[model_format]: return True return False def convert(self): if self.output_format == 'onnx_standard': output_file = self.output_path/(self.model_path.stem + '.onnx') if self.input_format == 'sklearn': model = convert_sklearn2onnx(self.model_path, output_file, self.shape) elif self.input_format == 'keras': convert_keras2onnx(self.model_path, output_file) elif self.input_format == 'lightgbm': convert_lightgbm2onnx(self.model_path, output_file) elif self.input_format == 'xgboost': convert_xgboost2onnx(self.model_path, output_file) elif self.input_format == 'libsvm': convert_libsvm2onnx(self.model_path, output_file) elif self.input_format == 'coreml': convert_coreml2onnx(self.model_path, output_file) elif self.input_format == 'tflite': convert_tflite2onnx(self.model_path, output_file) elif self.input_format == 'tensorflow': convert_tensorflow2onnx(self.model_path, output_file) elif self.output_format == 'onnx_optimized': onnx_std_file = self.output_path/(self.model_path.stem + '_unquant.onnx') if onnx_std_file.exists(): onnx_std_file.unlink() output_file = self.output_path/(self.model_path.stem + 'Opt.onnx') if self.input_format == 'sklearn': convert_sklearn2onnx(self.model_path, onnx_std_file, self.shape) elif self.input_format == 'keras': convert_keras2onnx(self.model_path, onnx_std_file) elif 
self.input_format == 'lightgbm': convert_lightgbm2onnx(self.model_path, onnx_std_file) elif self.input_format == 'xgboost': convert_xgboost2onnx(self.model_path, onnx_std_file) elif self.input_format == 'libsvm': convert_libsvm2onnx(self.model_path, onnx_std_file) elif self.input_format == 'tflite': convert_tflite2onnx(self.model_path, onnx_std_file) elif self.input_format == 'tensorflow': convert_tensorflow2onnx(self.model_path, onnx_std_file) if onnx_std_file.exists(): try: optimize_onnx(onnx_std_file, output_file) except Exception as e: raise finally: onnx_std_file.unlink() temp_file = onnx_std_file.parent/(onnx_std_file.stem + '-opt.onnx') if temp_file.exists(): temp_file.unlink() elif self.output_format in ['tflite_standard', 'tflite_optimized']: if self.output_format == 'tflite_optimized': output_file = self.output_path/(self.model_path.stem + 'Opt.tflite') optimized = True else: output_file = self.output_path/(self.model_path.stem + '.tflite') optimized = False if self.input_format == 'keras': convert_keras2tflite(self.model_path, output_file, optimized) elif self.input_format == 'tensorflow': convert_tensorflow2tflite(self.model_path, output_file, optimized) def run(model_path, output_path, input_format, output_format, input_shape=None): from appbe.dataPath import LOG_LOCATION input_format = input_format.lower() output_format = output_format.lower() log_file_path = Path(LOG_LOCATION) log_file_path.mkdir(parents=True, exist_ok=True) time_stamp = dt.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S') fileName='modelConversion_'+time_stamp+'.log' filehandler = logging.FileHandler(log_file_path/fileName, 'w','utf-8') formatter = logging.Formatter('%(message)s') filehandler.setFormatter(formatter) log = logging.getLogger('modelConversionUtility') log.propagate = False for hdlr in log.handlers[:]: # remove the existing file handlers if isinstance(hdlr,logging.FileHandler): log.removeHandler(hdlr) log.addHandler(filehandler) log.setLevel(logging.INFO) log.info('------------------ModelConversionUtility---------------------') log.info(f'Input model path: {model_path}') log.info(f'Output model path: {output_path}') log.info(f'Input model format: {input_format}') log.info(f'Output model format: {output_format}') log.info(f'\nConverting {input_format} to {output_format} start:') output ={} output['logfiles'] = str(log_file_path/fileName) log.info(f"\nExecution Start Time: {dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')}") try: start_time = time.time() objConvUtility=model_converter(model_path,output_path,input_format,output_format,input_shape) objConvUtility.convert() end_time = time.time() log.info(f"Time required for conversion: {end_time - start_time} sec") log.info(f'\nConverting {input_format} to {output_format} Successful') output['Convert'] = "Success" except Exception as e: output['Convert'] = "Error" log.info('Error: ' + str(e)) log.error(e, exc_info=True) if 'not supported' in str(e): output['sub error'] = "Not supported" output = json.dumps(output) log.info(f'Output: {output}') return output def convert(config_file): with open(config_file, 'r') as f: config = json.load(f) model_path = config['advance']['aionConversionUtility']['modelpath'] output_path = config['advance']['aionConversionUtility']['deployedlocation'] input_format = get_true_option(config['advance']['aionConversionUtility']['inputModelType'],'').lower() output_format = get_true_option(config['advance']['aionConversionUtility']['outputModelType'],'').lower() if input_format=="keras": input_shape = 
int(config['advance']['aionConversionUtility']['inputShape']) if input_format!="keras": input_shape = config['advance']['aionConversionUtility']['numberoffeatures'] input_shape = int(input_shape) if input_shape else 0 #input_shape = int(config['advance']['aionConversionUtility']['numberoffeatures']) output = run(model_path, output_path, input_format, output_format, input_shape) print(output)
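A hedged example of driving a conversion directly through the model_converter class defined above, assuming a hypothetical scikit-learn model saved with joblib and four input features.

from model_convertions import model_converter

# hypothetical paths; shape is the number of input features expected by the sklearn model
converter = model_converter("/aion/models/classifier.pkl", "/aion/models/converted",
                            "sklearn", "onnx_standard", shape=4)
converter.convert()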
run_onnxinference.py
import sys

import numpy
import pandas
import onnxruntime as rt

def onnx_runtime_validation(modelfile, datafile):
    dataframe = pandas.read_csv(datafile)
    df = dataframe.head(8)
    dataset = df.values
    sess = rt.InferenceSession(modelfile)
    input_name = sess.get_inputs()[0].name
    label_name = sess.get_outputs()[0].name
    inputsize = sess.get_inputs()[0].shape
    XYZ = dataset[:, 0:inputsize[1]].astype(float)
    pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]
    print("Prediction of AION generated/converted model on ONNX runtime for 8 sets of data")
    # collect one prediction per row instead of overwriting the column with the last row's output
    predictions = []
    for i in range(0, 8):
        output = numpy.squeeze(pred_onx[i])
        predictions.append(numpy.argmax(numpy.squeeze(output)))
    df['predictions'] = predictions
    result = df.to_json(orient="records")
    return result

if __name__ == "__main__":
    output = onnx_runtime_validation(sys.argv[1], sys.argv[2])
    print("predictions:", output)
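A quick sketch of how the script might be exercised from Python rather than the command line, with placeholder file names.

# equivalent to: python run_onnxinference.py <model.onnx> <data.csv>
from run_onnxinference import onnx_runtime_validation

print(onnx_runtime_validation("model.onnx", "validation_data.csv"))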
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
aionNAS.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from numpy import mean from numpy import std from pandas import read_csv from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer from learner.machinelearning import machinelearning # from sklearn.dummy import DummyClassifier # create histograms of numeric input variables import sys import os import re import pandas as pd import numpy as np from learner.aion_matrix import aion_matrix import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) import autokeras as ak # load the sonar dataset from sklearn.model_selection import train_test_split # from sklearn.metrics import cohen_kappa_score # from sklearn.metrics import roc_auc_score # from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve from math import sqrt from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error from sklearn import metrics class aionNAS: def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation): try: self.dfFeatures=None self.nas_class=nas_class self.nas_params=nas_params self.targetFeature=None self.log = logging.getLogger('eion') self.n_models=int(self.nas_params['n_models']) self.n_epochs=int(self.nas_params['n_epochs']) self.optimizer=self.nas_params['optimizer'] self.metrics=self.nas_params['metrics'] self.tuner=self.nas_params['tuner'] self.seed=int(self.nas_params['seed']) self.xtrain = xtrain1 self.xtest = xtest1 self.ytrain = ytrain1 self.ytest = ytest1 #self.labelMaps = labelMaps self.deployLocation=deployLocation except Exception as e: self.log.info('<!------------- NAS INIT Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def paramCheck(self): try: if not (self.nas_class): self.log.info('<!------------- NAS class input Error ---------------> ') if not (self.nas_params): self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ') if not (self.targetFeature): self.log.info('<!------------- NAS model targetFeature input Error ---------------> ') if (self.n_models < 1): self.n_models=1 if not (self.dfFeatures): self.log.info('<!------------- NAS model features Error ---------------> ') if (self.n_epochs < 1): self.n_models=1 if not (self.optimizer): self.optimizer="adam" if not (self.tuner): self.tuner="greedy" if (self.seed < 1): self.seed=0 if not (self.metrics): self.metrics=None except ValueError: self.log.info('<------------------ NAS config file error. 
--------------->') def recall_m(self,y_true, y_pred): true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) possible_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + tf.keras.backend.epsilon()) return recall def precision_m(self,y_true, y_pred): true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) predicted_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + tf.keras.backend.epsilon()) return precision def f1_score(self,y_true, y_pred): precision = self.precision_m(y_true, y_pred) recall = self.recall_m(y_true, y_pred) return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon())) def nasStructdataPreprocess(self): df=self.data self.paramCheck() target=df[self.targetFeature].values counter = Counter(target) for k,v in counter.items(): per = v / len(target) * 100 self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per)) # select columns with numerical data types num_ix = df.select_dtypes(include=['int64', 'float64']).columns subset = df[num_ix] last_ix = len(df.columns) - 1 y=df[self.targetFeature] X = df.drop(self.targetFeature, axis=1) #Using Pearson Correlation # plt.figure(figsize=(12,10)) # cor = df.corr() # sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) # plt.show() # select categorical features cat_ix = X.select_dtypes(include=['object', 'bool']).columns # one hot encode cat features only ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough') X = X.reset_index() X=X.replace(to_replace="NULL",value=0) X = X.dropna(how='any',axis=0) X = ct.fit_transform(X) from sklearn.preprocessing import scale X = scale(X) # label encode the target variable to have the classes 0 and 1 y = LabelEncoder().fit_transform(y) # separate into train and test sets X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1) return X_train, X_test, y_train, y_test def nasStructClassification(self,scoreParam): try: objClf = aion_matrix() X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest modelName="nas_structdata_classifier" self.log.info("Processing structured data block...\n") s_in = ak.StructuredDataInput() #s_in = Flatten()(s_in) s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) self.log.info("Data pipe via autokeras Classification Dense layers ...\n") s_out = ak.ClassificationHead()(s_out) self.log.info("applying autokeras automodel to run different neural models...\n") try: tuner = str(self.tuner).lower() except UnicodeEncodeError: tuner = (self.tuner.encode('utf8')).lower() nasclf = ak.AutoModel( inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models, seed=self.seed) # compile the model #nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m]) nasclf.fit(X_train, y_train, epochs=self.n_epochs) best_model = nasclf.export_model() mpredict=best_model.predict(X_test) mtpredict=best_model.predict(X_train) #loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0) #from sklearn.metrics import classification_report #Classification report y_pred_bool = np.argmax(mpredict, axis=1) y_train_pred_bool = np.argmax(mtpredict, axis=1) score = 
objClf.get_score(scoreParam,y_test, y_pred_bool) #best_model = nasclf.export_model() best_model_summary=best_model.summary() filename = os.path.join(self.deployLocation,'log','summary.txt') with open(filename,'w') as f: best_model.summary(print_fn=lambda x: f.write(x + '\n')) f.close() #self.log.info("==========") #self.log.info(best_model_summary) self.log.info("NAS struct data classification, best model summary: \n"+str(best_model.summary(print_fn=self.log.info))) #self.log.info("==========") #Save and load model # # #try: # try: # best_model.save("model_class_autokeras", save_format="tf") # except Exception: # best_model.save("model_class_autokeras.h5") # loaded_model = load_model("model_class_autokeras", custom_objects=ak.CUSTOM_OBJECTS) # loadedmodel_predict=loaded_model.predict(X_test) loss,accuracy_m=nasclf.evaluate(X_test, y_test) #mpredict_classes = mpredict.argmax(axis=-1) #accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int)) # precision tp / (tp + fp) #precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro') # recall: tp / (tp + fn) #recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro') #f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average="macro") self.log.info("Autokeras struct data classification metrics: \n") except Exception as inst: self.log.info("Error: NAS failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) print(inst) return modelName,nasclf,score def nasStructRegressor(self,scoreParam): objClf = aion_matrix() modelName="nas_struct_regressor" #self.paramCheck() X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest # Autokeras alg s_in = ak.StructuredDataInput() #tf.keras.layers.GlobalMaxPooling2D()(s_in) s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) self.log.info("Data pipe via autokeras Regression Dense layers ...\n") s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out) self.log.info("applying autokeras automodel to evaluate different neural models...\n") try: tuner = str(self.tuner).lower() except UnicodeEncodeError: tuner = (self.tuner.encode('utf8')).lower() nas_reg = ak.AutoModel( inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models) nas_reg.fit(X_train, y_train, epochs=self.n_epochs) best_model = nas_reg.export_model() self.log.info("NAS struct data regression best model summary: \n") best_model_summary=best_model.summary(print_fn=self.log.info) self.log.info(best_model_summary) predictm=best_model.predict(X_test) mtpredict=best_model.predict(X_train) score = objClf.get_score(scoreParam,y_test, predictm) self.log.info("Autokeras struct data regression metrics: \n") return modelName,nas_reg,score def nasMain(self,scoreParam): modelName = "" nasclf=None nas_reg=None #text_reg_model=None mse_value=0 reg_rmse=0 mape_reg=0 huber_loss_reg=0 accuracy=0 precision=0 recall=0 #Dummy values to return main for classification problems dummy_score_1=int(0) #dummy_score_2=int(0) try: if ((self.nas_class.lower() == "classification")): modelName,nasclf,score=self.nasStructClassification(scoreParam) self.log.info('NAS Struct Classification score: '+str(score)) best_model_nas = nasclf.export_model() scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}' return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1 elif (self.nas_class.lower() == "regression"): 
modelName,nas_reg,score =self.nasStructRegressor(scoreParam) self.log.info('NAS Struct Regression score: '+str(score)) best_model_nas = nas_reg.export_model() ''' filename = os.path.join(self.deployLocation,'model','autoKerasModel') best_model_nas = nas_reg.export_model() try: best_model_nas.save(filename, save_format="tf") modelName = 'autoKerasModel' except Exception: filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5') best_model_nas.save(filename) modelName = 'autoKerasModel.h5' ''' scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}' ''' error_matrix = '"MSE":"'+str(round(mse_value,2))+'","RMSE":"'+str(round(reg_rmse,2))+'","MAPE":"'+str(round(mape_reg,2))+'","MSLE":"'+str(round(msle_reg,2))+'"' ''' return best_model_nas,self.nas_params,score,'NAS' else: pass except Exception as inst: print(inst) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) output = {"status":"FAIL","message":str(inst).strip('"')} output = json.dumps(output)
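# --- Illustrative sketch (hedged reconstruction, not the original method bodies) ---
# The recall_m/precision_m/f1_score helpers above call tf.keras.metrics.Sum on a
# tensor; Sum is a stateful Metric class, so the intent appears to be the classic
# backend-sum formulation shown below.
import tensorflow as tf

K = tf.keras.backend

def recall_m(y_true, y_pred):
    # true positives / all actual positives
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    # true positives / all predicted positives
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def f1_score(y_true, y_pred):
    # harmonic mean of precision and recall
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))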
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
survival_analysis.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''# -*- coding: utf-8 -*- """ @author: satish_k """ import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statistics from sklearn.impute import SimpleImputer from sklearn.model_selection import train_test_split from lifelines import KaplanMeierFitter, CoxPHFitter from lifelines.statistics import logrank_test from scipy import stats import logging class SurvivalAnalysis(object): def __init__(self, df, method, event_column, duration_column, fitter_param=None, df_negate=None ): pd.options.display.width = 30 self.df = df self.fitter_param = fitter_param self.method = method self.event_column = event_column self.duration_column = duration_column self.models = [] self.train = df.drop_duplicates().reset_index() self.test = None if isinstance(df_negate, pd.DataFrame): self.df_n = df_negate.drop_duplicates().reset_index() else: self.df_n = None self.log = logging.getLogger('eion') self.plots = [] def learn(self): self.log.info('\n---------- SurvivalAnalysis learner has started ----------') self.log.info('\n---------- SurvivalAnalysis learner method is "%s" ----------'%self.method) lifelines_univariate_models = ["AalenJohansenFitter", "BreslowFlemingHarringtonFitter", "ExponentialFitter", "GeneralizedGammaFitter", "KaplanMeierFitter", "LogLogisticFitter", "LogNormalFitter", "MixtureCureFitter", "NelsonAalenFitter", "PiecewiseExponentialFitter", "SplineFitter", "WeibullFitter"] lifelines_regression_models = ["AalenAdditiveFitter", "CRCSplineFitter", "CoxPHFitter", "CoxTimeVaryingFitter", "GeneralizedGammaRegressionFitter", "LogLogisticAFTFitter", "LogNormalAFTFitter", "PiecewiseExponentialRegressionFitter", "WeibullAFTFitter"] if self.method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']: self.log.info('\n---------- SurvivalAnalysis learner method "%s" has started ----------'%self.method) #from lifelines.utils import find_best_parametric_model #m,s = find_best_parametric_model(event_times=self.df[self.duration_column]) if not isinstance(self.df_n, pd.DataFrame): kmf = KaplanMeierFitter() self.log.info('\n Shape of training data - %s'%str(self.train.shape)) T = self.train[self.duration_column] E = self.train[self.event_column] self.log.info('\n T : \n%s'%str(T)) self.log.info('\n E : \n%s'%str(E)) K = kmf.fit(T, E) ax = plt.subplot(111) kmf_sf = K.survival_function_ ax = kmf_sf.plot(ax=ax) kmf_sf_json = self.survival_probability_to_json(kmf_sf) self.models.append(K) plt.title("KM Survival Functions") self.plots.append(plt) self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner has ended ----------') return kmf_sf_json else: kmf1 = KaplanMeierFitter() kmf2 = KaplanMeierFitter() T1 = self.train[self.duration_column] E1 = self.train[self.event_column] #self.df_n = self.df_n.drop('fin', axis=1) T2 = self.df_n[self.duration_column] E2 = self.df_n[self.event_column] ax = plt.subplot(111) 
plt.title("KM Survival Functions - Filter vs Negation") self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------'%self.method) kmf1.fit(T1, E1) ax = kmf1.plot(ax=ax, label='%s'%self.fitter_param) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------'%self.method) kmf2.fit(T2, E2) ax = kmf2.plot(ax=ax, label='~%s'%self.fitter_param) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------'%self.method) self.models.extend([kmf1,kmf2]) kmf1_sf = kmf1.survival_function_ kmf2_sf = kmf2.survival_function_ kmf1_sf_json = self.survival_probability_to_json(kmf1_sf) self.plots.append(plt) self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner has ended ----------') return kmf1_sf_json elif self.method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']: self.log.info('\n---------- SurvivalAnalysis learner method "%s" has started ----------'%self.method) #from lifelines.utils import k_fold_cross_validation if not isinstance(self.df_n, pd.DataFrame): cph = CoxPHFitter() C = cph.fit(self.train, self.duration_column, self.event_column, show_progress=True) self.models.append(C) cph_sf = C.baseline_survival_ ax = plt.subplot(111) ax = C.plot(ax=ax) cph_sf_json = self.survival_probability_to_json(cph_sf) self.log.info('\n Summary : \n%s'%str(C.summary)) plt.title("COX hazard ratio") self.plots.append(plt) self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner has ended ----------') #plt.show() return cph_sf_json else: cph1 = CoxPHFitter(penalizer=0.0001) cph2 = CoxPHFitter(penalizer=0.0001) ax = plt.subplot(211) plt.title("COX hazard ratio - [%s](Top) vs [~(%s)](Bottom)"%(self.fitter_param,self.fitter_param)) #self.train = self.train.drop('fin',axis=1) self.df_n = self.drop_constant_features(self.df_n) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------'%self.method) cph1.fit(self.train, self.duration_column, self.event_column, show_progress=True, step_size=0.4) ax = cph1.plot(ax=ax, label='%s'%self.fitter_param) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------'%self.method) cph2.fit(self.df_n, self.duration_column, self.event_column, show_progress=True, step_size=0.4) ax = plt.subplot(212) ax = cph2.plot(ax=ax, label='~%s'%self.fitter_param) self.log.info('\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------'%self.method) self.models.extend([cph1,cph2]) cph1_sf = cph1.baseline_survival_ cph2_sf = cph2.baseline_survival_ cph1_sf_json = self.survival_probability_to_json(cph1_sf) #plt.show() plt.tight_layout() self.plots.append(plt) self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method) self.log.info('\n---------- SurvivalAnalysis learner has ended ----------') return cph1_sf_json def survival_probability_to_json(self, sf): ''' sf = Survival function i.e. 
KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_ returns json of survival probabilities ''' sf = sf[sf.columns[0]].apply(lambda x: "%4.2f"%(x*100)) self.log.info('\n Survival probabilities : \n%s'%str(sf)) sf = sf.reset_index() sf = sf.sort_values(sf.columns[0]) sf_json = sf.to_json(orient='records') self.log.info('\n Survival prbability json : \n%s'%str(sf_json)) return sf_json def drop_constant_features(self, df): for col in df.columns: if len(df[col].unique()) == 1: df.drop(col,inplace=True,axis=1) return df def predict(self): if self.method == 'KaplanMeierFitter': return self.model.predict(self.test[self.duration_column]) #kmf.predict() #kmf.median_survival_time_ #from lifelines.utils import median_survival_times #median_ci = median_survival_times(kmf.confidence_interval_) elif self.method == 'CoxPHFitter': #print('train score',self.model.score(self.train)) #print('test score',self.model.score(self.test)) return self.model.predict_survival_function(self.test) #cph.predict_cumulative_hazard() #cph.predict_expectation() #cph.predict_log_partial_hazard() #cph.predict_median() #cph.predict_partial_hazard() #cph.predict_percentile() #cph.predict_survival_function() #cph.predict_hazard() #cph.score() #cph.summary() #if __name__ == "__main__": # data_file = r"C:\Users\satish_k\Desktop\Work\input\echocardiogram.csv" # #data_file = r"C:\Users\satish_k\Desktop\Work\input\lymphoma.csv" # method = "CoxPHFitter" # event_column = "alive" # duration_column = "survival" # sa = SurvivalAnalysis(data_file, method, event_column, duration_column) # sa.profiler() # model = sa.learn() # print(sa.predict()) #print(model.survival_function_)
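# --- Illustrative sketch: the Kaplan-Meier path of SurvivalAnalysis.learn() on
# synthetic data. Column names and values are made up; this only mirrors the
# fit -> survival_function_ -> JSON flow implemented above.
import pandas as pd
from lifelines import KaplanMeierFitter

toy = pd.DataFrame({'survival': [5, 8, 12, 20, 22, 30],
                    'alive':    [1, 0, 1, 1, 0, 1]})

kmf = KaplanMeierFitter()
kmf.fit(toy['survival'], toy['alive'])

sf = kmf.survival_function_
sf = sf[sf.columns[0]].apply(lambda x: "%4.2f" % (x * 100)).reset_index()
sf = sf.sort_values(sf.columns[0])
print(sf.to_json(orient='records'))   # survival probability per timeline point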
DebiasingManager.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import numpy as np
import logging
logging.getLogger('tensorflow').disabled = True
import aif360
from aif360.datasets import StandardDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing
from aif360.algorithms.preprocessing import DisparateImpactRemover

class DebiasingManager:
    def __init__(self):
        self.data = ''

    # ------------------------------- Debiasing Changes -------------------------------
    def get_attributes(self, data, selected_attr=None):
        unprivileged_groups = []
        privileged_groups = []
        if selected_attr == None:
            selected_attr = data.protected_attribute_names
        for attr in selected_attr:
            idx = data.protected_attribute_names.index(attr)
            privileged_groups.append({attr: data.privileged_protected_attributes[idx]})
            unprivileged_groups.append({attr: data.unprivileged_protected_attributes[idx]})
        return privileged_groups, unprivileged_groups

    # ------------------------------- -------------------------------
    def Bias_Mitigate(self, dataFrame, protected_feature, privileged_className, target_feature, algorithm):
        # log = logging.getLogger('eion')
        # log.propagate = False
        data_encoded = dataFrame.copy()
        categorical_names = {}
        encoders = {}
        dataFrame = dataFrame.replace('Unknown', 'NA')
        dataFrame = dataFrame.replace(np.nan, 'NA')
        try:
            # Label-Encoding
            for feature in dataFrame.columns:
                le = LabelEncoder()
                le.fit(data_encoded[feature])
                data_encoded[feature] = le.transform(data_encoded[feature])
                categorical_names[feature] = le.classes_
                encoders[feature] = le
            privileged_class = np.where(categorical_names[protected_feature] == privileged_className)[0]
            target_feature_count = len(data_encoded[target_feature].value_counts())
            # Check if it's BinaryLabel
            if target_feature_count == 2:
                binaryLabelDataset = aif360.datasets.BinaryLabelDataset(
                    favorable_label='1',
                    unfavorable_label='0',
                    df=data_encoded,
                    label_names=[target_feature],
                    protected_attribute_names=[protected_feature])
                data_orig = binaryLabelDataset
            # Check if it's Non-BinaryLabel
            if target_feature_count > 2:
                data_orig = StandardDataset(data_encoded,
                                            label_name=target_feature,
                                            favorable_classes=[1],
                                            protected_attribute_names=[protected_feature],
                                            privileged_classes=[privileged_class])
            if algorithm == 'DIR':
                DIR = DisparateImpactRemover(repair_level=0.9)
                data_transf_train = DIR.fit_transform(data_orig)
                # log.info('Status:-|... DIR applied on input dataset')
            else:
                privileged_groups, unprivileged_groups = self.get_attributes(data_orig, selected_attr=[protected_feature])
                RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
                data_transf_train = RW.fit_transform(data_orig)
                # log.info('Status:-|... Reweighing applied on input dataset')
            transf_dataFrame = data_transf_train.convert_to_dataframe()[0]
            data_decoded = transf_dataFrame.copy().astype('int')
            for column in data_decoded.columns:
                data_decoded[column] = encoders[column].inverse_transform(data_decoded[column])
            debiased_dataFrame = data_decoded
        except Exception as e:
            print(e)
            debiased_dataFrame = dataFrame
        return debiased_dataFrame
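# --- Illustrative sketch: the Reweighing branch of Bias_Mitigate() on a toy,
# already label-encoded frame. Feature names and group encodings are assumptions
# made only for this example.
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing

toy = pd.DataFrame({'gender':   [1, 0, 0, 1, 0, 1],   # 1 = privileged group
                    'income':   [0, 1, 0, 1, 1, 0],
                    'approved': [1, 0, 0, 1, 1, 0]})  # binary target

dataset = BinaryLabelDataset(favorable_label=1, unfavorable_label=0,
                             df=toy, label_names=['approved'],
                             protected_attribute_names=['gender'])

rw = Reweighing(unprivileged_groups=[{'gender': 0}],
                privileged_groups=[{'gender': 1}])
transformed = rw.fit_transform(dataset)
# convert_to_dataframe() returns (DataFrame, metadata); Reweighing stores its
# result as per-instance weights rather than changed feature values
print(transformed.convert_to_dataframe()[0].head())
print(transformed.instance_weights[:5])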
actian.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pyodbc as pyodbc import pandas as pd import json def simple_select(c, sql_query, bind_params=None, display_sql=False): """where c is a cursor""" if bind_params is None: c.execute(sql_query) else: if display_sql: c.execute(sql_query, bind_params) headers = [] if c.description is not None: # We have a SELECT statement for x in c.description: headers.append(x[0]) row_count = 0 row = c.fetchone() data=[] while row: row_count += 1 xrow={} for i in range(len(row)): xrow[headers[i]] = row[i] data.append(xrow) row = c.fetchone() #df = pd.DataFrame(data) return(data) def validatequery(request,query): resultdata = [] try: server_url = request.session['server_url'] username_actian = request.session['username'] password_actian = request.session['password'] database_actian = request.session['database'] conn = get_connection(server_url,username_actian,password_actian,database_actian) sql_text = query cur = conn.cursor() resultdata = simple_select(cur, query) cur.close() if len(resultdata) > 0: return "Query executed successfully" else: return "No rows returned" except Exception as e: print(e) return str(e) def executequery(request,query): resultdata = [] try: server_url = request.session['server_url'] username_actian = request.session['username'] password_actian = request.session['password'] database_actian = request.session['database'] conn = get_connection(server_url,username_actian,password_actian,database_actian) sql_text = query cur = conn.cursor() resultdata = simple_select(cur, query) cur.close() return(resultdata) except Exception as e: print(e) return(resultdata) def list_tables_fields(request,table_list): table_field_obj = {} table_field_obj['data'] = [] try: server_url = request.session['server_url'] username_actian = request.session['username'] password_actian = request.session['password'] database_actian = request.session['database'] table_list = json.loads(table_list) conn = get_connection(server_url,username_actian,password_actian,database_actian) for table in table_list: tf_obj = {} tf_obj['TableName'] = str(table).strip() tf_obj['Fields']= [] field_list = [] sql_text = "SELECT column_name, false as is_select FROM iicolumns WHERE table_name='"+table+"'" cur = conn.cursor() field_list = simple_select(cur, sql_text) cur.close() print(field_list) tf_obj['Fields'] = field_list table_field_obj['data'].append(tf_obj) print("----------------------") print(table_field_obj) print(json.dumps(table_field_obj)) print("----------------------") return json.dumps(table_field_obj) except Exception as e: print("Something went wrong "+str(e)) return table_field_obj def list_tables(request): server_url = request.session['server_url'] username_actian = request.session['username'] password_actian = request.session['password'] database_actian = request.session['database'] dt_list = [] try: conn = get_connection(server_url,username_actian,password_actian,database_actian) sql_text = "select table_name from iitables where table_type='T' and table_owner='"+username_actian+"'" cur = 
conn.cursor() dt_list = simple_select(cur, sql_text) cur.close() return dt_list except: print("Something went wrong") return dt_list def get_connection(server_url,username_actian,password_actian,database_actian): conn = pyodbc.connect("driver=Ingres;servertype=ingres;server=@"+str(server_url)+",tcp_ip,VW;uid="+str(username_actian)+";pwd="+str(password_actian)+";database="+str(database_actian)) print("connected") return conn def getDataFromActianAvalanche(request): server_url = request.POST.get('server_url') username_actian = request.POST.get('username') password_actian = request.POST.get('password') database_actian = request.POST.get('database') table_actian = request.POST.get('table') conn = get_connection(server_url,username_actian,password_actian,database_actian) c = conn.cursor() sql_text = "select * from "+str(table_actian) data = simple_select(c, sql_text) df = pd.DataFrame(data) return(df)
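# --- Illustrative sketch: calling the helpers above outside a Django request.
# Host, credentials, database and table names are placeholders.
conn = get_connection('avalanche-host.example.com', 'dbuser', 'dbpassword', 'mydb')
cur = conn.cursor()
rows = simple_select(cur, "SELECT * FROM mytable")
cur.close()
print(rows)   # list of {column_name: value} dicts, one per row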
dataPath.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
from os.path import expanduser
import platform

DEFAULT_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),'conf')
cur_dir = os.path.dirname(os.path.abspath(__file__))
home = expanduser("~")
if platform.system() == 'Windows':
    DATA_DIR = os.path.normpath(os.path.join(cur_dir,'..','..','..','..','..','..','data'))
    DATA_FILE_PATH = os.path.join(DATA_DIR,'storage')
    CONFIG_FILE_PATH = os.path.join(DATA_DIR,'config')
    DEPLOY_LOCATION = os.path.join(DATA_DIR,'target')
    LOG_LOCATION = os.path.join(DATA_DIR,'logs')
    LOG_FILE = os.path.join(DATA_DIR,'logs','ux.log')
else:
    DATA_DIR = os.path.join(home,'HCLT','data')
    DATA_FILE_PATH = os.path.join(DATA_DIR,'storage')
    CONFIG_FILE_PATH = os.path.join(DATA_DIR,'config')
    DEPLOY_LOCATION = os.path.join(DATA_DIR,'target')
    LOG_FILE = os.path.join(DATA_DIR,'logs','ux.log')
    LOG_LOCATION = os.path.join(DATA_DIR,'logs')
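# --- Illustrative sketch: these constants are imported elsewhere in the app
# (e.g. pages.py below does "from appbe.dataPath import ..."); the use-case
# folder name here is a placeholder.
from appbe.dataPath import DATA_DIR, DEPLOY_LOCATION
import os

model_dir = os.path.join(DEPLOY_LOCATION, 'my_usecase', '1')
print(DATA_DIR)
print(model_dir)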
pages.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys from appbe import compute from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from records import pushrecords from appbe import service_url import json import time import pandas as pd from django.db.models import Max, F from os.path import expanduser import platform from appbe.data_io import sqlite_db import subprocess from appbe.dataPath import DEFAULT_FILE_PATH from appbe.dataPath import DATA_FILE_PATH from appbe.dataPath import CONFIG_FILE_PATH from appbe.dataPath import DEPLOY_LOCATION from appbe.dataPath import DATA_DIR DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite') def pushRecordForTraining(): from appbe.pages import getversion AION_VERSION = getversion() try: status,msg = pushrecords.enterRecord(AION_VERSION) except Exception as e: print("Exception", e) status = False msg = str(e) return status,msg def getversion(): configFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config') version = 'NA' for file in os.listdir(configFolder): if file.endswith(".var"): version = file.rsplit('.', 1) version = version[0] break return version def getusercasestatus(request): if 'UseCaseName' in request.session: selected_use_case = request.session['UseCaseName'] else: selected_use_case = 'Not Defined' if 'ModelVersion' in request.session: ModelVersion = request.session['ModelVersion'] else: ModelVersion = 0 if 'ModelStatus' in request.session: ModelStatus = request.session['ModelStatus'] else: ModelStatus = 'Not Trained' return selected_use_case,ModelVersion,ModelStatus def getMLModels(configSettingsJson): mlmodels ='' dlmodels = '' problem_type = "" problemtypes = configSettingsJson['basic']['analysisType'] for k in problemtypes.keys(): if configSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break sc = "" if problemtypes in ['classification','regression','survivalAnalysis']: scoringCreteria = configSettingsJson['basic']['scoringCriteria'][problem_type] for k in scoringCreteria.keys(): if configSettingsJson['basic']['scoringCriteria'][problem_type][k] == 'True': sc = k break else: sc = 'NA' if problem_type in ['classification','regression']: algorihtms = configSettingsJson['basic']['algorithms'][problem_type] #print(algorihtms) for k in algorihtms.keys(): #print(configSettingsJson['basic']['algorithms'][problem_type][k]) if configSettingsJson['basic']['algorithms'][problem_type][k] == 'True': if k in ['SNN','RNN','CNN']: if dlmodels != '': dlmodels += ', ' dlmodels += k else: if mlmodels != '': mlmodels += ', ' mlmodels += k elif problem_type in ['videoForecasting','imageClassification','objectDetection']: algorihtms = configSettingsJson['basic']['algorithms'][problem_type] for k in algorihtms.keys(): if configSettingsJson['basic']['algorithms'][problem_type][k] == 'True': if dlmodels != '': dlmodels += ', ' dlmodels += k else: algorihtms = configSettingsJson['basic']['algorithms'][problem_type] for k in algorihtms.keys(): if 
configSettingsJson['basic']['algorithms'][problem_type][k] == 'True': if mlmodels != '': mlmodels += ', ' mlmodels += k displayProblemType = problem_type selected_model_size = '' if problem_type.lower() == 'llmfinetuning': displayProblemType = 'LLM Fine-Tuning' supported_model_types = configSettingsJson['basic']['modelSize'][problem_type][mlmodels] for k in supported_model_types.keys(): if configSettingsJson['basic']['modelSize'][problem_type][mlmodels][k] == 'True': selected_model_size = k break #print(selected_model_size) if mlmodels == 'TF_IDF': mlmodels = 'TF-IDF' if mlmodels == 'LatentSemanticAnalysis': mlmodels = 'Latent Semantic Analysis (LSA)' if mlmodels == 'SentenceTransformer_distilroberta': mlmodels = 'SentenceTransformer (DistilRoBERTa)' if mlmodels == 'SentenceTransformer_miniLM': mlmodels = 'SentenceTransformer (MiniLM)' if mlmodels == 'SentenceTransformer_mpnet': mlmodels = 'SentenceTransformer (MPNet)' return(problem_type,displayProblemType,sc,mlmodels,dlmodels,selected_model_size) def get_usecase_page(request,usecasedetails,Existusecases,usecaseId = None,search_text=None): try: x = request.build_absolute_uri().split("http://") y = x[1].split("/") url = y[0].split(":") tacking_url = url[0] except: tacking_url = '127.0.0.1' computeinfrastructure = compute.readComputeConfig() ruuningSetting = running_setting() selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) status = 'SUCCESS' ser_url = service_url.read_service_url_params(request) hosturl =request.get_host() hosturl = hosturl.split(':') hosturl = hosturl[0] packagetip=''' Call From Command Line 1. Click AION Shell 2. python {packageAbsolutePath}/aion_prediction.py {json_data} Call As a Package 1. Go To package_path\WHEELfile 2. python -m pip install {packageName}-py3-none-any.whl Call the predict function after wheel package installation 1. from {packageName} import aion_prediction as p1 2. 
p1.predict({json_data})''' models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id') usecase = usecasedetails.objects.all().order_by('-id') usecase = landing_page(usecasedetails,Existusecases,hosturl,usecaseId,search_text) if len(usecase) > 0: nouc = usecasedetails.objects.latest('id') nouc = (nouc.id)+1 nouc = str(nouc).zfill(4) else: nouc = 1 nouc = str(nouc).zfill(4) description_text = 'This is a usecase for AI' + str(nouc) context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'tacking_url':tacking_url, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'computeinfrastructure':computeinfrastructure,'ruuningSetting':ruuningSetting} return status,context,'usecases.html' def checkText(configPath): isText='False' with open(configPath) as config: data = json.load(config) for feature in data['advance']['profiler']['featureDict'] : if feature['type']=='text': isText = 'True'; break; return isText # For Task ID 12393 # For BUG ID 13161 def checkFE(configPath): isFE = 'False' with open(configPath) as config: data = json.load(config) is_selection_method = data.get('advance', {}).get('selector', {}).get('selectionMethod', {}).get('featureEngineering','False') feature_dict= data.get('advance', {}).get('selector', {}).get('featureEngineering', {}) if 'null' in feature_dict.keys(): feature_dict.pop('null') if is_selection_method == 'True' or 'True' in list(feature_dict.values()): isFE = 'True' return isFE def get_model(Existusecases,usercaseid,version=-1): from django.core import serializers if version == -1: models = Existusecases.objects.filter(ModelName=usercaseid).order_by('-id') else: models = Existusecases.objects.filter(ModelName=usercaseid,Version=version).order_by('-id') for model in models: model.scoringCreteria = 'NA' model.score = 'NA' model.deploymodel = 'NA' model.problemType = 'NA' model.maacsupport = 'False' model.flserversupport = 'False' model.onlinelerningsupport = 'False' model.oltrainingdetails='' model.xplain = 'True' model.isText = 'False' problemTypeNames = {'topicmodelling':'TopicModelling','anomalydetection':'AnomalyDetection'} if model.Status == 'SUCCESS': if os.path.isdir(str(model.DeployPath)): modelPath = os.path.join(str(model.DeployPath),'etc','output.json') try: with open(modelPath) as file: outputconfig = json.load(file) file.close() if outputconfig['status'] == 'SUCCESS': model.scoringCreteria = outputconfig['data']['ScoreType'] model.score = outputconfig['data']['BestScore'] model.deploymodel = outputconfig['data']['BestModel'] model.problemType = outputconfig['data']['ModelType'] if model.problemType in ['topicmodelling','anomalydetection']: model.problemType = problemTypeNames[model.problemType] model.featuresused = outputconfig['data']['featuresused'] model.targetFeature = outputconfig['data']['targetFeature'] if 'params' in outputconfig['data']: model.modelParams = outputconfig['data']['params'] model.modelType = outputconfig['data']['ModelType'] model.isText = checkText(str(model.ConfigPath)) model.isFeatureEng = checkFE(str(model.ConfigPath))#task id 12393 model.dataPath = os.path.join(str(model.DeployPath),'data', 'postprocesseddata.csv.gz') mlacSupportedModel = ["Logistic Regression","Naive Bayes","Decision Tree","Random Forest", "Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Extreme Gradient Boosting (XGBoost)","Light 
Gradient Boosting (LightGBM)", "Categorical Boosting (CatBoost)","Linear Regression","Lasso","Ridge","MLP","LSTM"] if model.problemType.lower() in ['classification','regression','timeseriesforecasting']: #task 11997 if model.deploymodel in mlacSupportedModel: model.maacsupport = 'True' if model.problemType.lower() not in ['classification','regression']: model.xplain = 'False' elif model in ["Neural Architecture Search"]: model.xplain = 'False' model.flserversupport = 'False' model.onlinelerningsupport = 'False' supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"] if model.deploymodel in supportedmodels: model.flserversupport = 'True' else: model.flserversupport = 'False' supportedmodels = ["Extreme Gradient Boosting (XGBoost)"] if model.deploymodel in supportedmodels: model.encryptionsupport = 'True' else: model.encryptionsupport = 'False' supportedmodels = ["Online Decision Tree Classifier","Online Logistic Regression","Online Linear Regression","Online Decision Tree Regressor","Online KNN Regressor","Online Softmax Regression","Online KNN Classifier"] if model.deploymodel in supportedmodels: model.onlinelerningsupport = 'True' onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json') with open(onlineoutputPath) as file: onlineoutputPath = json.load(file) file.close() details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']} dfonline = pd.DataFrame(details) model.oltrainingdetails = dfonline else: model.onlinelerningsupport = 'False' except Exception as e: print(e) pass return models def landing_page(usecasedetails,Existusecases,hosturl,usecaseId = None,search_text=None): sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') if usecaseId: usecase = usecasedetails.objects.filter(id=usecaseId) else: if search_text: usecase = usecasedetails.objects.filter(UsecaseName__contains=str(search_text)).order_by('-id') else: #usecase = usecasedetails.objects.all().order_by('-id')[:100] #top 100 records usecase = usecasedetails.objects.all().order_by('-id') #top 100 records usecaselist=[] if not usecaseId: for x in usecase: problemType= 'NA' publish_url = '' otherModel = {} models = Existusecases.objects.filter(Status='SUCCESS',publishStatus='Published',ModelName=x.id).order_by('-id') if len(models) > 0: #print(models[0]) version = models[0].Version if os.path.isdir(str(models[0].DeployPath)): modelPath = os.path.join(str(models[0].DeployPath),'etc','output.json') with open(modelPath) as file: outputconfig = json.load(file) problemType = outputconfig['data']['ModelType'] #print(problemType.lower()) if problemType.lower() == "llm fine-tuning": cloudconfig = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) print(cloudconfig) from appbe.models import get_instance hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version)) from llm.llm_inference import get_ip instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image) #usnish__ server maynot running if instanceip != '': publish_url = 'http://' + instanceip + ':' + '8000' + '/generate' else: publish_url = 'service not available' else: publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict' publish_status = 'Published' #publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict' parentModel = get_model(Existusecases,x.id,int(version)) else: models = 
Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id') if len(models) > 0: publish_status = 'Trained' version = models[0].Version parentModel = get_model(Existusecases,x.id,int(version)) else: models = Existusecases.objects.filter(ModelName=x.id).order_by('-id') if len(models)==0: publish_status= 'Not Trained' version = -1 else: if models[0].Status == 'FAIL': publish_status= 'Failed' elif models[0].Status == 'Running': publish_status = 'Running' else: publish_status='Not Trained' problemType = models[0].ProblemType version = models[0].Version parentModel={} usecasedetails = {'uuid':x.id,'description':x.Description,'usecaseid':x.usecaseid,'usecase':x.UsecaseName,'status':publish_status,'publish_url':publish_url,'version':version,'parentModel':parentModel,'otherModel':otherModel,'problemType':problemType} usecaselist.append(usecasedetails) else: for x in usecase: otherModel = get_model(Existusecases,x.id) problemType = otherModel[0].problemType usecasedetails = {'uuid':x.id,'description':x.Description,'usecase':x.UsecaseName,'status':'','version':'','parentModel':{},'otherModel':otherModel,'problemType':problemType} usecaselist.append(usecasedetails) return usecaselist def get_landing_model(Existusecases): models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id') for model in models: model.scoringCreteria = 'NA' model.score = 'NA' model.deploymodel = 'NA' if os.path.isdir(str(model.DeployPath)): modelPath = os.path.join(str(model.DeployPath),'etc','output.json') try: with open(modelPath) as file: outputconfig = json.load(file) file.close() if outputconfig['status'] == 'SUCCESS': model.scoringCreteria = outputconfig['data']['ScoreType'] model.score = outputconfig['data']['BestScore'] model.deploymodel = outputconfig['data']['BestModel'] model.problemType = outputconfig['data']['ModelType'] model.maacsupport = 'True' model.flserversupport = 'False' model.onlinelerningsupport = 'False' supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"] if model.deploymodel in supportedmodels: model.flserversupport = 'True' else: model.flserversupport = 'False' supportedmodels = ["Extreme Gradient Boosting (XGBoost)"] if model.deploymodel in supportedmodels: model.encryptionsupport = 'True' else: model.encryptionsupport = 'False' supportedmodels = ["Online Decision Tree Classifier","Online Logistic Regression"] if model.deploymodel in supportedmodels: model.onlinelerningsupport = 'True' onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json') with open(onlineoutputPath) as file: onlineoutputPath = json.load(file) file.close() details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']} dfonline = pd.DataFrame(details) model.oltrainingdetails = dfonline else: model.onlinelerningsupport = 'False' except Exception as e: pass return models def usecase_page(request,usecasedetails,Existusecases,usecaseid,search_text): try: from appbe import read_service_url_params tacking_url = read_service_url_params(request) except: tacking_url = '127.0.0.1' hosturl =request.get_host() hosturl = hosturl.split(':') hosturl = hosturl[0] computeinfrastructure = compute.readComputeConfig() from appbe.aion_config import settings usecasetab = settings() kafkaSetting = kafka_setting() ruuningSetting = running_setting() selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) status,msg = pushRecordForTraining() if status == False: context = {'msg':msg} context['selected'] = 'License' return 
status,context,'licenseexpired.html' ser_url = service_url.read_service_url_params(request) packagetip=''' Call From Command Line 1. Click AION Shell 2. python {packageAbsolutePath}/aion_predict.py {json_data} Call As a Package 1. Go To package_path\publish\package 2. python -m pip install {packageName}-py3-none-any.whl Call the predict function after wheel package installation 1. from {packageName} import aion_predict as p1 2. p1.predict({json_data})''' if request.method == "POST": usecasename = request.POST.get('UsecaseName') description = request.POST.get('Description') usecaseid = request.POST.get('usecaseid') #print('1',usecasename) if (usecasename == ''): usecase = landing_page(usecasedetails,Existusecases,hosturl) if len(usecase) > 0: nouc = usecasedetails.objects.latest('id') nouc = (nouc.id)+1 else: nouc = 1 nouc = str(nouc).zfill(4) description_text = 'This is a usecase for AI' + str(nouc) context = {'description_text':description_text,'usecase':'usecase','Notallowed':'Usecasename is mandatory','ser_url':ser_url,'packagetip':packagetip,'usecasedetail': usecase,'nouc':nouc, 'ser_url':ser_url,'packagetip':packagetip, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'tacking_url':tacking_url,'usecasetab':usecasetab, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting} return status,context,'usecases.html' else: usecase_count = usecasedetails.objects.filter(usecaseid=usecaseid).count() usecasename_count = usecasedetails.objects.filter(UsecaseName=usecasename).count() usecase = landing_page(usecasedetails,Existusecases,hosturl) if (usecase_count > 0) or (usecasename_count > 0): nouc = usecasedetails.objects.latest('id') nouc = (nouc.id)+1 nouc = str(nouc).zfill(4) Msg = 'Error in usecase creating, try again' if usecase_count > 0: Msg = 'Error in usecase creating, try again' if usecasename_count > 0: Msg = 'There is already a use case with same name, please provide unique name' description_text = 'This is a usecase for AI' + str(nouc) context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc,'Status':'error','Msg': Msg,'tacking_url':tacking_url,'usecasetab':usecasetab,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting} return status,context,'usecases.html' else: clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id') from appbe.s3bucketsDB import get_s3_bucket from appbe.gcsbucketsDB import get_gcs_bucket from appbe.azureStorageDB import get_azureStorage p = usecasedetails(UsecaseName=usecasename,usecaseid=usecaseid,Description=description) p.save() s1 = Existusecases.objects.filter(ModelName=p.id).annotate(maxver=Max('ModelName__existusecases__Version')) config_list = s1.filter(Version=F('maxver')) if config_list.count() > 0: Version = config_list[0].Version Version = Version + 1 else: Version = 1 ps = Existusecases(DataFilePath='', DeployPath='', Status='Not Trained',ConfigPath='', Version=Version, ModelName=p,TrainOuputLocation='') ps.save() request.session['ModelName'] = p.id request.session['UseCaseName'] = usecasename request.session['usecaseid'] = usecaseid request.session['ModelVersion'] = Version request.session['ModelStatus'] = 'Not Trained' 
request.session['currentstate'] = 0 request.session['finalstate'] = 0 selected_use_case = usecasename model_status = 'Not Trained' ModelVersion = Version from appbe.telemetry import UseCaseCreated UseCaseCreated(usecaseid+'-'+str(Version)) if len(usecase) > 0: nouc = usecasedetails.objects.latest('id') nouc = (nouc.id)+1 else: nouc = 1 nouc = str(nouc).zfill(4) description_text = 'This is a usecase for AI' + str(nouc) context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'newusercase': usecasename,'tacking_url':tacking_url,'finalstate':request.session['finalstate'], 'description': description,'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'clusteringModels':clusteringModels,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'usecasetab':usecasetab,'azurestorage':get_azureStorage(), 'ModelStatus': model_status, 'ModelVersion': ModelVersion, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure} return status,context,'upload.html' else: models = get_landing_model(Existusecases) usecase = landing_page(usecasedetails,Existusecases,hosturl,usecaseid,search_text) if len(usecase) > 0: nouc = usecasedetails.objects.latest('id') nouc = (nouc.id)+1 else: nouc = 1 nouc = str(nouc).zfill(4) description_text = 'This is a usecase for AI' + str(nouc) context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'tacking_url':tacking_url,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab} if usecaseid: context.update({'ucdetails':'True'}) return status,context,'usecases.html' def index_page(request,usecasedetails,Existusecases): if 'ModelVersion' in request.session: del request.session['ModelVersion'] if 'UseCaseName' in request.session: del request.session['UseCaseName'] if 'ModelStatus' in request.session: del request.session['ModelStatus'] if 'currentstate' in request.session: del request.session['currentstate'] if 'finalstate' in request.session: del request.session['finalstate'] return usecases_page(request,usecasedetails,Existusecases) def usecases_page(request,usecasedetails,Existusecases,usecaseid=None,substring=None): return usecase_page(request,usecasedetails,Existusecases,usecaseid,substring) def mllite_page(request): from appbe.aion_config import settings usecasetab = settings() status,msg = pushRecordForTraining() if status == False: context = {'selected':'mllite','lerror':msg} return context configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json') f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) context = {} context = {'selected':'mllite','sagemaker':configSettingsJson,'usecasetab':usecasetab} return context def mltesting_page(request): from appbe.aion_config import settings usecasetab = settings() status,msg = pushRecordForTraining() if status == False: context = {'lerror':msg} return context if request.method == "POST": models = request.POST['model'] datap = request.POST['data'] if(os.path.isfile(models) and os.path.isfile(datap)): request.session['datalocation'] = datap df = pd.read_csv(datap,encoding='utf-8',skipinitialspace = True,encoding_errors= 'replace') trainfea = df.columns.tolist() featurs = request.POST.getlist('Training') feature = 
",".join(featurs) filetimestamp = str(int(time.time())) settingconfig = os.path.join(CONFIG_FILE_PATH, 'MLTest_' + filetimestamp + '.json') request.session['MLTestResult'] = settingconfig mltestresult={} mltestresult['models'] = models mltestresult['datap'] = datap mltestresult['feature'] = feature # features = ['PetalLengthCm','PetalWidthCm'] targ = request.POST['Target'] tar =[targ] mltestresult['target'] = targ mltestresult = json.dumps(mltestresult) with open(settingconfig, "w") as fpWrite: fpWrite.write(mltestresult) fpWrite.close() from pathlib import Path mltest={} if Path(models).is_file() and Path(datap).is_file(): try: from mltest import baseline outputStr = baseline.baseline_testing(models,datap, feature, targ) #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_mltest.py')) #print(scriptPath, models, datap, feature, targ) #outputStr = subprocess.check_output([sys.executable, scriptPath, models, datap, feature, targ]) #print(outputStr) #outputStr = outputStr.decode('utf-8') #outputStr= outputStr.replace('\'','\"') #print('ou',outputStr) #outputStr = outputStr.strip() mltest = json.loads(outputStr) Problemtype= mltest['Problemtype'] with open(request.session['MLTestResult'], 'r+') as f: mltestresult = json.load(f) f.close() mltestresult['Problemtype'] = Problemtype mltestresult['ProblemName'] = mltest['ProblemName'] status = mltest['Status'] if status == 'Fail': errormsg= mltest['Msg'] context = {'error':errormsg,'mltest':'mltest'} else: if Problemtype == 'Classification': mltestresult['Score'] = mltest['Accuracy'] mltestresult['Params'] = mltest['Params'] Problem= mltest['ProblemName'] Parameters= mltest['Params'] round_params = {} for key, value in Parameters.items(): if isinstance(value, float): round_params[key] = round(value,2) else: round_params[key] = value matrixconfusion = mltest['Confusionmatrix'] classificationreport = mltest['classificationreport'] classificationreport = json.loads(classificationreport) matrixconfusion = json.loads(matrixconfusion) indexName =[] columnName = [] for i in matrixconfusion.keys(): indexName.append("act:"+str(i)) for j in matrixconfusion[i].keys(): columnName.append("pre:"+str(j)) df3 = pd.DataFrame.from_dict(classificationreport) df = df3.transpose() df2 = pd.DataFrame.from_dict(matrixconfusion) df1 = pd.DataFrame(df2.values,index=indexName,columns=columnName) report = df.to_html() report1 = df1.to_html() recordone = mltest['onerecord'] recordsten = mltest['tenrecords'] recordshund = mltest['hundrecords'] context = {'modelname': models,'datapath':datap,'features':featurs,'target':tar,'Problemtype':Problem,'modeltype':Problemtype,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'matrixconfusion':report1,'classificationreport':report,'classification':'classification','df':df,'df1':df1,'basemltest':'basemltest','success':'success','trainfea':trainfea,'selected':'mltesting','usecasetab':usecasetab} elif Problemtype == 'Regression': Problem= mltest['ProblemName'] mltestresult['Params'] = mltest['Params'] mltestresult['Score'] = mltest['R2'] Parameters= mltest['Params'] round_params = {} for key, value in Parameters.items(): if isinstance(value, float): round_params[key] = round(value,2) else: round_params[key] = value Mse = mltest['MSE'] Mae = mltest['MAE'] Rmse = mltest['RMSE'] R2 = mltest['R2'] recordone = mltest['onerecord'] recordsten = mltest['tenrecords'] recordshund = mltest['hundrecords'] context = {'modelname': 
models,'datapath':datap,'features':featurs,'target':tar, 'Problemtype':Problem,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'Mse':Mse,'Mae':Mae,'Rmse':Rmse,'R2Score':R2,'regression':'regression','success':"success",'selected': 'mltest','basemltest':'basemltest','usecasetab':usecasetab} else: errormsg= mltest['Msg'] context = {'error':errormsg,'mltest':'mltest'} mltestresult = json.dumps(mltestresult) with open(settingconfig, "w") as fpWrite: fpWrite.write(mltestresult) fpWrite.close() except Exception as e: print("-------------"+str(e)+'=================') e = str(e).replace('\'','') errormsg = 'Error: Exception '+str(e) context = {'error':errormsg,'mltest':'mltest'} else: if not (Path(models).is_file() and Path(datap).is_file()): context = {'error':"Please Check ModelPath & Datapath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab} elif not Path(models).is_file(): context = {'error':"Please Check ModelPath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab} elif not Path(datap).is_file(): context = {'error':"Please Check DataPath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab} else: context = {'error':'Either model path or data path does not exist','mltest':'mltest','usecasetab':usecasetab} else: context = {'selected':'mltesting','usecasetab':usecasetab} return context
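# --- Illustrative sketch: the nested config shape that checkText() and checkFE()
# above walk. Feature and technique names are made-up examples.
sample_config = {
    "advance": {
        "profiler": {
            "featureDict": [{"feature": "review_text", "type": "text"}]
        },
        "selector": {
            "selectionMethod": {"featureEngineering": "True"},
            "featureEngineering": {"PCA": "False", "SVD": "True"}
        }
    }
}
# checkText() returns 'True' when any featureDict entry has type == 'text';
# checkFE() returns 'True' when featureEngineering is the selection method or
# any feature-engineering technique is set to 'True'.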
aion_config.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import json import platform import subprocess def kafka_setting(): file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')) f = open(file_path, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) from os.path import expanduser home = expanduser("~") if platform.system() == 'Windows': DEPLOY_LOCATION = os.path.join(home,'AppData','Local','HCLT','AION','target','kafka') else: DEPLOY_LOCATION = os.path.join(home,'HCLT','AION','target','kafka') configSettingsJson['kafkalocation'] = DEPLOY_LOCATION return(configSettingsJson) def start_tracking(): from appbe.dataPath import DEPLOY_LOCATION import platform mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts','mlflow.exe')) script_path = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts')) #Updating path for system environment; Bug-13835 os.environ['PATH']= os.environ['PATH']+ ';'+ str(script_path) DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns') if platform.system() == 'Windows': subprocess.Popen([sys.executable, mlflowpath,"ui", "--backend-store-uri","file:///"+DEPLOY_LOCATION]) else: subprocess.Popen(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri","file:///"+DEPLOY_LOCATION]) def aion_tracking(): status = 'Success' import requests try: response = requests.get('http://localhost:5000') if response.status_code != 200: status = 'Error' except Exception as inst: print(inst) status = 'Error' return status def aion_service(): try: if platform.system() == 'Windows': nooftasks = getrunningstatus('AION_Service') else: nooftasks = getrunningstatus('run_service') if len(nooftasks): status = 'Running' else: if platform.system() == 'Windows': servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','sbin','AION_Service.bat')) os.system('start cmd /c "'+servicepath+'"') else: servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','bin','run_service.py')) subprocess.Popen([sys.executable,servicepath]) status = 'Started' except Exception as inst: print(inst) status = 'Error' return status def getrunningstatus(name): try: taskdetails = [] if platform.system() == 'Windows': r = ([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq conhost.exe"').decode('UTF-8').splitlines()]) r.append([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq cmd.exe"').decode('UTF-8').splitlines()]) else: r = ([line.split() for line in subprocess.check_output("ps -ef | grep .py",shell=True).decode('UTF-8').splitlines()]) for i in range(len(r)): s = r[i] if any(name in j for j in s): taskdetails.append('Yes') break return (taskdetails) except Exception as inst: print(inst) status = 'Error' return status def getTasks(mlflow,consumer,service): mlflowlist = [] consumerlist=[] servicelist = [] #r = os.popen('tasklist /v').read().strip().split('\n') try: 
if platform.system() == 'Windows': r = ([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq conhost.exe"').decode('UTF-8').splitlines()]) r.append([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq cmd.exe"').decode('UTF-8').splitlines()]) else: r = ([line.split() for line in subprocess.check_output("ps -ef | grep .py",shell=True).decode('UTF-8').splitlines()]) except Exception as e: print(e) r = [] #print(r) #print ('# of tasks is %s' % (len(r))) for i in range(len(r)): s = r[i] if any(mlflow in j for j in s): mlflowlist.append('Yes') if any(consumer in j for j in s): consumerlist.append('Yes') if any(service in j for j in s): servicelist.append('Yes') return (mlflowlist,consumerlist,servicelist) def running_setting(): otherApps = {} if platform.system() == 'Windows': mlflowlist,consumerlist,servicelist = getTasks('AION_MLFlow','AION_Consumer','AION_Service') else: mlflowlist,consumerlist,servicelist = getTasks('run_mlflow','AION_Consumer','run_service') if len(mlflowlist): otherApps['modeltracking'] = 'Running' else: otherApps['modeltracking'] = 'Not Running' #nooftasks = getTasks('AION_Consumer') if len(consumerlist): otherApps['consumer'] = 'Running' else: otherApps['consumer'] = 'Not Running' #nooftasks = getTasks('AION_Service') if len(servicelist): otherApps['service'] = 'Running' else: otherApps['service'] = 'Not Running' return(otherApps) #EDA Performance change # ---------------------------- def eda_setting(): configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','eda.config') sample_size='' try: if(os.path.isfile(configfilepath)): file = open(configfilepath, "r") read = file.read() file.close() for line in read.splitlines(): if 'sample_size=' in line: sample_size = line.split('=',1)[1] except Exception as inst: pass return(sample_size) def get_telemetryoptout(): telemetryoptuout = "No" from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') try: if sqlite_obj.table_exists('settings'): data = sqlite_obj.read_data('settings') for values in data: telemetryoptuout = values[7] else: telemetryoptuout = 'No' except Exception as e: print(e) telemetryoptuout ='No' return telemetryoptuout def get_edafeatures(): No_of_Permissible_Features_EDA = "" from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') try: if sqlite_obj.table_exists('settings'): data = sqlite_obj.read_data('settings') for values in data: No_of_Permissible_Features_EDA = values[3] else: configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') if (os.path.isfile(configfilepath)): file = open(configfilepath, "r") read = file.read() file.close() for line in read.splitlines(): if 'No_of_Permissible_Features_EDA=' in line: No_of_Permissible_Features_EDA = line.split('=', 1)[1] except Exception as e: print(e) No_of_Permissible_Features_EDA =20 return No_of_Permissible_Features_EDA def get_graviton_data(): graviton_url = "" graviton_userid = "" from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') try: if sqlite_obj.table_exists('settings'): data = sqlite_obj.read_data('settings') for values in data: graviton_url = values[0] graviton_userid = values[1] 
else: configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') if (os.path.isfile(configfilepath)): file = open(configfilepath, "r") read = file.read() file.close() for line in read.splitlines(): if 'graviton_url=' in line: graviton_url = line.split('=', 1)[1] if 'graviton_userid=' in line: graviton_userid = line.split('=', 1)[1] except Exception as e: print(e) graviton_url = "" graviton_userid = "" return graviton_url,graviton_userid def get_llm_data(): apiKeyIdLLM = "" apiUrlLLM = "" from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') try: if sqlite_obj.table_exists('openai'): data = sqlite_obj.read_data('openai')[0] param_keys = ['api_type','api_key','api_base','api_version'] openai_data = dict((x,y) for x,y in zip(param_keys,data)) return openai_data['api_key'],openai_data['api_base'],openai_data['api_type'],openai_data['api_version'] except Exception as e: print(e) apiKeyIdLLM = "" apiUrlLLM = "" return apiKeyIdLLM,apiUrlLLM,"","" def settings(): configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config') usecase='disable' graviton_url = '' graviton_userid = '' apiKeyIdLLM = '' apiUrlLLM = '' No_of_Permissible_Features_EDA = '' try: from appbe.sqliteUtility import sqlite_db import pandas as pd from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('settings'): column_names = sqlite_obj.column_names('settings') data = sqlite_obj.read_data('settings') if 'telemetryOptOut' not in column_names: query = 'Alter Table settings ADD telemetryOptOut TEXT' sqlite_obj.execute_query(query) if 'No_of_Permissible_Features_EDA' not in column_names or 'apiKeyIdLLM' not in column_names: sqlite_obj.drop_table('settings') configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') file = open(configfilepath, "r") dataread = file.read() for line in dataread.splitlines(): if 'usecase=' in line: cusecase = line.split('=', 1)[1] if 'graviton_url=' in line: cgraviton_url = line.split('=', 1)[1] if 'graviton_userid=' in line: cgraviton_userid = line.split('=', 1)[1] if 'No_of_Permissible_Features_EDA=' in line: cNo_of_Permissible_Features_EDA = line.split('=', 1)[1] if 'apiKeyIdLLM=' in line: capiKeyIdLLM = '' if 'apiUrlLLM=' in line: capiUrlLLM = '' file.close() if 'apiKeyIdLLM' not in column_names: apiKeyIdLLM = capiKeyIdLLM if 'apiUrlLLM' not in column_names: apiUrlLLM = capiUrlLLM if 'No_of_Permissible_Features_EDA' not in column_names: No_of_Permissible_Features_EDA = cNo_of_Permissible_Features_EDA newdata = {} newdata.update({'graviton_url':[data[0][0]],'graviton_userid': [data[0][1]],'usecase': [data[0][2]],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :apiKeyIdLLM,'apiUrlLLM':apiUrlLLM,'telemetryOptOut':telemetryOptOut}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings') data = sqlite_obj.read_data('settings') for values in data: graviton_url = values[0] graviton_userid = values[1] usecase = values[2] No_of_Permissible_Features_EDA = values[3] telemetryOptOut = values[7] else: configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config') if (os.path.isfile(configfilepath)): file = open(configfilepath, "r") read = file.read() file.close() 
apiKeyIdLLM = '' apiUrlLLM = '' for line in read.splitlines(): if 'usecase=' in line: usecase = line.split('=', 1)[1] if 'graviton_url=' in line: graviton_url = line.split('=', 1)[1] if 'graviton_userid=' in line: graviton_userid = line.split('=', 1)[1] if 'No_of_Permissible_Features_EDA=' in line: No_of_Permissible_Features_EDA = line.split('=', 1)[1] newdata = {} newdata.update({'graviton_url':[graviton_url],'graviton_userid': [graviton_userid],'usecase': [usecase],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :'','apiUrlLLM':'','telemetryOptOut':['No']}) # --------else create table and update the data, write data will create a table if it does nt exists----- sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings') return(usecase) except Exception as e: print(e) configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config') try: if(os.path.isfile(configfilepath)): file = open(configfilepath, "r") read = file.read() file.close() for line in read.splitlines(): if 'usecase=' in line: usecase = line.split('=',1)[1] if 'graviton_url=' in line: graviton_url = line.split('=',1)[1] if 'graviton_userid=' in line: graviton_userid = line.split('=',1)[1] if 'No_of_Permissible_Features_EDA=' in line: No_of_Permissible_Features_EDA = line.split('=', 1)[1] if 'apiKeyIdLLM=' in line: apiKeyIdLLM = line.split('=', 1)[1] if 'apiUrlLLM=' in line: apiUrlLLM = line.split('=', 1)[1] except Exception as inst: pass external_system = 'enable' semantico = 'enable' return(usecase) def addKafkaModel(request,datalocation): file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')) f = open(file_path, "r+") configSettings = f.read() configSettingsJson = json.loads(configSettings) modelSignature = request.POST.get('modelsignature') timeframe = request.POST.get('timeframe') command = request.POST.get('kafkasubmit') if command.lower() == 'configure': configSettingsJson['timeFrame'][modelSignature] = str(timeframe) configSettingsJson['trainingDataLocation'][modelSignature] = datalocation elif command.lower() == 'unconfigure': del configSettingsJson['timeFrame'][modelSignature] updatedConfigSettingsJson = json.dumps(configSettingsJson) f.seek(0) f.write(updatedConfigSettingsJson) f.truncate() f.close() def saveopenaisettings(request): try: from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('openai'): updated_data = 'api_type="'+request.POST.get('api_type')+'",api_key="'+request.POST.get('apiKeyIdLLM')+'",api_base="'+request.POST.get('apiUrlLLM')+'",api_version="'+request.POST.get('api_version')+'"' sqlite_obj.update_data(updated_data,'','openai') else: newdata = {} newdata.update({'api_type':['azure'],'api_key': [request.POST.get('apiKeyIdLLM')],'api_base': [request.POST.get('apiUrlLLM')],'api_version':[request.POST.get('api_version')]}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'openai') except Exception as e: print(e) def savegravitonconfig(request): try: from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') updated_data = 'graviton_url="'+request.POST.get('graviton_url')+'",graviton_userid="'+request.POST.get('graviton_userid')+'"' 
        sqlite_obj.update_data(updated_data,'settingsid=1','settings')
    except Exception as e:
        print(e)

def saveconfigfile(request):
    try:
        from appbe.sqliteUtility import sqlite_db
        from appbe.dataPath import DATA_DIR
        import pandas as pd
        file_path = os.path.join(DATA_DIR, 'sqlite')
        sqlite_obj = sqlite_db(file_path, 'config.db')
        updated_data = 'usecase="'+request.POST.get('usecasetab')+'",No_of_Permissible_Features_EDA="'+request.POST.get('edefeatures')+'",telemetryOptOut="'+request.POST.get('telemetryOptOut')+'"'
        print(updated_data)
        sqlite_obj.update_data(updated_data,'settingsid=1','settings')
        return request.POST.get('usecasetab')
    except Exception as e:
        print(e)
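# ---------------------------------------------------------------------------
# Reference sketch (illustrative, not part of the shipped helpers): the
# index-based reads above (values[3], values[7]) assume the 'settings' table
# columns were created in the insertion order used by settings():
#   0 graviton_url, 1 graviton_userid, 2 usecase,
#   3 No_of_Permissible_Features_EDA, 4 settingsid,
#   5 apiKeyIdLLM, 6 apiUrlLLM, 7 telemetryOptOut
# A name-based lookup, as sketched below, avoids depending on that order.
# The helper name is an assumption made for illustration only.
# ---------------------------------------------------------------------------
def _read_setting_by_name(column):
    import os
    from appbe.sqliteUtility import sqlite_db
    from appbe.dataPath import DATA_DIR
    file_path = os.path.join(DATA_DIR, 'sqlite')
    sqlite_obj = sqlite_db(file_path, 'config.db')
    if not sqlite_obj.table_exists('settings'):
        return None
    names = sqlite_obj.column_names('settings')
    rows = sqlite_obj.read_data('settings')
    if not rows or column not in names:
        return None
    return rows[0][names.index(column)]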
reports.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np import json import os def downloadtrainingfile(request,Existusecases): usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion']) updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r+", encoding="utf-8") configSettingsData = f.read() configSettingsJson = json.loads(configSettingsData) modelName = request.session['UseCaseName'] modelVersion = request.session['ModelVersion'] modelStatus = request.session['ModelStatus'] model = Existusecases.objects.get(ModelName=request.session['ModelName'],Version=request.session['ModelVersion']) output_train_json_filename = str(model.TrainOuputLocation) f = open(output_train_json_filename, "r+") training_output = f.read() f.close() dict = {'Attribute':[], 'Value':[] } training_output = json.loads(training_output) dfdashbord = pd.DataFrame(dict) dfdashbord.loc[len(dfdashbord.index)] = ['UseCaseName',modelName] dfdashbord.loc[len(dfdashbord.index)] = ['ProblemType',training_output['data']['ModelType']] dfdashbord.loc[len(dfdashbord.index)] = ['Version',str(modelVersion)] dfdashbord.loc[len(dfdashbord.index)] = ['Status',modelStatus] if 'vmDetails' in training_output['data']: dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation', training_output['data']['vmDetails']] else: dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation',training_output['data']['deployLocation']] dfdashbord.loc[len(dfdashbord.index)] = ['BestModel',training_output['data']['BestModel']] dfdashbord.loc[len(dfdashbord.index)] = ['BestScore',training_output['data']['BestScore']] dfdashbord.loc[len(dfdashbord.index)] = ['ScoringParam',training_output['data']['ScoreType']] if training_output['data']['ModelType'] != 'LLM Fine-Tuning': dfdashbord.loc[len(dfdashbord.index)] = ['Test%',configSettingsJson['advance']['testPercentage']] dfdashbord.loc[len(dfdashbord.index)] = ['FeaturesUsed',training_output['data']['featuresused']] from io import BytesIO as IO excel_file = IO() edaFileName = usename + '_training.xlsx' excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter") dfdashbord.to_excel(excel_writer, sheet_name='Dashboard',index=False) if training_output['data']['ModelType'].lower() != 'multimodellearning' and training_output['data']['ModelType'].lower() != 'multilabelprediction': EvaluatedModels = training_output['data']['EvaluatedModels'] EvaluatedModels = pd.DataFrame(EvaluatedModels) EvaluatedModels.to_excel(excel_writer, sheet_name='EvaluatedModels',startrow=0 , startcol=0) if training_output['data']['ModelType'].lower() == 'classification': #print(training_output['data']['matrix']) row1 = 10 row2 = 10 if 'ConfusionMatrix' in training_output['data']['matrix']: confusionMatrix = training_output['data']['matrix']['ConfusionMatrix'] confusionMatrix = pd.DataFrame(confusionMatrix) confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) row1 =confusionMatrix.shape[0]+5 if 'ConfusionMatrix' in 
training_output['data']['trainmatrix']: confusionMatrix = training_output['data']['trainmatrix']['ConfusionMatrix'] confusionMatrix = pd.DataFrame(confusionMatrix) confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) if 'ClassificationReport' in training_output['data']['matrix']: confusionMatrix = training_output['data']['matrix']['ClassificationReport'] confusionMatrix = pd.DataFrame(confusionMatrix) confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=row1 , startcol=0) if 'ClassificationReport' in training_output['data']['trainmatrix']: confusionMatrix = training_output['data']['trainmatrix']['ClassificationReport'] confusionMatrix = pd.DataFrame(confusionMatrix) confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=row2 , startcol=0) if training_output['data']['ModelType'].lower() == 'regression': dict = {'Attribute':[],'Value':[]} testingDF = pd.DataFrame(dict) try: testingDF.loc[len(testingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']] testingDF.loc[len(testingDF.index)] = ['R2Score',training_output['data']['matrix']['R2Score']] testingDF.loc[len(testingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']] testingDF.loc[len(testingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']] testingDF.loc[len(testingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']] except: pass testingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0) trainingDF = pd.DataFrame(dict) try: trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['trainmatrix']['MAE']] trainingDF.loc[len(trainingDF.index)] = ['R2Score',training_output['data']['trainmatrix']['R2Score']] trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['trainmatrix']['MSE']] trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['trainmatrix']['MAPE']] trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['trainmatrix']['RMSE']] except: pass trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) if training_output['data']['ModelType'].lower() == 'clustering': dict = {'Attribute':[],'Value':[]} trainingDF = pd.DataFrame(dict) try: trainingDF.loc[len(trainingDF.index)] = ['SilHouette_Avg',round(training_output['data']['trainmatrix']['SilHouette_Avg'],2)] trainingDF.loc[len(trainingDF.index)] = ['DaviesBouldinScore',round(training_output['data']['trainmatrix']['DaviesBouldinScore'],2)] trainingDF.loc[len(trainingDF.index)] = ['CalinskiHarabazScore',round(training_output['data']['trainmatrix']['CalinskiHarabazScore'],2)] except: pass trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0) centroidpath = os.path.join(training_output['data']['deployLocation'],'centers.csv') if(os.path.isfile(centroidpath)): df_center = pd.read_csv(centroidpath) df_center = df_center.rename(columns={"Unnamed: 0": "Cluster"}) df_center.to_excel(excel_writer, sheet_name='Centroid',startrow=0 , startcol=0) if training_output['data']['ModelType'].lower() == 'timeseriesforecasting': #task 11997 if training_output['data']['BestModel'].lower() == 'var': dict = {'Features':[],'Attribute':[],'Value':[]} trainingDF = pd.DataFrame(dict) FeaturesMatrix = training_output['data']['matrix'] for x in FeaturesMatrix: try: trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAE',x['MAE']] trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MSE',x['MSE']] 
                    trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAPE',x['MAPE']]
                    trainingDF.loc[len(trainingDF.index)] = [x['Features'],'RMSE',x['RMSE']]
                except:
                    pass
            trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
        else:
            dict = {'Attribute':[],'Value':[]}
            trainingDF = pd.DataFrame(dict)
            try:
                trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']]
                trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']]
                trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']]
                trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']]
            except:
                pass
            trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
    workbook = excel_writer.book
    #excel_writer.save()
    excel_writer.close()
    excel_file.seek(0)
    return edaFileName,excel_file
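# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): one way a Django view could return the
# workbook produced by downloadtrainingfile() as a file download. The view
# name and URL wiring are assumptions; only downloadtrainingfile() comes from
# this module.
# ---------------------------------------------------------------------------
def download_training_report(request, Existusecases):
    from django.http import HttpResponse
    file_name, excel_file = downloadtrainingfile(request, Existusecases)
    response = HttpResponse(
        excel_file.getvalue(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename="%s"' % file_name
    return response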
images_analysis.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
def analysis_images(folder_path):
    from AIX import image_eda
    qualityscore = image_eda.img_MeasureImageQuality(folder_path)
    eda_result = image_eda.img_EDA(folder_path)
    #Image Duplicate Finder
    duplicate_img = image_eda.img_duplicatefinder(folder_path)
    color_plt = image_eda.img_plot_colour_hist(folder_path)
    return qualityscore,eda_result,duplicate_img,color_plt
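# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): analysis_images() expects a folder of
# images and returns the four EDA artefacts in order. The folder path below
# is a placeholder, not a real dataset location.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    qualityscore, eda_result, duplicate_img, color_plt = analysis_images(r'./sample_images')
    print(qualityscore)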
models.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os.path from pathlib import Path import time import subprocess import sys import shutil from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from appbe.publish import chech_publish_info from llm.llm_tuning import update_sqllite_data from appbe.data_io import sqlite_db from appbe.dataPath import DATA_DIR from appbe import installPackage from appbe import compute import json import os import signal from os.path import expanduser import platform import pandas as pd LOG_FILE_PATH = os.path.join(DATA_DIR,'logs') GITHUB_FILE_PATH = os.path.join(DATA_DIR,'github') PUBLISH_PATH = os.path.join(DATA_DIR,'target') DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite') os.makedirs(LOG_FILE_PATH, exist_ok=True) ''' def check_publish_info(usecase,version): sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') if sqlite_dbObj.table_exists('publish'): publishState= 'Published' ''' def get_instance(modelID): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if sqlite_obj.table_exists("LLMTuning"): data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) if len(data) > 0: return (data[3],data[2],data[5],data[6],data[4]) else: return '','','','','' else: return '','','','','' def startServices(request,usecasedetails,Existusecases): try: models = Existusecases.objects.filter(publishStatus='Published') print(models) if len(models) > 0: for model in models: try: portNo = model.portNo ppid = model.publishPID if ppid == 0: continue try: os.kill(int(model.publishPID), signal.SIGTERM) except Exception as e: print(e) scriptPath = os.path.join(PUBLISH_PATH,model.ModelName.usecaseid,'aion_publish_service.py') if os.path.exists(scriptPath): outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) model.publishStatus = 'Published' model.publishPID = outputStr.pid model.portNo = portNo model.save() else: print("Pass") pass except Exception as e: print(e) except Exception as e: print(e) def publishmodel(request,usecaseid,version,Existusecases,usecasedetails): portNo=0 usecased = usecasedetails.objects.get(usecaseid=usecaseid) models = Existusecases.objects.filter(ModelName=usecased,publishStatus='Published') if len(models) > 0: for model in models: try: portNo = model.portNo try: os.kill(int(model.publishPID), signal.SIGTERM) except Exception as e: print(e) mod = Existusecases.objects.get(id=model.id) mod.publishStatus = '' mod.publishPID = 0 mod.portNo = 0 mod.save() except Exception as e: print(e) pass missingNumbers = [] if portNo == 0: models = Existusecases.objects.filter(publishStatus='Published') usedPortNo=[] for model in models: usedPortNo.append(model.portNo) startPortNo = 8091 endPortNo = 8091+5 missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo] if len(missingNumbers) > 0: portNo = missingNumbers[0] if portNo != 0: scriptPath = 
os.path.join(PUBLISH_PATH,usecaseid,'aion_publish_service.py') model = Existusecases.objects.get(ModelName=usecased,Version=version) isExist = os.path.exists(scriptPath) if isExist: configfile = os.path.join(PUBLISH_PATH,usecaseid,'config.json') configdata = {'version': str(version)} with open(configfile, "w") as outfile: json.dump(configdata, outfile) outfile.close() outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) model.publishStatus = 'Published' model.publishPID = outputStr.pid model.portNo = portNo model.save() Status = 'SUCCESS' hosturl =request.get_host() hosturl = hosturl.split(':') url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecaseid)+'/predict' Msg = 'Model Published Successfully' else: Status = 'Error' Msg = 'Model Published Error' url = '' else: Status = 'Error' Msg = 'All ports are utilized' url='' return Status,Msg,url def get_published_models(instanceid): from appbe.sqliteUtility import sqlite_db file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if sqlite_obj.table_exists("LLMTuning"): condition = f'"instance"=="{instanceid}" AND "status"=="Published"' datas = sqlite_obj.read_data('LLMTuning',condition) if len(datas)>0: return True,datas[0][0] return False,'' def maac_command(request,Existusecases,usecasedetails): command = request.POST.get('maacsubmit') kafkaSetting = kafka_setting() ruuningSetting = running_setting() computeinfrastructure = compute.readComputeConfig() modelID = request.POST.get('modelID') Version = request.POST.get('Version') p = Existusecases.objects.get(id=modelID,Version=Version) usecasename = p.ModelName.usecaseid #bugid 13339 usecaseid = p.ModelName.id # runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) # installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) usecasedetail = usecasedetails.objects.get(id=p.ModelName.id) usecase = usecasedetails.objects.all() problemType = p.ProblemType score = 0 scoreType = '' deployedModel = '' deployedModelVersion = p.Version models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') computeinfrastructure = compute.readComputeConfig() for model in models: model.scoringCreteria = 'NA' model.score = 'NA' model.deploymodel = 'NA' if os.path.isdir(str(model.DeployPath)): modelPath = os.path.join(str(model.DeployPath),'etc','output.json') try: with open(modelPath) as file: outputconfig = json.load(file) file.close() if outputconfig['status'] == 'SUCCESS': if deployedModelVersion == model.Version: problemType = outputconfig['data']['ModelType'] scoreType = outputconfig['data']['ScoreType'] score = outputconfig['data']['BestScore'] deployedModel = outputconfig['data']['BestModel'] model.scoringCreteria = outputconfig['data']['ScoreType'] model.score = outputconfig['data']['BestScore'] model.deploymodel = outputconfig['data']['BestModel'] model.maacsupport = 'True' model.flserversupport = 'False' supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"] if model.deploymodel in supportedmodels: model.flserversupport = 'True' else: model.flserversupport = 'False' supportedmodels = ["Extreme Gradient Boosting (XGBoost)"] if model.deploymodel in supportedmodels: model.encryptionsupport = 'True' else: model.encryptionsupport = 'False' except Exception as e: print(e) pass MLaaC_output = '' if command == 'generatemaac': deployPath = str(p.DeployPath) codeconfig = os.path.join(deployPath,'etc','code_config.json') if 
os.path.isfile(codeconfig): with open(codeconfig,'r') as f: cconfig = json.load(f) f.close() dbserver = request.POST.get('productiondb') db_config = {} if dbserver.lower() == 'influxdb': cconfig['prod_db_type'] = 'influx' db_config['host'] = request.POST.get('influxdbhost') db_config['port'] = request.POST.get('influxdbportno') db_config['user'] = request.POST.get('influxdbuser') db_config['password'] = request.POST.get('influxpassword') db_config['database'] = 'production' db_config['measurement'] = usecasename tags = {} db_config['tags']=tags cconfig['db_config'] = db_config else: cconfig['prod_db_type'] = 'sqlite' cconfig['db_config'] = db_config dbserver = request.POST.get('mlflowserver') mlflow_config = {} if dbserver.lower() == 'local': cconfig['mlflow_config'] = mlflow_config else: mlflow_config['tracking_uri_type'] = request.POST.get('mlflowserverurl') mlflow_config['tracking_uri'] = request.POST.get('mlflowserverurl') mlflow_config['registry_uri'] = request.POST.get('mlflowserverurl') mlflow_config['artifacts_uri'] = request.POST.get('mlflowserverurl') cconfig['mlflow_config'] = mlflow_config with open(codeconfig,'w') as f: json.dump(cconfig, f) f.close() from bin.aion_mlac import generate_mlac_code outputStr = generate_mlac_code(codeconfig) output = json.loads(outputStr) from appbe.telemetry import UpdateTelemetry UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'MLaC','Yes') if output['Status'] == 'SUCCESS': Status = 'SUCCESS' MLaaC_output = output['MLaC_Location'].replace('\\', '\\\\') Msg = 'MLaC code successfully generated' else: Status = 'Failure' Msg = output['msg'] else: Status = 'Failure' Msg = 'Code Config Not Present' if command == 'buildContainer': deployPath = str(p.DeployPath) maac_path = os.path.join(deployPath,'publish','MLaC') if os.path.isdir(maac_path): config={'usecase':str(usecasename),'version':str(p.Version),'mlacPath':maac_path} config = json.dumps(config) scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) if platform.system() == 'Windows': outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE) else: outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config]) #cmd = scriptPath+" "+str(usecasename)+" "+str(p.Version)+" "+str(maac_path) #subprocess.Popen(cmd,shell=True) Status = 'SUCCESS' Msg = 'Build Container Started' else: Status = 'Failure' Msg = 'Run Code Generator' if command == 'runpipeline': deployPath = str(p.DeployPath) dockerlist = os.path.join(deployPath,'publish','MLaC','dockerlist.json') if os.path.isfile(dockerlist): persistancevolume = request.POST.get('persistancevolume') datasetpath = request.POST.get('dataset') filetimestamp = str(int(time.time())) logfilepath = os.path.join(LOG_FILE_PATH,'AIONPipeline_'+str(filetimestamp)+'.log') config={'usecase':str(usecasename),'version':str(p.Version),'persistancevolume':persistancevolume,'datasetpath':datasetpath,'dockerlist':str(dockerlist),'logfilepath':logfilepath} config = json.dumps(config) scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) if platform.system() == 'Windows': outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','runpipelinelocal','-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE) else: outputStr = subprocess.Popen([sys.executable, scriptPath, 
str(usecasename),str(p.Version),persistancevolume,datasetpath,str(dockerlist),logfilepath]) Status = 'SUCCESS' Msg = 'Pipeline Started' MLaaC_output = 'Check log file for pipeline execution status: ' + str(logfilepath) else: Status = 'Failure' Msg = 'Not found container information' if command == 'generateyaml': deployPath = str(p.DeployPath) maac_path = os.path.join(deployPath,'publish','MLaC') if os.path.isdir(maac_path): persistancevolume = request.POST.get('persistancevolume') datasetpath = request.POST.get('dataset') supported_urls_starts_with = ('gs://','https://','http://') if datasetpath.startswith(supported_urls_starts_with): datasetpath = request.POST.get('dataset') else: datasetpath = '/aion/'+request.POST.get('dataset') serviceport = request.POST.get('serviceport') scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_generateyaml.py')) outputStr = subprocess.check_output([sys.executable, scriptPath, str(usecasename),str(p.Version),persistancevolume,datasetpath,maac_path,serviceport]) outputStr = outputStr.decode('utf-8') outputStr=outputStr.strip() print(outputStr) output = json.loads(outputStr) if output['Status'] == 'SUCCESS': Status = 'SUCCESS' MLaaC_output = output['location'] Msg = 'MLaaC dockerfile successfully generated' else: Status = 'Failure' Msg = output['msg'] else: Status = 'Failure' Msg = 'Execute generate code first' if command == 'githubupload': if shutil.which('git') is None: Status = 'Failure' Msg = 'Git is not installed, Please install Git first.' else: try: deployPath = str(p.DeployPath) maac_path = os.path.join(deployPath,'publish','MLaC') if os.path.isdir(maac_path): githuburl = request.POST.get('githuburl') githubusername = request.POST.get('githubusername') githubtoken = request.POST.get('githubtoken') githubemail = request.POST.get('githubemail') githubconfig = {"url_type":"https","url":githuburl,"username":githubusername,"email":githubemail,"token":githubtoken,"location":maac_path,"modelName":usecasename,"gitFolderLocation":GITHUB_FILE_PATH} from mlops import git_upload outputStr = git_upload.upload(githubconfig) print(outputStr) output = json.loads(outputStr) if output['Status'] == 'SUCCESS': Status = 'SUCCESS' MLaaC_output = githuburl Msg = 'Code Uploaded to GitHub Successfully' else: Status = 'Failure' Msg = output['msg'] else: Status = 'Failure' Msg = 'GitHub Upload failed' except Exception as e: print(e) Status = 'Failure' Msg = 'GitHub Upload failed' if command == 'unpublishmodel': try: models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published') if len(models) > 0: for model in models: try: if problemType.lower() == "llm fine-tuning": cloudconfig = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) modelid = usecasename + '_' + str(Version) usecasename = usecasename.replace(" ", "_") hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version)) from llm.llm_inference import kill_inference_server kill_inference_server(cloudconfig,instanceid,hypervisor,region,image) update_sqllite_data(modelid,'status','Success') else: try: os.kill(int(model.publishPID), signal.SIGTERM) mod.publishPID = 0 except Exception as e: print(e) mod = Existusecases.objects.get(id=model.id) mod.publishStatus = '' mod.portNo = 0 mod.save() Status = 'SUCCESS' Msg = 'Model Unpublished Successfully' except Exception as e: print(e) Status = 'Error' Msg = 'Model Unpublished Error' except Exception as e: 
exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) print(e) pass if command == 'publishmodel': try: portNo=0 models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published') if len(models) > 0: for model in models: try: portNo = model.portNo try: os.kill(int(model.publishPID), signal.SIGTERM) except Exception as e: print(e) mod = Existusecases.objects.get(id=model.id) mod.publishStatus = '' mod.publishPID = 0 mod.portNo = 0 mod.save() except Exception as e: print(e) pass missingNumbers = [] if problemType.lower() == "llm fine-tuning": model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version) try: usecasename = usecasename.replace(" ", "_") hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version)) if status.lower() in ['published','success'] : if status.lower() == 'published': from llm.llm_inference import kill_inference_server kill_inference_server('',instanceid, hypervisor, region, image) update_sqllite_data(usecasename + '_' + str(Version), 'status', 'Success') already_published,published_usecase = get_published_models(instanceid) if already_published: Status = 'Error' Msg = f'{published_usecase} is published at the same id, Please Unpublish mentioned model to proceed.' else: if not region: region = '' cloudconfig = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json')) usecase = usecasename + '_' + str(Version) #modelid = usecasename + '_' + str(Version) scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'aion.py')) cmd = [sys.executable, scriptPath, '-m', 'llmpublish', '-cc', cloudconfig, '-i',instanceid,'-hv',hypervisor,'-md',deployedModel,'-uc',usecase,'-r',region,'-im',image ] outputStr = subprocess.Popen(cmd) model.publishStatus = 'Published' model.publishPID = 0 model.portNo = 8000 model.save() Status = 'SUCCESS' from llm.llm_inference import get_ip instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image) print(instanceip) url = 'http://' + instanceip + ':' + str(model.portNo) + '/generate' Msg = 'Model Published Successfully, Server will take few minutes to be ready for Inferencing. URL: ' + url update_sqllite_data(usecase,'status','Published') else: Status = 'Error' Msg = 'Only Trained models are availble for Publish.' 
except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) Status = 'Error' Msg = 'Model Published Error' else: if portNo == 0: models = Existusecases.objects.filter(publishStatus='Published') usedPortNo=[] for model in models: usedPortNo.append(model.portNo) startPortNo = 8091 endPortNo = 8091+5 missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo] if len(missingNumbers) > 0: portNo = missingNumbers[0] if portNo != 0: model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version) scriptPath = os.path.join(PUBLISH_PATH,usecasename,'aion_publish_service.py') isExist = os.path.exists(scriptPath) if isExist: configfile = os.path.join(PUBLISH_PATH,usecasename,'config.json') configdata = {'version': str(Version)} with open(configfile, "w") as outfile: json.dump(configdata, outfile) outfile.close() outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)]) model.publishStatus = 'Published' model.publishPID = outputStr.pid model.portNo = portNo model.save() Status = 'SUCCESS' hosturl =request.get_host() hosturl = hosturl.split(':') url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecasename)+'/predict' Msg = 'Model Published Successfully URL: '+url else: Status = 'Error' Msg = 'Model Published Error' else: Status = 'Error' Msg = 'All ports are utilized' except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) print(e) pass if command == 'generatekubeflowyaml': try: if problemType.lower() == 'timeseriesforecasting': #task 11997 from appbe.aionpipelinets import aionpipelinets else: from appbe.aionpipeline import aionpipeline deployPath = str(p.DeployPath) codeconfig = os.path.join(deployPath,'etc','code_config.json') featuresmapping = {'modelBased':'mlbased','statisticalBased':'statisticalBased'} if os.path.isfile(codeconfig): with open(codeconfig,'r') as f: codeconfig = json.load(f) f.close() modelsarray=[] for featureselection in codeconfig['feature_selector']: for algo in codeconfig['algorithms'].keys(): if problemType.lower() == 'timeseriesforecasting': #task 11997 modelname = 'modeltraining_'+algo.lower() else: modelname = 'modeltraining_'+algo.lower()+'_'+featuresmapping[featureselection] modelx = {'modelname':modelname} modelsarray.append(modelx) modelsjson = {'models':modelsarray} kubeflowhost= request.POST.get('kubeflowhost') containerregistry= request.POST.get('containerregistry') containerlabel= request.POST.get('containerlabel') containersecret= request.POST.get('containersecret') if problemType.lower() == 'timeseriesforecasting': #task 11997 ap = aionpipelinets(modelsjson,containerregistry,containerlabel,containersecret) else: ap = aionpipeline(modelsjson,containerregistry,containerlabel,containersecret) ap.aion_mlops() ap.compilepl() ap.executepl(kubeflowhost) Status = 'SUCCESS' MLaaC_output = '' Msg = 'MLOps pipeline executed successfully' except Exception as e: print(e) Status = 'Failure' Msg = 'Error in pipeline execution' from appbe.pages import get_usecase_page if command in ['publishmodel','unpublishmodel']: status,context,action = get_usecase_page(request,usecasedetails,Existusecases,usecaseid) context['Status'] = Status context['MLaaC_output'] = MLaaC_output context['Msg'] = Msg return(context,'usecasedetails.html') else: 
        status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
        context['Status'] = Status
        context['MLaaC_output'] = MLaaC_output
        context['Msg'] = Msg
        return(context,'usecases.html')

def getusercasestatus(request):
    if 'UseCaseName' in request.session:
        selected_use_case = request.session['UseCaseName']
    else:
        selected_use_case = 'Not Defined'
    if 'ModelVersion' in request.session:
        ModelVersion = request.session['ModelVersion']
    else:
        ModelVersion = 0
    if 'ModelStatus' in request.session:
        ModelStatus = request.session['ModelStatus']
    else:
        ModelStatus = 'Not Trained'
    return selected_use_case,ModelVersion,ModelStatus
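# ---------------------------------------------------------------------------
# Sketch (illustrative only) of the port-allocation rule used by
# publishmodel() and maac_command() in this module: a published service gets
# the first free port in the fixed window 8091-8095, and 0 means every port
# is already taken. The helper name and the plain-list argument are
# assumptions for illustration, not an existing utility.
# ---------------------------------------------------------------------------
def _next_free_publish_port(used_ports, start=8091, count=5):
    for port in range(start, start + count):
        if port not in used_ports:
            return port
    return 0  # matches the callers above, which report 'All ports are utilized'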
textSummarization.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import time import os import subprocess import base64 import sys import re from appbe.dataIngestion import getcommonfields from appbe.dataIngestion import getusercasestatus def startSummarization(request,DEFAULT_FILE_PATH,CONFIG_PATH,DATA_FILE_PATH): try: if request.FILES: Datapath = request.FILES['summarypath'] ext = str(Datapath).split('.')[-1] filetimestamp = str(int(time.time())) if ext.lower() in ['txt','pdf','doc','docs']: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) else: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) with open(dataFile, 'wb+') as destination: for chunk in Datapath.chunks(): destination.write(chunk) destination.close() configFile = os.path.join(DEFAULT_FILE_PATH,'aion_textSummerization.json') filetimestamp = str(int(time.time())) config_json_filename = os.path.join(CONFIG_PATH, 'AION_' + filetimestamp + '.json') f = open(configFile) data = json.load(f) f.close() data['basic']['dataLocation'] = dataFile type = request.POST.get('type') model = request.POST.get('model') slength = request.POST.get('length') types = data['basic']['analysisAproach']['textSummarization'] for x in list(types.keys()): data['basic']['analysisAproach']['textSummarization'][x] = 'False' data['basic']['analysisAproach']['textSummarization'][type] = 'True' format = request.POST.get('format') algorithm = data['basic']['algorithms']['textSummarization'] for x in list(algorithm.keys()): data['basic']['algorithms']['textSummarization'][x] = 'False' data['basic']['algorithms']['textSummarization'][model]='True' length = data['advance']['textSummarization']['summaryLength'] for x in list(types.keys()): data['advance']['textSummarization']['summaryLength'][x] = 'False' data['advance']['textSummarization']['summaryLength'][slength] = 'True' with open(config_json_filename, "w") as outfile: json.dump(data, outfile) outfile.close() from bin.aion_text_summarizer import aion_textsummary outputStr = aion_textsummary(config_json_filename) #scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','bin','aion_text_summarizer.py')) #outputStr = subprocess.check_output([sys.executable, scriptPath, config_json_filename]) #outputStr = outputStr.decode('utf-8') #outputStr = re.search(r'Summary:(.*)', str(outputStr), re.IGNORECASE).group(1) predict_dict = json.loads(str(outputStr)) summary = predict_dict['summary'] except Exception as e: print(e) summary = str(e) context = getcommonfields() selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}) context.update({'summary':summary}) return context
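# ---------------------------------------------------------------------------
# Sketch (illustrative only) of the flag-flipping pattern startSummarization()
# applies to the aion_textSummerization.json config: every sibling option is
# reset to 'False' and only the selected one is set to 'True'. The helper
# name is an assumption, not an existing utility in this module.
# ---------------------------------------------------------------------------
def _select_option(options: dict, selected: str) -> dict:
    for key in options.keys():
        options[key] = 'False'
    options[selected] = 'True'
    return options

# e.g. _select_option(data['basic']['algorithms']['textSummarization'], model)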
s3bucketsDB.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import sqlite3 from pathlib import Path import json import os import rsa import boto3 #usnish import pandas as pd import time class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() def table_exists(self, name): query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() return len(listOfTables) > 0 def read_data(self, table_name): query = f"SELECT * FROM {table_name}" row = self.cursor.execute(query).fetchall() return list(row) #return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def delete_record(self,table_name,col_name, col_value): try: query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'" self.conn.execute(query) self.conn.commit() return 'success' except Exception as e : print(str(e)) print("Deletion Failed") return 'error' def get_data(self,table_name,col_name,col_value): query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'" row = self.cursor.execute(query).fetchone() if(row == None): return [] return list(row) def write_data(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def close(self): self.conn.close() def add_new_s3bucket(request): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if request.POST["aionreferencename"] =='' or request.POST["s3bucketname"] == '' or request.POST["awsaccesskey"] == '' : return 'error' pkeydata='''-----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/ lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB -----END RSA PUBLIC KEY-----''' pubkey = rsa.PublicKey.load_pkcs1(pkeydata) awssecretaccesskey = rsa.encrypt(request.POST["awssecretaccesskey"].encode(), pubkey) newdata = {} newdata['Name'] = [request.POST["aionreferencename"]] newdata['AWSAccessKeyID'] = [request.POST["awsaccesskey"]] 
newdata['AWSSecretAccessKey'] = [str(awssecretaccesskey)] newdata['S3BucketName'] = [request.POST["s3bucketname"]] name = request.POST["aionreferencename"] if sqlite_obj.table_exists("s3bucket"): if(len(sqlite_obj.get_data("s3bucket","Name",name)) > 0): return 'error1' sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'s3bucket') except Exception as e: print(e) return 'error' def get_s3_bucket(): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') temp_data = sqlite_obj.read_data('s3bucket') data = [] for x in temp_data: data_dict = {} data_dict['Name'] = x[0] data_dict['AWSAccessKeyID'] = x[1] data_dict['AWSSecretAccessKey'] = x[2] data_dict['S3BucketName'] = x[3] data.append(data_dict) except Exception as e: print(e) data = [] return data def remove_s3_bucket(name): from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') return sqlite_obj.delete_record('s3bucket','Name',name) def read_s3_bucket(name,filename,DATA_FILE_PATH): privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw 0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ 4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13 r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL -----END RSA PRIVATE KEY-----''' try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') data = sqlite_obj.get_data("s3bucket",'Name',name) except: data = [] awssecretaccesskey = '' found = False if len(data)!=0: aws_access_key_id = data[1] awssecretaccesskey = data[2] bucketName = data[3] found = True if found: privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') awssecretaccesskey = eval(awssecretaccesskey) awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey) awssecretaccesskey = awssecretaccesskey.decode('utf-8') #awssecretaccesskey = 'SGcyJavYEQPwTbOg1ikqThT+Op/ZNsk7UkRCpt9g'#rsa.decrypt(awssecretaccesskey, privkey) client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, 
                                 aws_secret_access_key=str(awssecretaccesskey))
        #print(bucketName,filename)
        try:
            response = client_s3.get_object(Bucket=bucketName, Key=filename)
            df = pd.read_csv(response['Body'])
        except Exception as e:
            print(str(e))#usnish
            return 'Error',str(e), pd.DataFrame()
            #return 'Error', pd.DataFrame()
        return 'Success','',df
    return 'Error',"Please check bucket configuration", pd.DataFrame()
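# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): reading a CSV object from a configured
# bucket. read_s3_bucket() returns (status, message, dataframe); the bucket
# reference name and object key below are placeholders, and DATA_FILE_PATH is
# not used by the current implementation.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    status, message, df = read_s3_bucket('my_bucket_ref', 'data/train.csv', DATA_FILE_PATH='')
    if status == 'Success':
        print(df.head())
    else:
        print(message)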
sqliteUtility.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path import sqlite3 class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem db_file = str(location / self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() def table_exists(self, name): query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() return len(listOfTables) > 0 def read_data(self, table_name, condition = None): if condition: query = f"SELECT * FROM {table_name} WHERE "+condition else: query = f"SELECT * FROM {table_name}" row = self.cursor.execute(query).fetchall() return list(row) def column_names(self, table_name): query = f"SELECT * FROM {table_name}" row = self.cursor.execute(query).fetchall() column_names = list(map(lambda x:x[0],self.cursor.description)) return column_names # return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self, name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def delete_record(self, table_name, col_name, col_value): try: query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'" self.conn.execute(query) self.conn.commit() return 'success' except Exception as e: print(str(e)) print("Deletion Failed") return 'error' def drop_table(self,table_name): query = f"DROP TABLE {table_name}" self.cursor.execute(query) print("Table dropped... 
") # Commit your changes in the database self.conn.commit() def get_data(self, table_name, col_name, col_value): query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'" row = self.cursor.execute(query).fetchone() if (row == None): return [] return list(row) def execute_query(self,query): self.cursor.execute(query) self.conn.commit() def write_data(self, data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def update_dict_data(self,data:dict,condition,table_name): if not data: return if not table_name: raise ValueError('Database table name is not provided') updates = '' #TODO validation of keys for i,kv in enumerate(data.items()): if i: updates += ',' updates += f'"{kv[0]}"="{kv[1]}"' if condition == '': update_query = f'UPDATE {table_name} SET {updates}' else: update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() def update_data(self,updates,condition,table_name): if condition == '': update_query = f'UPDATE {table_name} SET {updates}' else: update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() def close(self): self.conn.close()
prediction.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging from appbe.dataIngestion import getcommonfields from appbe.dataIngestion import getusercasestatus from appbe import service_url import json from appbe.dataIngestion import delimitedsetting import os,sys import pandas as pd from django.http import HttpResponse import time from appbe.dataPath import LOG_LOCATION from appbe.log_ut import logg def get_instance_id(modelID): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if sqlite_obj.table_exists("LLMTuning"): data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) print(data) if len(data) > 0: return (data[3]+' instance '+data[2]) else: return 'Instance ID not available' else: return 'Instance ID not available' def get_instance(modelID): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if sqlite_obj.table_exists("LLMTuning"): data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID) if len(data) > 0: return (data[3],data[2],data[5],data[6]) else: return '','','','' else: return '','','','' def getprompt(promptfeature,contextFeature,responseFeature,promptFriendlyName,responseFriendlyName,data): if contextFeature != '': promptData = data[promptfeature].replace('\n','') inputData = data[contextFeature].replace('\n','') prompt = ( f"Below is an {promptFriendlyName} that describes a task, paired with an Input that provides further context. " f"Write a {responseFriendlyName} that appropriately completes the request.\n\n" f"### {promptFriendlyName}:\n{promptData}\n\n### Input:\n{inputData}\n\n### {responseFriendlyName}:\n") else: promptData = data[promptfeature].replace('\n','') prompt=( f"Below is an {promptFriendlyName} that describes a task. 
" f"Write a {responseFriendlyName} that appropriately completes the request.\n\n" f"### {promptFriendlyName}:\n{promptData}\n\n### {responseFriendlyName}:\n") return prompt def getDataInstance(problem_type,mlmodels,configSettingsJson): log = logging.getLogger('log_ux') delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier']) if problem_type == 'timeSeriesForecasting': #task 11997 inputFieldsDict = {'noofforecasts': 10} elif problem_type == 'recommenderSystem' and mlmodels =='ItemRating': inputFieldsDict = {"uid": 1, "iid": 31, "rating": 0} elif problem_type == 'stateTransition': inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] inputFeaturesList = inputFeatures.split(',') inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} else: inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] inputFeaturesList = inputFeatures.split(',') if targetFeature in inputFeaturesList: inputFeaturesList.remove(targetFeature) if problem_type == 'survivalAnalysis': inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) dataFilePath = str(configSettingsJson['basic']['dataLocation']) if os.path.isfile(dataFilePath): df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,encoding_errors= 'replace') try: singleInstanceData = df.loc[0, inputFeaturesList] except: singleInstanceData = pd.Series(0, index =inputFeaturesList) inputFieldsDict = singleInstanceData.to_dict() else: inputFieldsDict = {"File":"EnterFileContent"} inputFields = [] inputFields.append(inputFieldsDict) return inputFields def createInstanceFeatures(configSettingsJson,problem_type,mlmodels,usecaseid,version,ser_url): delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier']) inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] if inputFeatures != '': inputFeaturesList = inputFeatures.split(',') else: inputFeaturesList = [] if targetFeature in inputFeaturesList: inputFeaturesList.remove(targetFeature) if configSettingsJson['basic']['contextFeature'] != '': inputFeaturesList.append(configSettingsJson['basic']['contextFeature']) if problem_type == 'llmFineTuning': inputFeaturesList.append('Temperature') inputFeaturesList.append('Max Tokens') if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997 if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na': inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) dataFilePath = str(configSettingsJson['basic']['dataLocation']) if problem_type == 'timeSeriesForecasting': #task 11997 inputFieldsDict = {'noofforecasts': 10} elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating': inputFieldsDict = {"uid": 1, "numberOfRecommendation":10} elif problem_type == 'stateTransition': inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] if inputFeatures != '': inputFeaturesList = inputFeatures.split(',') else: inputFeaturesList = [] inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'} elif problem_type != 
'llmFineTuning': if os.path.isfile(dataFilePath): df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') try: inputFieldsDict = df.to_dict(orient='index')[0] except: inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict() else: inputFieldsDict = {"File":"EnterFileContent"} else: inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict() inputFieldsDict['Temperature'] = '0.1' hypervisor,instanceid,region,image = get_instance(usecaseid+'_'+str(version)) if hypervisor.lower() == 'AWS': inputFieldsDict['Max Tokens'] = '1024' else: inputFieldsDict['Max Tokens'] = '4096' inputFields = [] inputFields.append(inputFieldsDict) if problem_type == 'llmFineTuning': ser_url = get_instance_id(usecaseid+'_'+str(version)) elif problem_type == 'stateTransition': ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecaseid+'&version='+str(version) else: ser_url = ser_url+'predict?usecaseid='+usecaseid+'&version='+str(version) return inputFields,ser_url def singleInstancePredict(request, Existusecases, usecasedetails): log = logging.getLogger('log_ux') modelType='' context = getcommonfields() submittype = request.POST.get('predictsubmit') selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) t1 = time.time() try: try: model = Existusecases.objects.get(ModelName=request.session['ModelName'], Version=request.session['ModelVersion']) output_train_json_filename = str(model.TrainOuputLocation) f = open(output_train_json_filename, "r+") training_output = f.read() f.close() training_output = json.loads(training_output) featureused = training_output['data']['featuresused'] except: featureused = [] from appbe.telemetry import UpdateTelemetry UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Prediction','Yes') usecasename = request.session['usecaseid'].replace(" ", "_") context.update({'usecasename':usecasename}) updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r", encoding = "utf-8") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] if inputFeatures != '': inputFeaturesList = inputFeatures.split(',') else: inputFeaturesList = [] if targetFeature in inputFeaturesList: inputFeaturesList.remove(targetFeature) if configSettingsJson['basic']['contextFeature'] != '': inputFeaturesList.append(configSettingsJson['basic']['contextFeature']) problemtypes = configSettingsJson['basic']['analysisType'] problem_type = '' modelSize = '' for k in problemtypes.keys(): if configSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break if problem_type == 'llmFineTuning': inputFeaturesList.append('Temperature') inputFeaturesList.append('Max Tokens') mlmodels ='' algorihtms = configSettingsJson['basic']['algorithms'][problem_type] for k in algorihtms.keys(): if configSettingsJson['basic']['algorithms'][problem_type][k] == 'True': if mlmodels != '': mlmodels += ', ' mlmodels += k if problem_type == 'llmFineTuning': ser_url = get_instance_id(usecasename+'_'+str(request.session['ModelVersion'])) if 'modelSize' in configSettingsJson['basic']: selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels] for k in selectedModelSize.keys(): if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True': modelSize = k break elif 
problem_type == 'stateTransition': ser_url = service_url.read_service_url_params(request) ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion']) else: ser_url = service_url.read_service_url_params(request) ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion']) if submittype.lower() == 'predict': inputFieldsDict = {} if problem_type == 'timeSeriesForecasting': #task 11997 inputFieldsDict['noofforecasts'] = int(request.POST.get('noofforecasts')) elif problem_type == 'stateTransition': inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] sessionid = request.POST.get('SessionID') activity = request.POST.get(targetFeature) inputFieldsDict[inputFeatures] = request.POST.get(inputFeatures) inputFieldsDict[targetFeature] = request.POST.get(targetFeature) elif problem_type == 'recommenderSystem' and mlmodels == 'ItemRating': inputFieldsDict['uid'] = request.POST.get('uid') inputFieldsDict['numberOfRecommendation'] = int(request.POST.get('numberOfRecommendation')) #Task 11190 else: if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997 if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na': inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature']) for feature in inputFeaturesList: inputFieldsDict[feature] = request.POST.get(feature) if problem_type.lower() not in ['contextualsearch','similarityidentification']: for key, value in inputFieldsDict.items(): if value == 'nan': inputFieldsDict[key] = '' if value == '': if key in featureused: context.update({'tab': 'predict','ser_url':ser_url, 'error': ' Error : Mandatory field(s) are empty', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) return context inputFieldsJson = json.dumps(inputFieldsDict) if problem_type == 'llmFineTuning': modelType = request.POST.get('modelTypeforInferencing') x = inputFieldsDict.keys() from appbe.dataPath import DATA_DIR prompt = inputFieldsDict[configSettingsJson['basic']['trainingFeatures']] promptobj = {'prompt':prompt} if configSettingsJson['basic']['contextFeature'] != '': inputData = inputFieldsDict[configSettingsJson['basic']['contextFeature']] promptobj.update({'input':inputData}) filetimestamp = str(int(time.time())) file_path = os.path.join(DATA_DIR,'logs',filetimestamp+'.json') f= open(file_path,"w",encoding="utf-8") #print(promptobj) json.dump(promptobj,f) f.close() from llm.llm_inference import LLM_predict cloudconfig = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json')) hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(request.session['ModelVersion'])) if hypervisor and instanceid: if modelSize != '': mlmodels = mlmodels+'-'+modelSize cachepath = os.path.join(DATA_DIR,'sqlite','cachePrompt.db') import sqlite3 conn = sqlite3.connect(cachepath) from llm.llm_cache import CachePrompt cachepromptObj = CachePrompt(conn) searchFlag,result = cachepromptObj.selectFromCache(prompt,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens']) if searchFlag: buf = 
LLM_predict(cloudconfig,instanceid,file_path,hypervisor,mlmodels,usecasename+'_'+str(request.session['ModelVersion']),region,image,inputFieldsDict['Temperature'],inputFieldsDict['Max Tokens'],modelType) import re outputStr = buf.split('ModelOutput:')[1] cachepromptObj.insertRecord(prompt,outputStr,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens']) else: outputStr = result if configSettingsJson['basic']['folderSettings']['fileType'].lower() != 'llm_document': outputStr = outputStr.split('### '+configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']+':')[1] singlePredictionResults = [] singlePredictionsummary="" Results={} Results['Response'] = outputStr singlePredictionResults.append(Results) else: context.update( {'tab': 'tabconfigure', 'error': 'Prediction Error: Instance ID not found ', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'mlmodels':mlmodels}) log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Prediction Error, Instance ID not found') return context else: try: import requests #response = requests.post(ser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={"Content-Type":"application/json",}) response = requests.post(ser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",}) if response.status_code != 200: outputStr=response.content context.update({'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'}) log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : '+str(outputStr.decode('utf-8'))) return context except Exception as inst: if 'Failed to establish a new connection' in str(inst): context.update({'tab': 'tabconfigure', 'error': 'AION service need to be started', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0'+' sec'+' : '+'Error : AION service need to be started, '+str(inst)) return context else: context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst),'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error, '+str(inst)) return context outputStr=response.content outputStr = outputStr.decode('utf-8','ignore') outputStr = outputStr.strip() predict_dict = json.loads(str(outputStr)) #print(predict_dict) singlePredictionsummary="" if (predict_dict['status'] == 'SUCCESS'): data = predict_dict['data'] singlePredictionResults = [] Results = {} if problem_type == 'multiModalLearning': data = data[0] Results['prediction'] = data['predict'] singlePredictionResults.append(Results) if problem_type == 'textSummarization': data = data[0] Results['msg'] = predict_dict['msg'] singlePredictionResults.append(Results) Results['prediction'] = predict_dict['data'] singlePredictionResults.append(Results) Results1 = {} Results1['prediction'] = predict_dict['data'] print("prdata------------",predict_dict['data']) singlePredictionsummary=predict_dict['data'] 
print("singlePredictionsummary",singlePredictionsummary) t2 = time.time() elif problem_type == 'multiLabelPrediction': prediction = '' for x in data: for y in x: if 'predict' in y: if prediction != '': prediction += ',' prediction += str(y)+':'+str(x[y]) Results['prediction'] = prediction singlePredictionResults.append(Results) elif problem_type == 'timeSeriesForecasting': #task 11997 Results['prediction'] = json.dumps(data) singlePredictionResults.append(Results) elif problem_type == 'stateTransition': if str(data['Anomaly']) == 'False': Results['prediction'] = 'No Anomaly' else: Results['prediction'] = str(data['Remarks']) singlePredictionResults.append(Results) elif problem_type.lower() in ['similarityidentification','contextualsearch']: data = data[0] prediction = data['prediction'] i = 1 for x in prediction: te = '' for y in x: info = (str(x[y])[:50] + '...') if len(str(x[y])) > 50 else str(x[y]) te += y+': '+info+'\n\n' Results[i] = te i = i+1 singlePredictionResults.append(Results) else: data = data[0] if 'prediction' in data: Results['prediction'] = data['prediction'] if 'probability' in data: Results['probability'] = data['probability'] if 'remarks' in data: Results['remarks'] = json.loads(data['remarks']) singlePredictionResults.append(Results) t2 = time.time() log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+str(round(t2-t1))+' sec'+' : '+'Success') else: context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(predict_dict['message']), 'selected': 'prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion}) log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error') return context inputFields = [] inputFields.append(inputFieldsDict) ##Below added by sjayaram for llm langkit evaluation metrics Task:17109 prompt_response_results = '' if problem_type == 'llmFineTuning': try: response_msg = outputStr prompt_msg = prompt except: response_msg = '' prompt_msg = '' from appbe.evaluate_prompt import evaluate_prompt_response_inputs final_output_json,prompt_response_results = evaluate_prompt_response_inputs(prompt_msg,response_msg) #ser_url = service_url.read_service_url_params(request) #ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(ModelVersion) context.update({'tab': 'predict','mlmodels':mlmodels,'fineTunedModelType':modelType,'ser_url':ser_url, 'inputFields': inputFields,'singlePredictionResults': singlePredictionResults,'singlePredictionsummary':singlePredictionsummary,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction', 'prompt_response_results':prompt_response_results}) return context elif submittype.lower() == 'script': scriptdata="'''\n" scriptdata+="* =============================================================================\n" scriptdata+="* COPYRIGHT NOTICE\n" scriptdata+="* =============================================================================\n" scriptdata+="* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023\n" scriptdata+="* Proprietary and confidential. All information contained herein is, and\n" scriptdata+="* remains the property of HCL Technologies Limited. 
Copying or reproducing the\n" scriptdata+="* contents of this file, via any medium is strictly prohibited unless prior\n" scriptdata+="* written permission is obtained from HCL Technologies Limited.\n" scriptdata+="'''\n" scriptdata+='import sys\n' scriptdata+='import json\n' scriptdata+='import requests\n' scriptdata+='import pandas as pd\n' scriptdata+='from pandas import json_normalize\n' scriptdata+='ser_url ="'+ser_url+'"\n\n' scriptdata+="def predict(data):\n" scriptdata+=" if data.endswith('.tsv'):\n" scriptdata+=" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace',sep='\\t')\n" scriptdata+=" else:\n" scriptdata+=" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace')\n" scriptdata+=' features = "'+",".join([feature for feature in inputFeaturesList])+'"\n' scriptdata+=" features = features.split(',')\n" scriptdata+=" df = df[features]\n" scriptdata+=" data = df.to_json(orient='records')\n" scriptdata+=" try:\n" scriptdata+=' response = requests.post(ser_url,data=data,headers={"Content-Type":"application/json",})\n' scriptdata+=" if response.status_code == 200:\n" scriptdata+=" outputStr=response.content\n" scriptdata+=" outputStr = outputStr.decode('utf-8')\n" scriptdata+=" outputStr = outputStr.strip()\n" scriptdata+=" predict_dict = json.loads(str(outputStr))\n" scriptdata+=" print(predict_dict)\n" scriptdata+=" except Exception as e:\n" scriptdata+=' print(e)\n' scriptdata+='\nif __name__ == "__main__":\n' scriptdata+=' predict(sys.argv[1])' response = HttpResponse() response['content_type'] = 'text/plain' response['Content-Disposition'] = 'attachment; filename=prediction.py' response.write(scriptdata) return response except Exception as inst: print(inst) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) context.update({'tab': 'tabconfigure', 'error': 'Failed To perform prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'}) log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + ' 0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction, '+ str(inst)) return context
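# --- Hedged usage sketch (illustration, not part of the original file) ---
# The view above ultimately POSTs a JSON payload of input features to the AION
# service at ser_url + 'predict?usecaseid=<name>&version=<n>', which is also what
# the generated prediction.py script does. A minimal standalone client doing the
# same thing; the base URL, usecase id, version and feature names/values below
# are placeholders, not values taken from this repository.
import json
import requests

def example_single_predict():
    ser_url = "http://localhost:8091/api/"            # assumed AION service base URL
    usecaseid, version = "my_usecase", 1               # hypothetical usecase and version
    url = f"{ser_url}predict?usecaseid={usecaseid}&version={version}"
    payload = json.dumps({"feature_1": 10, "feature_2": "A"})   # hypothetical features
    resp = requests.post(url, data=payload, headers={"Content-Type": "application/json"})
    if resp.status_code == 200:
        # the service returns a JSON document with 'status' and 'data' keys
        print(json.loads(resp.content.decode("utf-8")))
    else:
        print("prediction request failed:", resp.status_code)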
flconfig.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os.path import time import subprocess import sys from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from appbe import installPackage from appbe import compute from appbe.models import getusercasestatus import json import pandas as pd from os.path import expanduser import ntpath import shutil import platform from pathlib import Path home = expanduser("~") if platform.system() == 'Windows': LOG_FILE_PATH = os.path.join(home,'AppData','Local','HCLT','AION','logs') else: LOG_FILE_PATH = os.path.join(home,'HCLT','AION','logs') def convert(obj): if isinstance(obj, bool): return str(obj).capitalize() if isinstance(obj, (list, tuple)): return [convert(item) for item in obj] if isinstance(obj, dict): return {convert(key):convert(value) for key, value in obj.items()} return obj def fl_command(request,Existusecases,usecasedetails): command = request.POST.get('flsubmit') print(command) #kafkaSetting = kafka_setting() ruuningSetting = running_setting() computeinfrastructure = compute.readComputeConfig() modelID = request.POST.get('modelID') p = Existusecases.objects.get(id=modelID) usecasename = p.ModelName.UsecaseName runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) usecasedetail = usecasedetails.objects.get(id=p.ModelName.id) usecase = usecasedetails.objects.all() models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') for model in models: model.scoringCreteria = 'NA' model.score = 'NA' model.deploymodel = 'NA' if os.path.isdir(str(model.DeployPath)): modelPath = os.path.join(str(model.DeployPath),'etc','output.json') try: with open(modelPath) as file: outputconfig = json.load(file) file.close() if outputconfig['status'] == 'SUCCESS': model.scoringCreteria = outputconfig['data']['ScoreType'] model.score = outputconfig['data']['BestScore'] model.deploymodel = outputconfig['data']['BestModel'] model.modelType = outputconfig['data']['ModelType'] model.featuresused = eval(outputconfig['data']['featuresused']) model.targetFeature = outputconfig['data']['targetFeature'] model.modelParams = outputconfig['data']['params'] model.dataPath = os.path.join(str(model.DeployPath),'data', 'postprocesseddata.csv.gz') model.maacsupport = 'True' model.flserversupport = 'False' supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"] if model.deploymodel in supportedmodels: model.flserversupport = 'True' else: model.flserversupport = 'False' supportedmodels = ["Logistic Regression", "Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge","Extreme Gradient Boosting (XGBoost)","Light Gradient Boosting (LightGBM)","Categorical Boosting (CatBoost)"] if model.deploymodel in supportedmodels: model.maacsupport = 'True' else: model.maacsupport = 'False' supportedmodels = ["Extreme Gradient Boosting 
(XGBoost)"] if model.deploymodel in supportedmodels: model.encryptionsupport = 'True' else: model.encryptionsupport = 'False' except Exception as e: pass flserver = os.path.join(str(p.DeployPath),'publish','FedLearning') if command == 'startServer': flservicefile = os.path.join(flserver,'fedServer','aionfls.py') confilefile = os.path.join(flserver,'fedServer','config.json') if platform.system() == 'Windows': outputStr = subprocess.Popen([sys.executable, flservicefile,confilefile],creationflags = subprocess.CREATE_NEW_CONSOLE) else: outputStr = subprocess.Popen([sys.executable, flservicefile,confilefile]) Status = 'SUCCESS' Msg = 'Federated Learning Server Started' if command == 'saveflconfig': #print(command) fedserverPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','fedServer')) shutil.rmtree(flserver, ignore_errors=True) Path(flserver).mkdir(parents=True, exist_ok=True) flcopypath = os.path.join(flserver,'fedServer') shutil.copytree(fedserverPath,flcopypath) fedserverDataPath = os.path.join(flcopypath,'data') shutil.copy2(request.POST.get('flserver_datalocation'),fedserverDataPath) flcon = {} AlgorithmNames={'Logistic Regression':'LogisticRegression','Neural Network':'deeplearning','Linear Regression':'LinearRegression'} flcon['server_IP'] = request.POST.get('flserver_ipaddress') flcon['server_port'] = request.POST.get('flserver_port') flcon['model_name'] = AlgorithmNames[request.POST.get('flserver_model')] flcon['version'] = request.POST.get('flserver_Version') flcon['model_hyperparams'] = convert(eval(request.POST.get('flserver_params'))) dataLocation = request.POST.get('flserver_datalocation') dataPath,datafile = ntpath.split(dataLocation) flcon['data_location'] = 'data/'+datafile flcon['selected_feature'] = ",".join([model for model in eval(request.POST.get('flserver_trainingfeatures'))]) flcon['target_feature'] = request.POST.get('flserver_targetfeature') flcon['problem_type'] = request.POST.get('flserver_modelType') flcon['min_available_clients'] = request.POST.get('flserver_noofclient') flcon['min_fit_clients'] = 2 flcon['fl_round'] = request.POST.get('flserver_trainround') flcon['evaluation_required'] = request.POST.get('flserver_evaluation') flcon['model_store'] = "" flconfigfile = os.path.join(flcopypath,'config.json') flconjson = json.dumps(flcon) f = open(flconfigfile, "w+") f.seek(0) f.write(flconjson) f.truncate() f.close() nouc = 0 Status = 'Success' Msg = 'Federated Learning Server Configured' if command =='startClient': flconfigfile = os.path.join(str(model.DeployPath),'publish','FedLearning','fedServer','config.json') if os.path.isfile(flconfigfile): with open(flconfigfile) as file: flconfig = json.load(file) file.close() numberofclient = flconfig['min_available_clients'] for x in range(int(numberofclient)): flclientdirectory = os.path.join(str(model.DeployPath),'publish','FedLearning','fedClient_'+str(x+1)) flclientpath = os.path.join(str(model.DeployPath),'publish','FedLearning','fedClient_'+str(x+1),'fedClient.bat') if platform.system() == 'Windows': outputStr = subprocess.Popen([flclientpath],creationflags = subprocess.CREATE_NEW_CONSOLE,cwd=flclientdirectory) else: outputStr = subprocess.Popen([flclientpath],cwd=flclientdirectory) Status = 'SUCCESS' Msg = 'Federated Learning Client Started' if command == 'generateClient': flconfigfile = os.path.join(str(model.DeployPath),'publish','FedLearning','fedServer','config.json') if os.path.isfile(flconfigfile): with open(flconfigfile) as file: flconfig = json.load(file) 
file.close() numberofclient = flconfig['min_available_clients'] trainingDataLocation = os.path.join(str(p.DeployPath),'data','postprocesseddata.csv.gz') from utils.file_ops import read_df_compressed status,df = read_df_compressed(trainingDataLocation,encoding='utf8') for x in range(int(numberofclient)): flclientpath = os.path.join(str(model.DeployPath),'publish','FedLearning','fedClient_'+str(x+1)) logPath = os.path.join(flclientpath,'logs') modelsPath = os.path.join(flclientpath,'models') Path(flclientpath).mkdir(parents=True, exist_ok=True) Path(logPath).mkdir(parents=True, exist_ok=True) Path(modelsPath).mkdir(parents=True, exist_ok=True) flclientor = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','fedClient','aionflc.py')) shutil.copy2(flclientor,flclientpath) flclientor = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','fedClient','utils.py')) shutil.copy2(flclientor,flclientpath) flclientor = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','fedClient','dl_model.py')) shutil.copy2(flclientor,flclientpath) subset = df.sample(frac=0.8) dataPath = os.path.join(flclientpath,'data') Path(dataPath).mkdir(parents=True, exist_ok=True) datafile = os.path.join(dataPath,'data.dat') subset.to_csv(datafile, index=False) flclient = {} flclient['server_IP'] = flconfig['server_IP'] flclient['server_port'] = flconfig['server_port'] flclient['model_name'] = flconfig['model_name'] flclient['problem_type'] = flconfig['problem_type'] flclient['version'] = flconfig['version'] flclient['model_hyperparams'] = flconfig['model_hyperparams'] flclient['data_location'] = 'data\data.dat' flclient['selected_feature'] = flconfig['selected_feature'] flclient['target_feature'] = flconfig['target_feature'] flclient['train_size'] = 80 #flclient['deploy_location'] = flconfig['deploy_location'] flclient['num_records_per_round'] = request.POST.get('flserver_recordperround') flclient['wait_time'] = request.POST.get('flserver_roundtime') flclient['model_overwrite'] = request.POST.get('model_overwritelabel') configPath = os.path.join(flclientpath,'config') Path(configPath).mkdir(parents=True, exist_ok=True) configFile = os.path.join(configPath,'config.json') flconjson = json.dumps(flclient) f = open(configFile, "w+") f.seek(0) f.write(flconjson) f.truncate() f.close() locate_python = sys.exec_prefix bathfilePath = os.path.join(flclientpath,'fedClient.bat') batfilecontent=''' @ECHO OFF GOTO weiter :setenv SET "Path={python_path}\;%Path%;" GOTO :EOF :weiter IF "%1" EQU "setenv" ( ECHO. ECHO Setting environment for AION Federated Learning Client. CALL :setenv python %CD%\\aionflc.py %CD%\config\config.json ) ELSE ( SETLOCAL EnableDelayedExpansion TITLE ION Federated Learning Client PROMPT %username%@%computername%$S$P$_#$S IF EXIST aion.config (FOR /F "delims=" %%A IN (aion.config) DO SET "%%A") START "" /B %COMSPEC% /K "%~f0" setenv ) '''.format(python_path=str(locate_python)) f = open(bathfilePath, "w",encoding="utf-8") f.write(str(batfilecontent)) f.close() Status = 'Success' Msg = 'Federated Learning Client Code Generated' nouc = 0 #selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) from appbe.pages import get_usecase_page status,context,action = get_usecase_page(request,usecasedetails,Existusecases) context['Status'] = Status context['Msg'] = Msg return(context)
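# --- Hedged illustration (not part of the original file) ---
# fl_command(..., command='saveflconfig') above assembles a fedServer config.json
# from the request parameters. Only the key names below come from that code path;
# every value is a placeholder chosen for illustration.
example_fl_server_config = {
    "server_IP": "127.0.0.1",
    "server_port": "8070",                       # assumed port
    "model_name": "LogisticRegression",          # mapped through AlgorithmNames above
    "version": "1",
    "model_hyperparams": {"C": "1.0"},           # hypothetical hyperparameters
    "data_location": "data/train.csv",
    "selected_feature": "f1,f2,f3",
    "target_feature": "label",
    "problem_type": "classification",
    "min_available_clients": "2",
    "min_fit_clients": 2,
    "fl_round": "5",
    "evaluation_required": "True",
    "model_store": "",
}
# This dict would be serialized with json.dumps() and written to
# <deploy_path>/publish/FedLearning/fedServer/config.json, as saveflconfig does.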
evaluate_prompt.py
from langkit import textstat from whylogs.experimental.core.udf_schema import udf_schema import pandas as pd import whylogs as why from langkit import light_metrics from whylogs.experimental.core.udf_schema import udf_schema from whylogs.experimental.core.udf_schema import register_dataset_udf import whylogs as why import json from sentence_transformers import SentenceTransformer, util from langkit import lang_config, response_column def evaluate_prompt_metrics(prompt_msg: any): """ Evaluate prompt only information.""" text_schema = udf_schema() llm_schema = light_metrics.init() df = pd.DataFrame({ "prompt": [ prompt_msg ]}) results = why.log(df, schema=udf_schema()) # .profile() view = results.view() automated_readability_index_prompt = view.get_column("prompt.automated_readability_index").to_summary_dict() automated_readability_index_prompt_mean = automated_readability_index_prompt['distribution/mean'] arip_m = lambda x:1 if x < 1 else (14 if x > 14 else x) automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean) automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean) flesch_reading_ease_prompt = view.get_column("prompt.flesch_reading_ease").to_summary_dict() flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean'] frep_m = lambda x:1 if x < 1 else (100 if x > 100 else x) flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean) flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean) prompt_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean), 'prompt_readability_value': automated_readability_index_prompt_value, 'prompt_reading_ease': str(flesch_reading_ease_prompt_mean), 'prompt_reading_ease_value': flesch_reading_ease_prompt_value} prompt_results_json = json.dumps(prompt_results, indent=4) return prompt_results_json,prompt_results model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') @register_dataset_udf(["prompt", "response"], "response.relevance_to_prompt") def similarity_MiniLM_L6_v2(text): x = text["prompt"] y = text["response"] embedding_1 = model.encode(x, convert_to_tensor=True) embedding_2 = model.encode(y, convert_to_tensor=True) similarity = util.pytorch_cos_sim(embedding_1, embedding_2) result = similarity.item() return result def get_readability_index_range_value(readability_value): if readability_value <= 1: ## Grade level Kindergarden to fourth grade return "Kindergarten" elif 1 < readability_value <= 2: ## Grade level Kindergarden to fourth grade return "First Grade" elif 2 < readability_value <= 3: ## Grade level Fifth grade to Ninth grade return "Second Grade" elif 3 < readability_value <= 4: ## Grade level Fifth grade to Ninth grade return "Third Grade" elif 4 < readability_value <= 5: ## Grade level Fifth grade to Ninth grade return "Fourth Grade" elif 5 < readability_value <= 6: ## Grade level Fifth grade to Ninth grade return "Fifth Grade" elif 6 < readability_value <= 7: ## Grade level Fifth grade to Ninth grade return "Sixth Grade" elif 7 < readability_value <= 8: ## Grade level Fifth grade to Ninth grade return "Seventh Grade" elif 8 < readability_value <= 9: ## Grade level Fifth grade to Ninth grade return "Eighth Grade" elif 9 < readability_value <=10: ## Grade level Fifth grade to Ninth grade return "Ninth Grade" elif 10 < readability_value <=11: ## Grade level Fifth grade to Ninth grade return "Tenth Grade" elif 11 < 
readability_value <=12: ## Grade level Fifth grade to Ninth grade return "Eleventh Grade" elif 12 < readability_value <= 13: ## Grade level Fifth grade to Ninth grade return "Twelfth Grade" elif readability_value > 13: ## Grade level Fifth grade to Ninth grade return "College Grade" else: return "College Grade" def get_flesch_reading_ease_prompt_value(readability_value): """ Get flesch readability score range approximation""" if readability_value <= 29: return "Very Confusing" elif 29 < readability_value <= 49: return "Difficult" elif 49 < readability_value <= 59: return "Fairly Difficult" elif 59 < readability_value <= 69: return "Standard" elif 69 < readability_value <= 79: return "Fairly Easy" elif 79 < readability_value <= 89: return "Easy" elif 89 < readability_value <= 100: return "Very Easy" else: return "Very Easy" def get_relevence_to_response_value(similarity_score): """ To findout relevence to response results based on similarity score.""" if similarity_score <=0.3: return "Low" elif 0.3 < similarity_score <= 0.5: return "Average" elif 0.5 < similarity_score <= 0.8: return "Good" elif similarity_score > 0.8: return "High" def evaluate_prompt_response_inputs (prompt_msg:any, response_msg:any)->str: """ Predict the text quality, text relevence for both prompt and response messages.""" df = pd.DataFrame({ "prompt": [prompt_msg], "response": [response_msg]}) results = why.log(df, schema=udf_schema()) view = results.view() automated_readability_index_prompt = view.get_column("prompt.automated_readability_index").to_summary_dict() automated_readability_index_prompt_mean = automated_readability_index_prompt['distribution/mean'] arip_m = lambda x:1 if x < 1 else (14 if x > 14 else x) automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean) automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean) flesch_reading_ease_prompt = view.get_column("prompt.flesch_reading_ease").to_summary_dict() flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean'] frep_m = lambda x:1 if x < 1 else (100 if x > 100 else x) flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean) flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean) automated_readability_index_response = view.get_column("response.automated_readability_index").to_summary_dict() automated_readability_index_response_mean = automated_readability_index_response['distribution/mean'] arir_m = lambda x:1 if x < 1 else (14 if x > 14 else x) automated_readability_index_response_mean = arir_m(automated_readability_index_response_mean) automated_readability_index_response_value = get_readability_index_range_value(automated_readability_index_response_mean) flesch_reading_ease_response = view.get_column("response.flesch_reading_ease").to_summary_dict() flesch_reading_ease_response_mean = flesch_reading_ease_response['distribution/mean'] frer_m = lambda x:1 if x < 1 else (100 if x > 100 else x) flesch_reading_ease_response_mean = frer_m(flesch_reading_ease_response_mean) flesch_reading_ease_response_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_response_mean) relevance_to_response = view.get_column("response.relevance_to_prompt").to_summary_dict() relevance_to_response_mean = relevance_to_response['distribution/mean'] r2r_m = lambda x:0 if x < 0 else (1 if x > 1 else x) relevance_to_response_mean = r2r_m(relevance_to_response_mean) 
relevance_to_response_value = get_relevence_to_response_value(relevance_to_response_mean) sentence_count_response = view.get_column("response.sentence_count").to_summary_dict() sentence_count_response_mean = sentence_count_response['distribution/mean'] word_count_response = view.get_column("response.lexicon_count").to_summary_dict() word_count_response_mean = word_count_response['distribution/mean'] prompt_response_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean), 'prompt_readability_value': automated_readability_index_prompt_value, 'prompt_reading_ease': str(flesch_reading_ease_prompt_mean), 'prompt_reading_ease_value': flesch_reading_ease_prompt_value, 'response_readability': str(automated_readability_index_response_mean), 'response_readability_value': str(automated_readability_index_response_value), 'response_reading_ease': str(flesch_reading_ease_response_mean), 'response_reading_ease_value': str(flesch_reading_ease_response_value), 'response_sentence_count': str(sentence_count_response_mean), 'response_word_count_response': str(word_count_response_mean), 'relevance_to_response': str(relevance_to_response_mean), 'relevance_to_response_value': relevance_to_response_value } final_output_json = json.dumps(prompt_response_results, indent=4) return final_output_json,prompt_response_results if __name__ == "__main__": ##Test only prompt message information option = 'predict' if option == 'evaluate': prompt_only_response_msg = "A large language model is an advanced artificial intelligence (AI) system designed to process, understand, and generate human-like text based on massive amounts of data. These models are typically built using deep learning techniques, such as neural networks, and are trained on extensive datasets that include text from a broad range, such as books and websites, for natural language processing.Fine-tuning a large language model involves adjusting and adapting a pre-trained model to perform specific tasks or to cater to a particular domain more effectively. The process usually entails training the model further on a smaller, targeted dataset that is relevant to the desired task or subject matter.Few-shot learning (FSL) can be considered as a meta-learning problem where the model learns how to learn to solve the given problem. In this approach, the model is provided with a very limited number of examples (i.e., “few shots”) from the new task, and it uses this information to adapt and perform well on that task. Adapter Training: Adapter training is a method that involves training lightweight modules that are plugged into the pre-trained model, allowing for fine-tuning on a specific task without affecting the original model’s performance on other tasks.Multi-task Learning: Multi-task learning is a method where the pre-trained model is fine-tuned on multiple tasks simultaneously. This approach enables the model to learn and leverage the shared representations across different tasks, leading to better generalization and performance. Task-specific Fine-tuning: Task-specific fine-tuning is a method where the pre-trained model is fine-tuned on a specific task or domain using a task-specific dataset. This method requires more data and time than transfer learning but can result in higher performance on the specific task. Sequential Fine-tuning: Sequential fine-tuning is a method where a pre-trained model is fine-tuned on multiple related tasks or domains sequentially. 
This allows the model to learn more nuanced and complex language patterns across different tasks, leading to better generalization and performance.A noteworthy avenue of research within LLM fine-tuning explores strategies to reduce the expenses associated with updating model parameters. This endeavor is the essence of parameter-efficient fine-tuning (PEFT), a collection of techniques aiming to curtail the number of parameters requiring adjustments.Various PEFT techniques exist, and one prominent example is a low-rank adaptation (LoRA), a technique gaining popularity among open-source language models." prompt_res = evaluate_prompt_metrics(prompt_only_response_msg) elif option == 'predict': prompt_msg = "What is AION?" response_msg = "AION (Artificial Intelligence ONline) is an open -source software platform for building, deploying and operating the entire lifecycle of AI applications. It supports various use cases such as predictive analytics , machine learning and deep learning . Key features: 1. Data Ingestion : Supports multiple data sources like text files, excel sheet, database etc." evaluation_metrics_json = evaluate_prompt_response_inputs(prompt_msg,response_msg) print("evaluation_metrics_json: \n",evaluation_metrics_json)
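# --- Hedged illustration (not part of the original file) ---
# The "response.relevance_to_prompt" UDF registered above reduces to a cosine
# similarity between MiniLM sentence embeddings. A standalone sketch of that
# calculation, reusing the same model name; the example strings are made up.
from sentence_transformers import SentenceTransformer, util

def example_relevance(prompt_msg: str, response_msg: str) -> float:
    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    emb_prompt = model.encode(prompt_msg, convert_to_tensor=True)
    emb_response = model.encode(response_msg, convert_to_tensor=True)
    return util.pytorch_cos_sim(emb_prompt, emb_response).item()

# score = example_relevance("What is AION?", "AION is an AI lifecycle platform.")
# print(score, get_relevence_to_response_value(score))   # maps score to Low/Average/Good/High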
aionpipeline.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import kfp import kfp.dsl as dsl import json from pathlib import Path class aionpipeline(): containerRegistry = str() containerLabel = str() containerSecret = str() pipelineName = 'AION MLOps Pipeline {0}' exeCmd = 'python' codeFile = 'aionCode.py' mntPoint = '/aion' inputArg = '-i' msIP = '0.0.0.0' port = '8094' cachingStrategy = 'P0D' deafultVolume = '1Gi' volName = 'aion-pvc' volMode = 'ReadWriteMany' fileExt = '.tar.gz' fileName = 'aion_mlops_pipeline_{0}' containerMM = 'modelmonitoring' containerDI = 'dataingestion' containerDT = 'datatransformation' containerFE = 'featureengineering' containerMR = 'modelregistry' containerMS = 'modelserving' containerImage = '{0}/{1}:{2}' models = {} nameSeprator = '-' modelsLiteral = 'models' modelNameLiteral = 'modelname' msTemplate = '{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "{{workflow.name}}-{0}"}, "spec": {"containers": [{"name": "{0}", "image": "{1}", "command": ["python"], "args": ["aionCode.py", "-ip", "{2}", "-pn", "{3}"],"volumeMounts": [{"name": "aion-pvc", "mountPath": "{4}"}], "ports": [{"name": "http", "containerPort": {3}, "protocol": "TCP"}]}], "imagePullSecrets": [{"name": "{5}"}], "volumes": [{"name": "aion-pvc", "persistentVolumeClaim": {"claimName": "{{workflow.name}}-{6}"}}]}}' def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()): self.models = models self.containerRegistry = containerRegistry self.containerLabel = containerLabel self.containerSecret = containerSecret @dsl.pipeline( name=pipelineName.format(containerLabel), description=pipelineName.format(containerLabel), ) def aion_mlops(self, inputUri=str(), volSize=deafultVolume): vop = dsl.VolumeOp( name=self.volName + self.nameSeprator + self.containerLabel, resource_name=self.volName, modes=[self.volMode], size=volSize ) mm = dsl.ContainerOp( name=self.containerMM, image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, self.inputArg, inputUri, ], pvolumes={self.mntPoint: vop.volume} ) mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy di = dsl.ContainerOp( name=self.containerDI, image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: mm.pvolume} ) di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy dt = dsl.ContainerOp( name=self.containerDT, image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: di.pvolume} ) dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy fe = dsl.ContainerOp( name=self.containerFE, image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: dt.pvolume} ) 
fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy dictMT = {} listMTOps = [] for model in self.models[self.modelsLiteral]: modelName = model[self.modelNameLiteral] mt=dsl.ContainerOp( name=modelName, image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: fe.pvolume}) mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy listMTOps.append(mt) dictMT[self.mntPoint]=mt.pvolume mr = dsl.ContainerOp( name=self.containerMR, image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes=dictMT ).after(*tuple(listMTOps)) mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy msJson = self.msTemplate.replace(str({0}),self.containerMS).replace(str({1}),self.containerImage.format(self.containerRegistry,self.containerMS,self.containerLabel)).replace(str({2}),self.msIP).replace(str({3}),self.port).replace(str({4}),self.mntPoint).replace(str({5}),self.containerSecret).replace(str({6}),self.volName) ms = dsl.ResourceOp( name=self.containerMS + self.nameSeprator + self.containerLabel, k8s_resource=json.loads(msJson), ) ms.after(mr) def compilepl(self, targetPath=str()): filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt if targetPath != str(): filePath = Path(targetPath, filePath) kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath)) def executepl(self, kfhost=str()): client = kfp.Client(kfhost) client.create_run_from_pipeline_func(self.aion_mlops,arguments={})
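# --- Hedged usage sketch (illustration, not part of the original file) ---
# How the aionpipeline class above might be driven. The registry, label, secret,
# model names and Kubeflow host are placeholders; only the dictionary shape
# ("models" / "modelname") and the method names come from the class definition.
example_models = {"models": [{"modelname": "logisticregression"},
                             {"modelname": "randomforest"}]}
pl = aionpipeline(example_models,
                  containerRegistry="myregistry.example.com/aion",  # assumed registry
                  containerLabel="usecase1-v1",                      # assumed image label
                  containerSecret="regcred")                         # assumed pull secret
pl.compilepl(targetPath="/tmp")   # writes aion_mlops_pipeline_usecase1-v1.tar.gz
# pl.executepl(kfhost="http://kubeflow.example.com")   # submit to a Kubeflow endpoint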
telemetry.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import requests import json import os from datetime import datetime import socket import getmac from appbe.sqliteUtility import sqlite_db import pandas as pd from appbe.dataPath import DATA_DIR def TelemetryCreateSyncState(state): try: newdata = {} file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') now = datetime.now() SyncingTime = int(datetime.timestamp(now)) newdata.update({'ID':['1'],'state':[state],'syncingTime':[SyncingTime]}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'syncState') except Exception as e: print(e) pass def TelemetryUpdateSyncState(state): try: newdata = {} file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') now = datetime.now() SyncingTime = int(datetime.timestamp(now)) updated_data = '"state"="'+state+'","syncingTime"="'+str(SyncingTime)+'"' sqlite_obj.update_data(updated_data,'ID="1"','syncState') except Exception as e: print(e) pass def checkTelemtry(): import subprocess import sys scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py')) if os.path.exists(scriptPath): outputStr = subprocess.Popen([sys.executable,scriptPath,'-m','pushtelemetry']) def SyncTelemetry(): try: newdata = {} file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') if sqlite_obj.table_exists('syncState'): data = sqlite_obj.read_data('syncState')[0] param_keys = ['ID','state','syncingTime'] sync_data = dict((x,y) for x,y in zip(param_keys,data)) #print(sync_data['state'],sync_data['syncingTime']) if sync_data['state'].lower() != 'syncing': sync_time = sync_data['syncingTime'] now = datetime.now() currTime = datetime.timestamp(now) diffTime = int(float(currTime)) - int(float(sync_time)) #print(diffTime) if int(diffTime) > 86400: TelemetryUpdateSyncState('Syncing') SendTelemetryUpdate(sync_time) TelemetryUpdateSyncState('Done') else: TelemetryCreateSyncState('Initialize') except Exception as e: print(e) pass def UseCaseCreated(Usecase): try: file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') newdata = {} now = datetime.now() ID = datetime.timestamp(now) record_date = int(datetime.timestamp(now)) computername = socket.getfqdn() macaddress = getmac.get_mac_address() try: user = os.getlogin() except: user = 'NA' newdata.update({'ID':[str(int(ID))],'RecordDate': [record_date],'Usecase': [Usecase],'Operation':['Created'],'User':[str(user)],'HostName' :[computername],'MACAddress':[macaddress],'ProblemType':[''],'Algorithms':[''],'EDA':['No'],'Prediction':['No'],'MLaC':['No'],'Drift':['No'],'TrustedAI':['No']}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'logs') except Exception as e: print(e) pass def UpdateTelemetry(Usecase,operation,value): try: file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') data = sqlite_obj.read_data('logs','Usecase="'+Usecase+'"') #print(data) if 
sqlite_obj.table_exists('logs'): updated_data = operation+'="'+value+'"' now = datetime.now() ID = datetime.timestamp(now) record_date = int(datetime.timestamp(now)) updated_data += ',"RecordDate"="'+str(record_date)+'"' sqlite_obj.update_data(updated_data,'Usecase="'+Usecase+'"','logs') except Exception as e: print(e) pass def SendTelemetryUpdate(sync_time): file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'telemetry.db') if sqlite_obj.table_exists('logs'): ddata = sqlite_obj.read_data("logs","RecordDate >= '"+str(sync_time)+"'") #print(ddata) keys = sqlite_obj.column_names('logs') for data in ddata: now = datetime.now() ID = datetime.timestamp(now) item = {} item['ID'] = str(int(ID)) item['RecordID'] = data[ keys.index('ID')] item['RecordDate'] = data[ keys.index('RecordDate')] item['Usecase'] = data[ keys.index('Usecase')] item['Operation'] = data[ keys.index('Operation')] item['User'] = data[ keys.index('User')] item['HostName'] = data[ keys.index('HostName')] item['MACAddress'] = data[ keys.index('MACAddress')] item['Algorithms'] = data[ keys.index('Algorithms')] item['ProblemType'] = data[ keys.index('ProblemType')] item['EDA'] = data[ keys.index('EDA')] item['Prediction'] = data[ keys.index('Prediction')] item['MLaC'] = data[ keys.index('MLaC')] item['Drift'] = data[ keys.index('Drift')] item['TrustedAI'] = data[ keys.index('TrustedAI')] url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' record = {} record['TableName'] = 'AION_LOGS' record['Item'] = item record = json.dumps(record) #print(record) try: response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",}) except Exception as e: print(e) def telemetry_data(operation,Usecase,data): now = datetime.now() ID = datetime.timestamp(now) record_date = now.strftime("%y-%m-%d %H:%M:%S") computername = socket.getfqdn() macaddress = getmac.get_mac_address() try: user = os.getlogin() except: user = 'NA' item = {} item['ID'] = str(int(ID)) item['record_date'] = record_date item['UseCase'] = Usecase item['operation'] = operation item['remarks'] = data item['user'] = str(user) item['hostname'] = computername item['macaddress'] = macaddress url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' record = {} record['TableName'] = 'AION_OPERATION' record['Item'] = item record = json.dumps(record) try: response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",}) check_telemetry_file() except Exception as inst: filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') f=open(filename, "a+") f.write(record+'\n') f.close() def check_telemetry_file(): file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt') if(os.path.isfile(file_path)): f = open(file_path, 'r') url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry' file_content = f.read() f.close() matched_lines = file_content.split('\n') write_lines = [] for record in matched_lines: try: response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",}) except: write_lines.append(record) f = open(file_path, "a") f.seek(0) f.truncate() for record in write_lines: f.write(record+'\n') f.close() else: return True
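# --- Hedged illustration (not part of the original file) ---
# SyncTelemetry() above only re-syncs when the stored 'syncingTime' is more than
# 24 hours (86400 seconds) old. The same check, isolated for clarity; the sample
# epoch value in the comment is arbitrary.
from datetime import datetime

def example_needs_sync(last_sync_epoch: int) -> bool:
    now_epoch = int(datetime.timestamp(datetime.now()))
    return (now_epoch - int(last_sync_epoch)) > 86400   # one day, as in SyncTelemetry

# example_needs_sync(1700000000)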
xplain.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import os import pandas as pd import numpy as np import subprocess import sys import re import plotly.graph_objects as go import plotly.figure_factory as ff def global_explain(request): try: selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) problemType = 'classification' for key in configSettingsJson['basic']['analysisType']: if configSettingsJson['basic']['analysisType'][key] == 'True': problemType = key break if problemType.lower() != 'classification' and problemType.lower() != 'regression': return 'Problem Type Error','Explainable AI only available for classification and regression problem','NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA' displaypath = os.path.join( request.session['deploypath'],'etc','display.json') with open(displaypath) as file: config = json.load(file) file.close() inputFeatures = configSettingsJson['basic']['trainingFeatures'] targetFeature = configSettingsJson['basic']['targetFeature'] inputFeatures = inputFeatures.split(',') if targetFeature in inputFeatures: inputFeatures.remove(targetFeature) dataFilePath = str(configSettingsJson['basic']['dataLocation']) from utils.file_ops import read_df_compressed status,df = read_df_compressed(config['postprocessedData'],encoding='utf8',nrows=10) #print(df) df.rename(columns=lambda x: x.strip(), inplace=True) df = df[inputFeatures] #print(df) singleInstanceData = df.loc[5, inputFeatures] inputFieldsDict = singleInstanceData.to_dict() inputFields = [] inputFields.append(inputFieldsDict) if 'nrows' in config: nrows = config['nrows'] else: nrows = 'Not Available' if 'ncols' in config: ncols = config['ncols'] else: ncols = 'Not Available' if 'targetFeature' in config: targetFeature = config['targetFeature'] else: targetFeature = '' labelMaps = config['labelMaps'] modelfeatures = config['modelFeatures'] mfcount = len(modelfeatures) df_proprocessed = pd.read_csv(dataFilePath) if 'targetFeature' != '': target_classes = df_proprocessed[targetFeature].unique() numberofclasses = len(target_classes) else: target_classes = [] numberofclasses = 'Not Available' dataPoints = df_proprocessed.shape[0] df_proprocessed = df_proprocessed.head(5) df_proprocessed = df_proprocessed.to_json(orient="records") df_proprocessed = json.loads(df_proprocessed) expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py') outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global']) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() ale_json = json.loads(str(outputStr)) ale_json = ale_json['data'] ale_view = ale_json['data'] sentences = ale_json['sentences'] scoreMessage = '' 
feature_importance = ale_json['feature_importance'] dfimp = pd.DataFrame.from_dict(feature_importance) dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index() yaxis_data = dfimp['values'].tolist() xaxis_data = dfimp['labels'].tolist() cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance')) cfig.update_layout(barmode='stack',xaxis_title='Features') bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000) dftoprecords = dfimp.head(2) topTwoFeatures = dfimp['labels'].tolist() topFeaturesMsg = [] for i in range(0,len(dfimp)): value = round(dfimp.loc[i, "values"],2)*100 value = round(value,2) tvalue = str(dfimp.loc[i, "labels"])+' contributing to '+ str(value)+'%' topFeaturesMsg.append(tvalue) most_influencedfeature = ale_json['most_influencedfeature'] interceppoint = ale_json['interceptionpoint'] anchorjson = ale_json['anchorjson'] return 'Success','Success',ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps except Exception as Inst: print(Inst) return 'Error','Exception: '+str(Inst),'NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA'
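# --- Hedged illustration (not part of the original file) ---
# The feature-importance chart in global_explain() is a Plotly bar chart built
# from the {"labels": [...], "values": [...]} payload returned by aion_xai.py.
# A minimal standalone version with made-up feature names and weights.
import pandas as pd
import plotly.graph_objects as go

def example_importance_chart():
    feature_importance = {"labels": ["age", "income", "tenure"], "values": [0.5, 0.3, 0.2]}
    dfimp = pd.DataFrame.from_dict(feature_importance)
    dfimp = dfimp.sort_values(by=["values"], ascending=False)
    fig = go.Figure(go.Bar(x=dfimp["labels"].tolist(), y=dfimp["values"].tolist(),
                           name="Feature Importance"))
    fig.update_layout(xaxis_title="Features")
    # same embedding options used by global_explain()
    return fig.to_html(full_html=False, default_height=450, default_width=1000)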
codeclonedetection_sklearn.py
# -*- coding: utf-8 -*- import os # import glob from glob import glob import ast from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.metrics.pairwise import cosine_similarity import numpy as np import pandas as pd import json import time import logging from datetime import datetime """ Code clone detection based on user input files. """ class codeCloneDetectionSklearn: """ Detect code clones based on sklearn text vectorizer modules. Input params: files_dir: python files folder, deply_dir: logs,resultant dataframe stored location. chunk_size: max size split for the input text or code function. return values: report_dict which contains clone type, path and clones. """ def __init__(self,files_dir,deploy_dir, chunk_size): self.files_dir = files_dir self.deploy_dir = deploy_dir self.chunk_size = chunk_size try: self.ccdreportpath = os.path.join(self.deploy_dir, "codeCloneReport") os.makedirs(self.ccdreportpath, exist_ok = True) except OSError as error: print("Directory 'codeCloneReport' cann't be created.Error msg: ",error) try: current_datetime = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") str_current_datetime = str(current_datetime) log_file_name = 'codeclonelog_sklearn'+f"_{str_current_datetime}"+".log" logpath = os.path.join(self.ccdreportpath,log_file_name) logging.basicConfig(level=logging.INFO,filename=logpath,filemode='w',format='%(message)s') self.log = logging.getLogger() except Exception as e: print("code clone log object creation error.",e) pass def get_function_names(self,filename): """ Get the function names from python files """ try: with open(filename, 'r') as file: content = file.read() tree = ast.parse(content) function_names = [] for node in ast.walk(tree): if isinstance(node, ast.FunctionDef): function_names.append(node.name) except Exception as e: self.log.info("function name read error: "+str(e)) return function_names def get_function_code(self,filename, function_name): """ To get the function codes """ try: with open(filename, 'r') as file: content = file.read() tree = ast.parse(content) function_code = "" for node in ast.walk(tree): if isinstance(node, ast.FunctionDef) and node.name == function_name: function_code = ast.unparse(node) except Exception as e: self.log.info("function name read error: "+str(e)) return function_code def get_python_files(self,root_dir): """ Walk thru the directory user given, get all py files. """ try: code_files = [y for x in os.walk(root_dir) for y in glob(os.path.join(x[0], '*.py'))] except Exception as e: self.log.info("Python file read error: "+str(e)) return code_files def chunk_functions(self,function_code, chunk_size): """ Check the function size based on chunk size. """ try: if (len(function_code) > 20): chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)] else: chunks = list((function_code,)) except Exception as e: self.log.info("function chunk based on chunk_size error: "+str(e)) total_tokens = round(len(function_code)/4) return chunks,total_tokens def get_clone(self): """ Main code clone detection function using sklearn tfidf_vectorizer and cosine_similarity. 
return values:report_dict which contains total_clones, """ try: start_time = time.time() chunk_size = int(self.chunk_size) ccdreportpath = os.path.join(self.deploy_dir, "codeCloneReport") python_files = self.get_python_files(self.files_dir) total_files = len(python_files) # print('python_files: \n',python_files) function_codes = [] function_n = [] file_name=[] # files_info=[] total_tokens_used = [] for file in python_files: function_names = self.get_function_names(file) for i,function_name in enumerate(function_names): file_name.append(file) function_n.append(function_name) function_code = self.get_function_code(file, function_name) chunks,total_tokens = self.chunk_functions(function_code, chunk_size) total_tokens_used.append(total_tokens) function_codes.extend(chunks) total_functions = len(function_n) files_info=list(zip(file_name, function_n,function_codes)) tfidf_vectorizer = TfidfVectorizer() ## we can use other vectorizer models also. # tfidf_vectorizer = HashingVectorizer() tfidf_matrix = tfidf_vectorizer.fit_transform(function_codes) similarity_matrix = cosine_similarity(tfidf_matrix) #Uncomment if you want to send two different code clonne blocks at a time for similarity comparison # similarity_matrix = cosine_similarity(tfidf_matrix, tfidf_matrix) clone_d = dict() total_clones = 0 final_report=list() #getting funtion and next function for comparison for i in range(len(similarity_matrix)): for j in range(i + 1, len(similarity_matrix)): if(similarity_matrix[i, j] >= 0.90 and similarity_matrix[i, j] <= 0.95): clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone1_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone1_path':files_info[j][0]},'cloneType':'parametricClone'}}) report_json = json.dumps(clone_d, indent = 4) total_clones=total_clones+1 elif(similarity_matrix[i, j] > 0.95): clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0] },'cloneType':'exactClone'}}) report_json = json.dumps(clone_d, indent = 4) final_report.append(clone_d) total_clones=total_clones+1 elif(similarity_matrix[i, j] > 0.80 and similarity_matrix[i, j] < 0.90): clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0] },'cloneType':'NearMissClones'}}) report_json = json.dumps(clone_d, indent = 4) final_report.append(clone_d) total_clones=total_clones+1 else: ##add other conditionas in future pass ## To get clone type clone_type = [list(item.values())[2] for item in list(clone_d.values())] report_str = json.dumps(final_report) json_l=json.loads(report_str) json_keys = list(json_l[0].keys()) json_values = list(json_l[0].values()) end_time = time.time() total_time_taken = end_time - start_time # self.log.info("ccd_report: \n"+str(ccd_report)) f_df=pd.DataFrame(list(zip(json_keys, json_values,clone_type)), columns =['Clone', 'CloneDetails','CloneType']) codeclonereport_file = os.path.join(self.ccdreportpath,'clone_detection_report_sklearn.csv') f_df.to_csv(codeclonereport_file, index=False) ccd_report = f_df.to_markdown(tablefmt='psql') self.log.info("total_clones: \n"+str(total_clones)) exact_clone_count = f_df['CloneType'].str.count("exactClone").sum() parametric_clone_count = f_df['CloneType'].str.count("parametricClone").sum() nearmiss_clone_count = 
f_df['CloneType'].str.count("NearMissClones").sum() total_tokens = sum(total_tokens_used) # nearmiss_clone_count =0 self.log.info("exact_clone_count: \n"+str(exact_clone_count)) self.log.info("parametric_clone_count: \n"+str(parametric_clone_count)) self.log.info("nearmiss_clone_count: \n"+str(nearmiss_clone_count)) self.log.info("Total tokens used: \n"+str(total_tokens)) self.log.info("Total time taken to excute code clone detction: \t"+str(total_time_taken)) clone_info="1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces,\ 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone,\ 3. Near-miss clone: Near-miss clone are clones detected with less similarity threshold." clone_count = {"Exact Clone":exact_clone_count,"Parametric Clone":parametric_clone_count,"Nearmiss Clone":nearmiss_clone_count} report_str = f"""Code_directory: {self.files_dir} Files: {total_files} Functions: {total_functions} Total_code_clones_detected: {total_clones} Tokens used: {total_tokens} Three_types_of_clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone. 3. Near-miss clone: Near-miss clone are clones detected with less similarity threshold. Code_clones_count_by_clone_type: {clone_count} Clone_functions: {ccd_report} total_time_taken: {total_time_taken} """ codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') with open(codeclonereport_txt, "w") as f: f.write(report_str) report_dict = {"clone_info":clone_info,"total_clones":total_clones,'total_files':total_files,"exact_clone_count":exact_clone_count,'total_functions':total_functions,"total_tokens":total_tokens, "parametric_clone_count":parametric_clone_count,"nearmiss_clone_count":nearmiss_clone_count,"result_df":f_df } self.log.info("ccd_report: \n"+str(ccd_report)) # print("report_dict:\n\n",report_dict) # end_time = time.time() # total_time = (end_time - start_time) return report_dict except Exception as e: self.log.info("Clone detection function error. error msg: "+str(e)) # import traceback # print("traceback error: \n",traceback.print_exc()) if __name__ == "__main__": print("code clone detection started....") ##Use this for standalone fn debuging.
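A minimal standalone driver for the detector above is sketched below; the directory paths and chunk size are illustrative placeholders, not values taken from this repository.

# Hedged usage sketch: paths and chunk_size are assumptions for illustration only.
ccd = codeCloneDetectionSklearn(files_dir=r'/path/to/python/project',
                                deploy_dir=r'/path/to/deploy',
                                chunk_size=10000)
report_dict = ccd.get_clone()
if report_dict:
    # report_dict carries total_clones, per-type clone counts and the result dataframe
    print('Total clones found:', report_dict['total_clones'])
    print(report_dict['result_df'].head())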
exploratory_Analysis.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import plotly.figure_factory as ff from plotly.subplots import make_subplots import plotly.graph_objects as go from wordcloud import WordCloud, STOPWORDS import pandas as pd import numpy as np from appbe import distribution import io import urllib import os import sys import base64 from appbe import help_Text as ht import math import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from natsort import natsorted from sklearn.cluster import KMeans import json from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator from appbe.aion_config import eda_setting from dython.nominal import associations def calculateNumberofCluster(featureData): Sum_of_squared_distances = [] K = range(1, 15) for k in K: km = KMeans(n_clusters=k) km = km.fit(featureData) Sum_of_squared_distances.append(km.inertia_) x1, y1 = 1, Sum_of_squared_distances[0] x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1] distances = [] for inertia in range(len(Sum_of_squared_distances)): x0 = inertia + 2 y0 = Sum_of_squared_distances[inertia] numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2) distances.append(numerator / denominator) n_clusters = distances.index(max(distances)) + 2 #print(n_clusters) return (n_clusters) def get_eda(request): hopkins_val = '' hopkins_tip = '' if request.session['datatype'] == 'Normal': from appbe.eda import ux_eda # EDA Subsampling changes # ---------------------------- edasampleSize = request.POST.get('SubsampleSize') edasampleSize = str(int(edasampleSize)/100) sampleFile = str(request.session['datalocation']) repText = sampleFile[sampleFile.find('sub_'):sampleFile.find('_sampled_') + 9] if len(repText) == 30: dataLocation = sampleFile.replace(repText,"") else: dataLocation = sampleFile eda_obj = ux_eda(dataLocation,request.session['delimiter'],request.session['textqualifier']) df0 = eda_obj.getdata() if os.path.isfile(dataLocation): if(len(edasampleSize) > 0): df0 = df0.sample(frac = float(edasampleSize)) #EDA Performance change # ---------------------------- dflength = len(df0) # sample_size = int(eda_setting()) # if dflength >= sample_size: # eda_obj.subsampleData(sample_size) # else: eda_obj.subsampleData(dflength) # ---------------------------- TrainSampleSelected = request.POST.get('TrainSampleSize') if(TrainSampleSelected == 'EDASize'): from pathlib import Path filePath = Path(dataLocation) import datetime timestamp = datetime.datetime.now().replace(microsecond=0).isoformat() timestamp = str(timestamp.replace(":","")) sub_sampledFile = filePath.parent/("sub_" + timestamp + "_sampled_"+filePath.name) # sub_sampledFile = filePath.parent/(usename + "_sub_sampled_"+filePath.name) df0.to_csv(sub_sampledFile,index=False,) request.session['datalocation'] = str(sub_sampledFile) records = df0.shape[0] request.session['NoOfRecords'] = records edaFeatures = request.POST.getlist('InputFeatures') request.session['edaFeatures'] = 
edaFeatures if(len(edaFeatures) > 0): eda_obj.subsetFeatures(edaFeatures) # ---------------------------- features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() request.session['edanumericCatFeatures'] = numericCatFeatures request.session['edatextFeature'] = textFeature categoricalfeatures = catfeatures numericfeaturecount = eda_obj.getNumericFeatureCount() cluster_details = [] dataCharts = [] # correlated_features=[] pca_details = [] if numericfeaturecount > 1: try: cluster_details,hopkins_val = eda_obj.getClusterDetails() if hopkins_val!='': if float(hopkins_val) <0.3: hopkins_tip = ht.hopkins_tip[0] elif float(hopkins_val)>0.7: hopkins_tip = ht.hopkins_tip[2] else: hopkins_tip = ht.hopkins_tip[1] else: hopkins_tip = '' except Exception as e: print("========================"+str(e)) pass try: pca_map = eda_obj.getPCATop10Features() pca_details = pca_map yaxis_data = pca_map.tolist() xaxis_data = pca_map.index.values.tolist() import plotly.graph_objects as go cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Explained Variance Ratio') bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) dataCharts.append(bargraph) except: pass df = eda_obj.getdata() # try: # top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5) # correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr) # except: # pass else: df = eda_obj.getdata() # # EDA Subsampling changes # # ---------------------------- # if os.path.isfile(dataLocation): # if dflength < 10000: # if(len(edasampleSize) > 0): # df = df.sample(frac = float(edasampleSize)) # ---------------------------- if len(textFeature) > 0: commonfeatures = eda_obj.getTopTextFeatures(10) # comment_words = eda_obj.word_token() del eda_obj wordcloudpic = '' showtextFeature = False if len(textFeature) > 0: showtextFeature = True # try: # stopwords = set(STOPWORDS) # wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, # min_font_size=10).generate(comment_words) # try: # plt.clf() # except: # pass # plt.imshow(wordcloud, interpolation='bilinear') # plt.axis("off") # plt.tight_layout(pad=0) # image = io.BytesIO() # plt.savefig(image, format='png') # image.seek(0) # string = base64.b64encode(image.read()) # wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string) # except: # pass xaxis_data = commonfeatures['most_common_words'].tolist() yaxis_data = commonfeatures['freq'].tolist() import plotly.graph_objects as go cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Count') bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) dataCharts.append(bargraph) df_top = df.head(10) df_json = df_top.to_json(orient="records") df_json = json.loads(df_json) # if len(df) > 10000: # df1 = df.sample(n=10000, random_state=1) # else: # df1 = df df1 = df data_deep_json = df_top.to_json(orient='records') #df1.to_json(orient='records') try: gfsg = GenericFeatureStatisticsGenerator() proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': df1}]) protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8") except Exception as e: protostr='' print('protostr '+str(e)) try: correlationgraph = getCorrelationMatrix(df) except Exception as e: print(e) 
try: dataDrift = 'onRequest' #getDriftDistribution(numericCatFeatures, df[numericCatFeatures]) except Exception as e: dataDrift = '' print(e) selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] statusmsg = 'Successfully Done' DF_list = list() des1 = df.describe(include='all').T des1['missing count %'] = df.isnull().mean() * 100 des1['zero count %'] = df.isin([0]).mean() * 100 data = list(df.columns.values) des1.insert(0, 'Features', data) des1 = des1.to_json(orient="records") pca_df=pd.DataFrame() #print(pca_details) # if pca_details.empty: if len(pca_details) > 0: pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(4) pca_df = pca_df.to_json(orient="records") if len(df.columns) > 25: df3 = df[df.columns[0:24]] else: df3 = df.copy() #cor_mat = abs(df3.corr()) #cor_mat = cor_mat.round(2) try: if len(df3.columns) > 25: df3 = df3[df3.columns[0:24]] cor_mat= associations(df3,compute_only=True) cor_mat=cor_mat['corr'] #cor_mat = df3.corr() cor_mat = cor_mat.astype(float).round(2) except Exception as e: print("creating correlation mat issue: \n",e) pass data = list(cor_mat.index) cor_mat.insert(0, 'Features', data) cor_mat = cor_mat.to_json(orient="records") cluster_df = pd.DataFrame.from_dict(cluster_details) cluster_df = cluster_df.to_json(orient="records") #textFeature = json.dumps(textFeature) # 2.2 patch changes #------------------------------------------------- request.session['edaRecords'] = df.shape[0] print(textFeature) context = {'data_deep_json': data_deep_json, 'sampleFile':sampleFile,'protostr': protostr, 'data': df_json, 'oneda': True, 'dataCharts': dataCharts,'dataDrift': dataDrift, 'drift_tip': ht.drift_tip,'des1':des1,'cluster_df':cluster_df,'hopkins_val':hopkins_val, 'pca_df':pca_df,'cor_mat':cor_mat,'correlationgraph': correlationgraph, 'centroids':cluster_details, 'wordcloudpic': wordcloudpic, 'showtextFeature': showtextFeature, 'textFeature': textFeature, # 'featurepairgraph': correlated_features, 'data_overview_tip': ht.data_overview_tip,'timeseries_analysis_tip':ht.timeseries_analysis_tip, 'feature_importance_tip': ht.feature_importance_tip,'hopkins_tip':hopkins_tip, 'correlation_analysis_tip': ht.correlation_analysis_tip, 'exploratory_analysis_tip': ht.exploratory_analysis_tip, 'data_deep_drive_tip': ht.data_deep_drive_tip,'status_msg': statusmsg,'selected_use_case': selected_use_case, 'pair_graph_tip':ht.pair_graph_tip, 'fair_metrics_tip':ht.fair_metrics_tip, 'categoricalfeatures':categoricalfeatures, 'numericCatFeatures':numericCatFeatures, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning', 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':True,'NumericFeatureList':numericFeature,'dateFeature':dateFeature,'targetFeature':targetFeature} return(context) # EDA Visualization changes # ---------------------------- def get_edaGraph(request): if request.session['datatype'] == 'Normal': from appbe.eda import ux_eda df_temp = dict(request.GET).get('features[]') graphType = request.GET.get('graphType') d3_url = request.GET.get('d3_url') mpld3_url = request.GET.get('mpld3_url') dataLocation = request.session['datalocation'] eda_obj = ux_eda(dataLocation) # 2.2 patch changes #------------------------------------------------- edaRecords = request.session['edaRecords'] #df = df.sample(n=int(edaRecords), random_state=1) eda_obj.subsampleData(edaRecords) 
eda_obj.subsetFeatures(df_temp) features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature, catfeatures = eda_obj.getFeatures() numericfeaturecount = eda_obj.getNumericFeatureCount() correlated_features=[] df = eda_obj.getdata() if numericfeaturecount > 1: try: if graphType == 'Default': top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5) correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr) else: correlated_features = getFeatureCharts(df,graphType,d3_url,mpld3_url) except: pass return correlated_features # ---------------------------- # ---------------------- 12686:Data Distribution related Changes S T A R T ---------------------- def get_DataDistribution(request): selectedFeature = request.GET.get('selected_feature') _featureItem = [] _featureItem.append(selectedFeature) from appbe.eda import ux_eda dataLocation = request.session['datalocation'] eda_obj = ux_eda(dataLocation) df = eda_obj.getdata() numericCatFeatures = request.session['edanumericCatFeatures'] textFeature = request.session['edatextFeature'] # features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() dataDrift = '' if selectedFeature in numericCatFeatures: dataDrift = getDriftDistribution(_featureItem, df[numericCatFeatures]) elif selectedFeature in textFeature: try: comment_words = eda_obj.word_token_for_feature(selectedFeature, df[_featureItem]) stopwords = set(STOPWORDS) wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=10).generate(comment_words) try: plt.clf() except: pass plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.tight_layout(pad=0) image = io.BytesIO() plt.savefig(image, format='png') image.seek(0) string = base64.b64encode(image.read()) # wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string) dataDrift = urllib.parse.quote(string) except: dataDrift = '' del eda_obj return dataDrift # -------------------------------------------- E N D -------------------------------------------- def get_DeepDiveData(request): if request.session['datatype'] == 'Normal': from appbe.eda import ux_eda dataLocation = request.session['datalocation'] eda_obj = ux_eda(dataLocation) edaRecords = request.session['edaRecords'] edaFeatures = request.session['edaFeatures'] eda_obj.subsampleData(edaRecords) eda_obj.subsetFeatures(edaFeatures) df = eda_obj.getdata() data_deep_json = df.to_json(orient='records') return (data_deep_json) # Fairness Metrics changes # ---------------------------- def get_fairmetrics(request): import mpld3 if request.session['datatype'] == 'Normal': from appbe.eda import ux_eda df_temp = dict(request.GET).get('features[]') d3_url = request.GET.get('d3_url') mpld3_url = request.GET.get('mpld3_url') global metricvalue metricvalue = request.GET.get('metricvalue') dataLocation = request.session['datalocation'] # dataLocation = 'C:\\MyFolder\\AION\\AION Datasets\\AIF360\\database.csv' eda_obj = ux_eda(dataLocation, optimize=1) features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() # data = eda_obj.getdata() data = pd.read_csv(dataLocation, na_values=['Unknown', ' ']) features_toEncode = features from sklearn.preprocessing import MinMaxScaler, LabelEncoder data_encoded = data.copy() categorical_names = {} encoders = {} # Use Label Encoder for categorical columns (including target column) for 
feature in features_toEncode: le = LabelEncoder() le.fit(data_encoded[feature]) data_encoded[feature] = le.transform(data_encoded[feature]) categorical_names[feature] = le.classes_ encoders[feature] = le data_perp = data_encoded protected_feature = df_temp[0] #'Victim Race' target_feature = df_temp[1] #'Perpetrator Sex' # ------Theil index----- Task->13843 from aif360.sklearn.metrics import generalized_entropy_index Ti_List = [] for items in categorical_names[protected_feature]: df = data[data[protected_feature]==items] le = LabelEncoder() le.fit(df[target_feature]) df[target_feature] = le.transform(df[target_feature]) tf = generalized_entropy_index(df[target_feature], alpha = 1) tf = round(tf, 4) Ti_List.append(tf) global Thi_idx Thi_idx = Ti_List #claas_size = categorical_names[protected_feature].size new_list = [item for item in categorical_names[protected_feature] if not(pd.isnull(item)) == True] claas_size = len(new_list) if claas_size > 10: return 'HeavyFeature' metrics = fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size) figure = plot_fair_metrics(metrics) html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url) return html_graph def fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size): import aif360 from aif360.datasets import StandardDataset from aif360.metrics import BinaryLabelDatasetMetric cols = [metricvalue] obj_fairness = [[0]] fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols) for indx in range(claas_size): priv_group = categorical_names[protected_feature][indx] privileged_class = np.where(categorical_names[protected_feature] == priv_group)[0] data_orig = StandardDataset(data_perp, label_name=target_feature, favorable_classes=[1], protected_attribute_names=[protected_feature], privileged_classes=[privileged_class]) dataset_pred = data_orig attr = dataset_pred.protected_attribute_names[0] idx = dataset_pred.protected_attribute_names.index(attr) privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] unprivileged_size = dataset_pred.unprivileged_protected_attributes[0].size unprivileged_groups = [] for idx2 in range(unprivileged_size): unprivileged_groups.extend([{attr:dataset_pred.unprivileged_protected_attributes[idx][idx2]}]) metric_pred = BinaryLabelDatasetMetric(dataset_pred, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) if metricvalue == "Theil Index": row = pd.DataFrame([Thi_idx[indx]], columns = cols , index = [priv_group]) elif metricvalue == "Disparate Impact": row = pd.DataFrame([[metric_pred.disparate_impact()]], columns = cols , index = [priv_group]) elif metricvalue == "Statistical Parity Difference": row = pd.DataFrame([[metric_pred.mean_difference()]], columns = cols , index = [priv_group]) #fair_metrics = fair_metrics.append(row) fair_metrics = pd.concat([fair_metrics,row]) return fair_metrics def plot_fair_metrics(fair_metrics): import matplotlib.patches as patches plt.style.use('default') import seaborn as sns fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1) plt.subplots_adjust( left = 0.125, bottom = 0.1, right = 0.9, top = 0.9, wspace = .5, hspace = 1.1 ) y_title_margin = 1.2 plt.suptitle("Fairness metrics", y = 1.09, fontsize=20) sns.set(style="dark") cols = fair_metrics.columns.values obj = fair_metrics.loc['objective'] if metricvalue == "Theil Index": size_rect = [0.5] rect = [-0.1] bottom = [-0.1] top = [2] bound = [[-0.1,0.1]] elif metricvalue == "Disparate 
Impact": size_rect = [0.4] rect = [0.8] bottom = [0] top = [2] bound = [[-0.1,0.1]] elif metricvalue == "Statistical Parity Difference": size_rect = [0.2] rect = [-0.1] bottom = [-1] top = [1] bound = [[-0.1,0.1]] #display(Markdown("### Check bias metrics :")) #display(Markdown("A model can be considered bias if just one of these five metrics show that this model is biased.")) for attr in fair_metrics.index[0:len(fair_metrics)].values: #display(Markdown("#### For the %s attribute :"%attr)) check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)] #display(Markdown("With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics"%(5 - sum(check)))) for i in range(0,1): plt.subplot(1, 1, i+1) xx = fair_metrics.index[1:len(fair_metrics)].values.tolist() yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist() palette = sns.color_palette('husl', len(xx)) ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx) index = 0 for p in zip(ax.get_xticks(), yy): if (p[1] > 2.0): _color = palette.as_hex()[index] _val = 'Outlier(' + str(round(p[1],3)) + ')' ax.text(p[0]-0.5, 0.02, _val, color=_color) else: ax.text(p[0], p[1]+0.05, round(p[1],3), color='k') index = index + 1 plt.ylim(bottom[i], top[i]) plt.setp(ax.patches, linewidth=0) ax.get_xaxis().set_visible(False) ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1) ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor="green", linewidth=1, linestyle='solid')) # plt.axhline(obj[i], color='black', alpha=0.3) plt.title(cols[i], fontname="Times New Roman", size=20,fontweight="bold") ax.set_ylabel('') ax.set_xlabel('') return fig # ---------------------------- def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): try: import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import scipy from scipy import stats from scipy.stats import norm import matplotlib.gridspec as gridspec import math import io, base64, urllib np.seterr(divide='ignore', invalid='ignore') from appbe.eda import ux_eda eda_obj = ux_eda() try: plt.clf() except: pass plt.rcParams.update({'figure.max_open_warning': 0}) sns.set(color_codes=True) pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] if len(feature) > 4: numneroffeatures = len(feature) plt.figure(figsize=(10, numneroffeatures*2)) else: plt.figure(figsize=(10,5)) for i in enumerate(feature): dataType = dataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) dataframe[i[1]] = dataframe[i[1]].cat.codes dataframe[i[1]] = dataframe[i[1]].astype(int) dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) else: dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) distname, sse = eda_obj.DistributionFinder(dataframe[i[1]]) try: ax = sns.distplot(dataframe[i[1]], label=distname) ax.legend(loc='best') if newdataframe.empty == False: dataType = newdataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) newdataframe[i[1]] = newdataframe[i[1]].cat.codes newdataframe[i[1]] = newdataframe[i[1]].astype(int) newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) else: newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) distname, sse 
= distribution.DistributionFinder(newdataframe[i[1]]) ax = sns.distplot(newdataframe[i[1]], label=distname) ax.legend(loc='best') except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) pass buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) string = base64.b64encode(buf.read()) uri = urllib.parse.quote(string) return uri except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def getCategoryWordCloud(df): labels = df.Label.unique() df_output = pd.DataFrame() tcolumns=['text'] for label in labels: df2 = df[df['Label'] == label] df2 = df2.reset_index() wordcloud,df_text = getWordCloud(df2,tcolumns) newrow = {'Label':label,'wordCloud':wordcloud} df_output = df_output.append(newrow,ignore_index=True) return(df_output) def getHighlyCorrelatedFeatureCharts(df, df_top): numOfRows = df.shape[0] cratio = 0.01 if (numOfRows < 1000): cratio = 0.2 elif (numOfRows < 10000): cratio = 0.1 elif (numOfRows < 100000): cratio = 0.01 barcolor = ["red", "green", "blue", "goldenrod", "magenta"] ffig = make_subplots(rows=2, cols=3) height = 800 rowno = 1 colno = 1 featureCharts = [] try: for index, row in df_top.iterrows(): feature1 = row['FEATURE_1'] feature2 = row['FEATURE_2'] df_temp = df[[feature1, feature2]] feature1data = df_temp[feature1] feature2data = df_temp[feature2] nUnique = len(feature1data.unique().tolist()) if nUnique / numOfRows >= cratio: feature1type = 'Continous' else: feature1type = 'Category' nUnique = len(feature2data.unique().tolist()) if nUnique / numOfRows >= cratio: feature2type = 'Continous' else: feature2type = 'Category' charttype = 0 if feature1type == 'Continous' and feature2type == 'Continous': df_temp[feature1] = pd.qcut(df_temp[feature1], q=8, duplicates='drop',precision=0) df_temp[feature1] = df_temp[feature1].astype(str).str.strip('()[]') feature1type = 'Category' xaxis = feature1 yaxis = feature2 charttype = 1 if feature1type == 'Category' and feature2type == 'Continous': xaxis = feature1 yaxis = feature2 charttype = 1 if feature1type == 'Continous' and feature2type == 'Category': xaxis = feature1 #xaxis = feature2 yaxis = feature2 #yaxis = feature1 charttype = 1 if feature1type == 'Category' and feature2type == 'Category': if (len(feature1data.unique().tolist()) < len(feature2data.unique().tolist())): xaxis = feature1 #xaxis = feature2 yaxis = feature2 #yaxis = feature1 else: xaxis = feature1 yaxis = feature2 if (len(df_temp[xaxis].unique().tolist()) > 5): df_temp[xaxis] = pd.qcut(df_temp[xaxis], q=5, duplicates='drop',precision=0) df_temp[xaxis] = df_temp[xaxis].astype(str).str.strip('()[]') if (len(df_temp[yaxis].unique().tolist()) > 5): df_temp[yaxis] = pd.qcut(df_temp[yaxis], q=3, duplicates='drop',precision=0) df_temp[yaxis] = df_temp[yaxis].astype(str).str.strip('()[]') charttype = 2 # if feature1type == 'Category' and feature2type == 'Category': if charttype == 2: uniqueclasses = df_temp[yaxis].unique().tolist() cfig = go.Figure() i = 1 for x in uniqueclasses: df_temp3 = df_temp.loc[df_temp[yaxis] == x] df_temp2 = df_temp3.groupby(xaxis, as_index=False)[yaxis].count() if df_temp2[xaxis].dtypes == "object": df_temp2 = df_temp2.set_index(xaxis).reindex( natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index() xaxis_data = df_temp2[xaxis].tolist() yaxis_data 
= df_temp2[yaxis].tolist() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name=x, marker_color=barcolor[i])) i = i + 1 if i == 5: break cfig.update_layout(barmode='stack', xaxis_title=xaxis, yaxis_title=yaxis) bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400) featureCharts.append(bargraph) if charttype == 1: df_temp2 = df_temp.groupby(xaxis, as_index=False)[yaxis].mean() if df_temp2[xaxis].dtypes == "object": df_temp2 = df_temp2.set_index(xaxis).reindex( natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index() xaxis_data = df_temp2[xaxis].tolist() yaxis_data = df_temp2[yaxis].tolist() cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Primary Product', marker_color='blue')) cfig.update_layout(xaxis_title=xaxis, yaxis_title=yaxis) bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400) featureCharts.append(bargraph) colno += 1 if colno > 3: colno = 1 rowno += 1 except Exception as e: print(e) return (featureCharts) # EDA Visualization changes # ---------------------------- def getFeatureCharts(df, graphType, d3_url,mpld3_url): featureCharts = [] feature1 = df.columns[0] feature2 = df.columns[1] import seaborn as sns import mpld3 fig, ax = plt.subplots(figsize=[10,5]) if graphType == 'marker': df.plot(ax=ax, marker='o') # df[['age','education-num']].plot(ax=ax, marker='o') if graphType == 'area': df.plot(ax=ax, kind ="area") # df[['education-num','age']].plot(ax=ax, kind ="area") # UIprb if graphType == 'hexbin': df.plot.hexbin(ax=ax, x=feature1, y=feature2, gridsize=2) if graphType == 'boxplot': plt.boxplot(df) if graphType == 'scatter': ax.scatter(df[feature1], df[feature2]) if graphType == 'regplot': ax = sns.regplot(x= feature1, y=feature2, data= df, fit_reg = False, scatter_kws={"alpha": 0.5}) if graphType == 'lineplot': ax = sns.lineplot(x= feature1, y=feature2, data= df) if graphType == 'barplot': ax = sns.barplot(x= feature1, y=feature2, data= df) # ax = sns.barplot(x= 'age', y='fnlwgt', data= df) #Start_prb ax.legend() ax.set_xlabel(feature1) ax.set_ylabel(feature2) #print(d3_url) #print(mpld3_url) html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url) if graphType == 'kde': ax = sns.pairplot(df, kind="kde", height=4, x_vars=feature1,y_vars = feature2) # ax = sns.pairplot(df[['age','fnlwgt']], kind="kde") html_graph = mpld3.fig_to_html(ax.fig) if graphType == 'relplot': sns.set(style ="darkgrid") ax = sns.relplot(x =feature1, y =feature2, data = df) html_graph = mpld3.fig_to_html(ax.fig) featureCharts.append(html_graph) return (featureCharts) # ---------------------------- def MostCommonWords(stopwords, inputCorpus, num_of_words=10): try: from collections import Counter new = inputCorpus.str.split() new = new.values.tolist() corpus = [word for i in new for word in i if word not in stopwords] counter = Counter(corpus) most = counter.most_common() x, y = [], [] for word, count in most[: num_of_words + 1]: x.append(word) y.append(count) return pd.DataFrame([x, y], index=['most_common_words', 'freq']).T except: print("exception", sys.exc_info()) return False def removeFeature(df): featuresList = df.columns.values.tolist() modelFeatures = featuresList.copy() datetimeFeatures = [] sequenceFeatures = [] unimportantFeatures = [] featuresRatio = {} for i in featuresList: check = match_date_format(df[i]) if check == True: modelFeatures.remove(i) continue seq_check = check_seq_feature(df[i]) if seq_check == True: modelFeatures.remove(i) continue ratio = check_category(df[i]) if 
ratio != 0: featuresRatio[i] = ratio else: modelFeatures.remove(i) return featuresList, modelFeatures def check_category(data): total_record = len(data) nUnique = len(data.unique().tolist()) if nUnique == 1: return 0 ratio = nUnique / total_record return (ratio) def check_seq_feature(data): if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: total_record = data.count() count = (data - data.shift() == 1).sum() if ((total_record - count) == 1): return True return False def match_date_format(data): data = data.astype(str) beforecheckcount = (data.count()*80)/100 #####YYYY-MM-DD HH:MM:SS#### check1 = data[data.str.match( r'(^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check1.count() if (beforecheckcount <= aftercheckcount): return True #####MM/DD/YYYY HH:MM#### check2 = data[data.str.match( r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check2.count() if (beforecheckcount <= aftercheckcount): return True #####DD-MM-YYYY HH:MM#### check2 = data[data.str.match( r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check2.count() if (beforecheckcount <= aftercheckcount): return True #####YYYY/MM/DD#### check2 = data[data.str.match(r'(^\d\d\d\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True] aftercheckcount = check2.count() if (beforecheckcount <= aftercheckcount): return True #####MM/DD/YYYY#### check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d)$)') == True] aftercheckcount = check2.count() if (beforecheckcount <= aftercheckcount): return True return False def check_text_features(df, modelFeatures): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] textFeature = [] for i in enumerate(modelFeatures): dataType = df[i[1]].dtypes numOfRows = df.shape[0] if dataType not in aionNumericDtypes: if dataType != 'bool': nUnique = len(df[i[1]].unique().tolist()) textnumbericratio = 0.01 if (numOfRows < 1000): textnumbericratio = 0.2 elif (numOfRows < 10000): textnumbericratio = 0.1 elif (numOfRows < 100000): textnumbericratio = 0.01 if nUnique / numOfRows >= textnumbericratio: textFeature.append(i[1]) return (textFeature) def getWordCloud(df, text_columns): df_text = pd.DataFrame() stopwords = set(STOPWORDS) if (len(text_columns) > 1): df_text['combined'] = df[text_columns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) features = ['combined'] else: df_text[['combined']] = df[text_columns] features = ['combined'] df_text[features[0]] = df_text[features[0]].fillna("NA") textCorpus = df_text[features[0]] from text import TextProcessing tp = TextProcessing.TextProcessing() preprocessed_text = tp.transform(textCorpus) df_text['combined'] = preprocessed_text df_text_list = df_text.values.tolist() comment_words = "" for val in df_text_list: val = str(val) tokens = val.split() for i in range(len(tokens)): tokens[i] = tokens[i].lower() comment_words += " ".join(tokens) + " " wordcloud = WordCloud(stopwords=stopwords).generate(comment_words) try: plt.clf() except: pass try: plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.tight_layout(pad=0) image = io.BytesIO() plt.savefig(image, format='png') image.seek(0) string = base64.b64encode(image.read()) image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) 
except Exception as inst: print(inst) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) image_64='' return (image_64, df_text) def getTopTextFeatures(df_text): stopwords = set(STOPWORDS) commonfeatures = MostCommonWords(stopwords, df_text['combined']) xaxis_data = commonfeatures['most_common_words'].tolist() yaxis_data = commonfeatures['freq'].tolist() import plotly.graph_objects as go cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) cfig.update_layout(barmode='stack', xaxis_title='Features') bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) return (bargraph) def getPCATop10Features(df, modelFeatures): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] categorial_features = [] for i in enumerate(modelFeatures): dataType = df[i[1]].dtypes if dataType not in aionNumericDtypes: categorial_features.append(i[1]) df[i[1]] = pd.Categorical(df[i[1]]) df[i[1]] = df[i[1]].cat.codes df[i[1]] = df[i[1]].astype(int) df[i[1]] = df[i[1]].fillna(df[i[1]].mode()[0]) else: df[i[1]] = df[i[1]].fillna(df[i[1]].mean()) from sklearn.decomposition import PCA pca = PCA(n_components=2).fit(df) map = pd.DataFrame(pca.components_, columns=modelFeatures) map = map.diff(axis=0).abs() map = map.iloc[1] map = map.sort_values(ascending=False).head(10) yaxis_data = map.tolist() xaxis_data = map.index.values.tolist() import plotly.graph_objects as go cfig = go.Figure() cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance')) cfig.update_layout(barmode='stack', xaxis_title='Features') bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000) return (bargraph) def getCorrelationMatrix(df): try: #from dython.nominal import associations if len(df.columns) > 25: df3 = df[df.columns[0:24]] else: df3 = df.copy() cor_mat= associations(df3,compute_only=True) cor_mat=cor_mat['corr'] #cor_mat = df3.corr() cor_mat = cor_mat.astype(float).round(2) #print(cor_mat) z = cor_mat.values.tolist() fig = ff.create_annotated_heatmap(z, x=cor_mat.columns.tolist(), y=cor_mat.index.tolist(), annotation_text=z, colorscale='Blues') fig.layout.yaxis.automargin = True correlationgraph = fig.to_html(full_html=True, default_height=450, default_width=1000) except Exception as e: print(e) correlationgraph = '' return (correlationgraph)
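The elbow heuristic in calculateNumberofCluster above can be exercised on its own; the sketch below reproduces its point-to-line distance computation on toy data (make_blobs and the cluster count of 4 are assumptions used only for illustration).

# Sketch of the elbow heuristic: pick the k whose inertia point lies farthest
# from the straight line joining the first and last inertia values (toy data only).
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
inertias = [KMeans(n_clusters=k, n_init=10).fit(X).inertia_ for k in range(1, 15)]
x1, y1 = 1, inertias[0]
x2, y2 = 15, inertias[-1]
distances = []
for i, y0 in enumerate(inertias):
    x0 = i + 2
    numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1)
    denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
    distances.append(numerator / denominator)
print('estimated number of clusters:', distances.index(max(distances)) + 2)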
publish.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from appbe.data_io import sqlite_db
from os.path import expanduser
import platform
import pandas as pd
import os
from appbe.dataPath import DATA_DIR

PUBLISH_PATH = os.path.join(DATA_DIR,'publish')
DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite')

def chech_publish_info(usecasename):
    version = 0
    status = 'Not Published'
    inputDriftStatus = 'No Drift'
    MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database')
    sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
    if sqlite_dbObj.table_exists('publish'):
        data = sqlite_dbObj.read('publish',"usecase = '"+usecasename+"' and status = 'Published'")
        if data.shape[0] > 0:
            model_sqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db')
            version = data['version'].iloc[0]
            status = 'Published'
            if model_sqlite_dbObj.table_exists('monitoring'):
                data = model_sqlite_dbObj.read('monitoring',"version = '"+str(version)+"'")
                if data.shape[0] > 0:
                    msg = data['Msg'].iloc[-1]
                    if 'Affected Columns' in msg:
                        inputDriftStatus = 'Input Drift Found'
    return version,status,inputDriftStatus

def check_input_data(usecasename):
    MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database')
    sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
    data = pd.DataFrame()
    if sqlite_dbObj.table_exists('publish'):
        dataa = sqlite_dbObj.read('publish',"usecase = '"+usecasename+"' and status = 'Published'")
        if dataa.shape[0] > 0:
            modelsqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db')
            if modelsqlite_dbObj.table_exists('prodData'):
                data = modelsqlite_dbObj.read('prodData')
    return data
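A minimal sketch of how these helpers might be called from the UI layer; the use case name below is a placeholder, not a value from the source.

# Hedged usage sketch: 'usecase_01' is a placeholder use case name.
version, status, drift_status = chech_publish_info('usecase_01')
print(f'version={version}, status={status}, input drift={drift_status}')
prod_data = check_input_data('usecase_01')
if not prod_data.empty:
    print(prod_data.head())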
mlstyles.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os from os.path import expanduser import platform import json import subprocess import re import sys import pandas as pd from django.http import HttpResponse from appbe.dataPath import DATA_DIR Usecaselocation = os.path.join(DATA_DIR,'Usecases') def mlstyles(request): try: from appbe.aion_config import settings usecasetab = settings() selectid = request.GET['usecaseid'] configFile = os.path.join(Usecaselocation, 'usecases.json') f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) #usecase = configSettingsJson['usecaselist'] desciption="" usecasename="" found = False for v_id in configSettingsJson['verticallist']: for p_id in v_id['usecaselist']: usecaseid = p_id.get('usecaseid') if str(usecaseid) == str(selectid) : usecasename = p_id.get('usecasename') desciption = p_id.get('desciption') usecaseid = p_id.get('usecaseid') iconname = p_id.get('iconname') prediction_input = p_id.get('prediction_input') outputtype = p_id.get('outputtype') smalldescription = p_id.get('smalldescription') trainingFeatures = p_id.get('trainingFeatures','None') if trainingFeatures != 'None': trainingFeatures = trainingFeatures.split(',') found = True break if found == True: break #print(usecaseid,selectid) context ={'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':usecaseid,'trainingFeatures':trainingFeatures,'iconname':iconname,'smalldescription':smalldescription,'outputtype':outputtype,'usecasetab':usecasetab} return context except Exception as inst: print(inst) context = { 'error3':'error3','error1': "No UseCases to show"} return context def getusecasedetails(selectid): configFile = os.path.join(Usecaselocation, 'usecases.json') f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) #usecase = configSettingsJson['usecaselist'] desciption="" usecasename="" found = False for v_id in configSettingsJson['verticallist']: for p_id in v_id['usecaselist']: usecaseid = p_id.get('usecaseid') if str(usecaseid) == str(selectid) : usecasename = p_id.get('usecasename') desciption = p_id.get('desciption') usecaseid = p_id.get('usecaseid') modelConfig = p_id.get('modelConfig') folder = p_id.get('folder') prediction = p_id.get('prediction') prediction_input = p_id.get('prediction_input') ai_modeldata = p_id.get('modeldata') outputtype = p_id.get('outputtype') smalldescription = p_id.get('smalldescription') prediction_template = p_id.get('prediction_template') trainingFeatures = p_id.get('trainingFeatures','None') if trainingFeatures != 'None': trainingFeatures = trainingFeatures.split(',') found = True break if found == True: break #print(usecasename) return(usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures) def mlpredict(request): selectid=request.POST.get('usecaseid') mlpredict =request.POST.get('mlpredict') 
usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid) from appbe.aion_config import settings usecasetab = settings() usecasename = usecasename desciption = desciption input='' for x in prediction_input: if input != '': input += ',' input = request.POST.get(x['name']) if mlpredict in ['prediction','predictsingle']: if mlpredict == 'prediction': dataFile = request.POST.get('predictfilePath') if(os.path.isfile(dataFile) == False) or dataFile=="": context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption , 'error1': 'Please enter a valid csv filepath','usecasetab':usecasetab} return context, mlpredict else: inputFieldsDict = {} for feature in trainingFeatures: inputFieldsDict[feature] = request.POST.get(feature) dataFile = json.dumps(inputFieldsDict) try: predictionScriptPath= os.path.join(Usecaselocation,folder,'model',prediction) # predictionScriptPath = os.path.join(predictionscript, 'aion_prediction.py') outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile,input]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() predict_dict = json.loads(outputStr) #print(predict_dict) heading ='' timetaken='' print(predict_dict) if (predict_dict['status'] == 'SUCCESS'): predictionResults = predict_dict['data'] #print(predictionResults) if 'heading' in predict_dict: heading = predict_dict['heading'] if 'Time' in predict_dict: timetaken = round(predict_dict['Time'],2) if outputtype.lower() in ['similarityidentification','contextualsearch']: data = predictionResults[0] predictionResults= [] Results={} prediction = data['prediction'] i = 1 for x in prediction: te = '' for y in x: info = (str(x[y])[:100] + '...') if len(str(x[y])) > 100 else str(x[y]) te += y+': '+info+'\n\n' Results[i] = te i = i+1 predictionResults.append(Results) else: context = {'usecaseid':selectid ,'dataFile':dataFile,'prediction_input':prediction_input,'usecasename':usecasename,'desciption':desciption , 'error': 'Failed To perform prediction','usecasetab':usecasetab} return context, mlpredict print(heading) context = {'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':selectid ,'dataFile':dataFile,'predictionResults': predictionResults,'outputtype':outputtype,'heading':heading,'timetaken':timetaken,'usecasetab':usecasetab,'trainingFeatures':trainingFeatures} return context, mlpredict except Exception as inst: print(inst) context = { 'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'errorp': 'Failed To perform prediction','usecasetab':usecasetab} return context, mlpredict if mlpredict == 'download_predict': # predictionResults = 'C:\\DataSets\\Classification\\bug_severity_class.csv' try: csvdata= os.path.join(Usecaselocation,folder,'Data',prediction_template) if os.path.isfile(csvdata) and os.path.exists(csvdata): df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace') downloadFileName = usecasename.replace(" ", "_") + '_predict.csv' response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename='+downloadFileName df.to_csv(response, index=False) return response,mlpredict else: context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 
'File not found','usecasetab':usecasetab} return context, mlpredict except Exception as inst: context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab} return context, mltrain def process(data): cleaned_data = {"verticallist":[]} for vertical in data['verticallist']: updated_list = [] for usecase in vertical['usecaselist']: if usecase['prediction'] and usecase['prediction'] != "Not Implemented": updated_list.append(usecase) if updated_list: cleaned_data['verticallist'].append({'id':vertical['id'],'name':vertical['name'],'usecaselist':updated_list}) return cleaned_data def Aiusecases(request,selectedoption='Implemented'): try: from appbe.aion_config import settings usecasetab = settings() configFile = os.path.join(Usecaselocation, 'usecases.json') f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) if selectedoption == 'Implemented': configSettingsJson = process(configSettingsJson) usecasedetails = configSettingsJson['verticallist'] context ={'desciption1':usecasedetails,'selected':'AIusecases','usecasetab':usecasetab} return context except Exception as e: print(e) context ={'error':"No Usecases to Show",'selected':'AIusecases','usecasetab':usecasetab} return context def mltrain(request): from appbe.aion_config import settings usecasetab = settings() selectid =request.POST.get('usecaseid1') mltrain =request.POST.get('mltrain') usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid) usecasename = usecasename desciption = desciption if mltrain == 'training': dataFile = request.POST.get('trainfilePath') if(os.path.isfile(dataFile) == False) or dataFile=="": context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'error3':'error3','error1': 'Please enter a valid csv filepath'} return context, mltrain try: scriptPath = os.path.join(Usecaselocation,folder,'config','aion_train.py') print(scriptPath,dataFile) outputStr = subprocess.check_output([sys.executable, scriptPath, dataFile]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() train = json.loads(outputStr) status = train['status'] DeployLocation = train['data']['deployLocation'] ModelType = train['data']['ModelType'] BestModel = train['data']['BestModel'] BestScore = train['data']['BestScore'] ScoreType = train['data']['ScoreType'] FeaturesUsed = train['data']['featuresused'] context={'result':train,'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption,'status':status,'DeployLocation':DeployLocation,'ModelType':ModelType,'BestModel':BestModel,'BestScore':BestScore,'ScoreType':ScoreType,'FeaturesUsed':FeaturesUsed,'result':'result','usecasetab':usecasetab} return context,mltrain except Exception as inst: context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'errort': 'Failed To perform Training','usecasetab':usecasetab} return context, mltrain if mltrain == 'download_train': try: csvdata= os.path.join(Usecaselocation,folder,'data',ai_modeldata) #print(csvdata) if os.path.isfile(csvdata) and os.path.exists(csvdata): df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace') downloadFileName = 
usecasename.replace(" ", "_") + '_training.csv' response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename='+downloadFileName df.to_csv(response, index=False) return response,mltrain else: context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 'File not found','usecasetab':usecasetab} return context, mltrain except Exception as inst: context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab} return context, mltrain
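The prediction path above shells out to a use case's prediction script and parses the JSON that follows a "predictions:" marker in its stdout; a stripped-down sketch of that pattern follows, with the script and data paths as placeholders.

# Hedged sketch of the subprocess-and-parse pattern used by mlpredict.
import sys, re, json, subprocess

prediction_script = r'/path/to/usecase/model/aion_prediction.py'  # placeholder path
data_file = r'/path/to/input.csv'                                 # placeholder path
out = subprocess.check_output([sys.executable, prediction_script, data_file, ''])
out = out.decode('utf-8')
payload = re.search(r'predictions:(.*)', out, re.IGNORECASE).group(1).strip()
result = json.loads(payload)
if result.get('status') == 'SUCCESS':
    print(result['data'])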
log_ut.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
from pathlib import Path
import logging
from datetime import datetime as dt

class logg():
    from appbe.dataPath import LOG_LOCATION

    def __init__(self, LOG_LOCATION):
        self.log_location = LOG_LOCATION

    def create_log(self,version):
        log_file_path = Path(self.log_location)
        log_file_path.mkdir(parents=True, exist_ok=True)
        time_stamp = dt.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
        fileName='log_ux_'+time_stamp+'.log'
        filehandler = logging.FileHandler(log_file_path/fileName, 'a','utf-8')
        formatter = logging.Formatter('%(asctime)s %(message)s')
        filehandler.setFormatter(formatter)
        log = logging.getLogger('log_ux')
        log.propagate = False
        for hdlr in log.handlers[:]:  # remove the existing file handlers
            if isinstance(hdlr,logging.FileHandler):
                log.removeHandler(hdlr)
        log.addHandler(filehandler)
        log.setLevel(logging.INFO)
        log.info('********** AION_'+str(version)+' **********')
        return log
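A short usage sketch for the logger factory above; the log directory and version string are placeholders rather than the configured LOG_LOCATION.

# Hedged usage sketch: the directory and version below are assumptions.
logger_factory = logg(r'/tmp/aion_ux_logs')
log = logger_factory.create_log(version='1.0')
log.info('EDA page requested')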
hopkinsStat.py
from typing import Union

import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree


def hopkins(data_frame: Union[np.ndarray, pd.DataFrame], sampling_size: int) -> float:
    if type(data_frame) == np.ndarray:
        data_frame = pd.DataFrame(data_frame)
    data_frame_sample = sample_observation_from_dataset(data_frame, sampling_size)
    sample_distances_to_nearest_neighbours = get_distance_sample_to_nearest_neighbours(
        data_frame, data_frame_sample
    )
    uniformly_selected_observations_df = simulate_df_with_same_variation(
        data_frame, sampling_size
    )
    df_distances_to_nearest_neighbours = get_nearest_sample(
        data_frame, uniformly_selected_observations_df
    )
    x = sum(sample_distances_to_nearest_neighbours)
    y = sum(df_distances_to_nearest_neighbours)
    if x + y == 0:
        raise Exception("The denominator of the hopkins statistics is null")
    return x / (x + y)[0]


def get_nearest_sample(df: pd.DataFrame, uniformly_selected_observations: pd.DataFrame):
    tree = BallTree(df, leaf_size=2)
    dist, _ = tree.query(uniformly_selected_observations, k=1)
    uniformly_df_distances_to_nearest_neighbours = dist
    return uniformly_df_distances_to_nearest_neighbours


def simulate_df_with_same_variation(
    df: pd.DataFrame, sampling_size: int
) -> pd.DataFrame:
    max_data_frame = df.max()
    min_data_frame = df.min()
    uniformly_selected_values_0 = np.random.uniform(
        min_data_frame[0], max_data_frame[0], sampling_size
    )
    uniformly_selected_values_1 = np.random.uniform(
        min_data_frame[1], max_data_frame[1], sampling_size
    )
    uniformly_selected_observations = np.column_stack(
        (uniformly_selected_values_0, uniformly_selected_values_1)
    )
    if len(max_data_frame) >= 2:
        for i in range(2, len(max_data_frame)):
            uniformly_selected_values_i = np.random.uniform(
                min_data_frame[i], max_data_frame[i], sampling_size
            )
            to_stack = (uniformly_selected_observations, uniformly_selected_values_i)
            uniformly_selected_observations = np.column_stack(to_stack)
    uniformly_selected_observations_df = pd.DataFrame(uniformly_selected_observations)
    return uniformly_selected_observations_df


def get_distance_sample_to_nearest_neighbours(df: pd.DataFrame, data_frame_sample):
    tree = BallTree(df, leaf_size=2)
    dist, _ = tree.query(data_frame_sample, k=2)
    data_frame_sample_distances_to_nearest_neighbours = dist[:, 1]
    return data_frame_sample_distances_to_nearest_neighbours


def sample_observation_from_dataset(df, sampling_size: int):
    if sampling_size > df.shape[0]:
        raise Exception("The sampling size is bigger than the number of observations in the dataset")
    data_frame_sample = df.sample(n=sampling_size)
    return data_frame_sample
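A quick smoke test of the statistic on synthetic data follows; the array shape and sampling size are arbitrary choices for illustration only.

# Hedged example: uniform random data, so no cluster structure is expected.
import numpy as np

data = np.random.rand(500, 3)
score = hopkins(data, sampling_size=50)
print('hopkins statistic:', score)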
basic_Config.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from appbe import exploratory_Analysis as ea import pandas as pd from appbe.checkConfiguration import start_check import json import os import ast import time import numpy as np from appfe.modelTraining.models import usecasedetails from appfe.modelTraining.models import Existusecases # from modelTraining.models import view from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from appbe.s3buckets import get_s3_bucket from appbe.gcsbuckets import get_gcs_bucket from appbe import help_Text as ht def is_value_na( value): if isinstance( value, str): return value.strip().lower() in ['','na','none'] return not value def set_ts_preprocessing(request,configSettingsJson): #Task 13052 Timeseries Preprocessing interpolationType = request.POST.get('interpolationType') ts_config = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting'] for key in ts_config['interpolation']: configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][ key] = 'False' if interpolationType != 'na': configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][ interpolationType] = 'True' ts_config['rollingWindow'] = request.POST.get('rollingWindow') if ts_config['rollingWindow'] == 'True': ts_config['rollingWindowSize'] = request.POST.get('rollWindowsize') aggregation = request.POST.get('aaggregationType') for key in ts_config['aggregation']['type']: ts_config['aggregation']['type'][key]='False' if is_value_na(aggregation) == False: ts_config['aggregation']['type'][aggregation] = 'True' granularityType = request.POST.get('unitType') granularitySize = request.POST.get('garnularitysize') for key in ts_config['aggregation']['granularity']['unit']: ts_config['aggregation']['granularity']['unit'][key] = 'False' ts_config['aggregation']['granularity']['unit'][granularityType]='True' ts_config['aggregation']['granularity']['size'] = granularitySize configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']= ts_config return configSettingsJson def update_granularity(configSettingsJson,datapath=None): try: from AION.appbe.utils import set_true_option import pandas as pd from pathlib import Path MINUTES = 60 if not is_value_na(configSettingsJson['basic']['dateTimeFeature']): if not datapath: datapath = configSettingsJson['basic']['dataLocation'] if Path( datapath).exists(): df = pd.read_csv(datapath, nrows=2) if isinstance( configSettingsJson['basic']['dateTimeFeature'], list): datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0] else: datetime_feature = configSettingsJson['basic']['dateTimeFeature'] datetime = pd.to_datetime(df[ datetime_feature]) if len(datetime) > 1: time_delta = (datetime[1] - datetime[0]).total_seconds() granularity_unit = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit'] if time_delta < (1 * MINUTES): set_true_option(granularity_unit, key='second') elif time_delta < (60 * MINUTES): 
set_true_option(granularity_unit, key='minute') elif time_delta < (24 * 60 * MINUTES): set_true_option(granularity_unit, key='hour') elif time_delta < (7 * 24 * 60 * MINUTES): set_true_option(granularity_unit, key='day') elif time_delta < (30 * 24 * 60 * MINUTES): set_true_option(granularity_unit, key='week') elif time_delta < (365 * 24 * 60 * MINUTES): set_true_option(granularity_unit, key='month') else: set_true_option(granularity_unit, key='year') return configSettingsJson except Exception as e: print(f'\nIgnoring error during granularity unit conversion\n:{str(e)}') return configSettingsJson def save(request): try: status = 'pass' msg = "" DEPLOY_LOCATION = request.session['deploylocation'] if request.method == 'POST': submittype = request.POST.get('BasicSubmit') if submittype != 'BasicDefault': filterjson = 'NA' timegroupingjson = 'NA' groupingjson = 'NA' if request.POST.get('filters') != '': filterjson = str(json.loads(request.POST.get('filters'))) if request.POST.get('timegroup') != '': timegroupingjson = str(json.loads(request.POST.get('timegroup'))) if request.POST.get('idgroup') != '': groupingjson = str(json.loads(request.POST.get('idgroup'))) configFile = request.session['config_json'] f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) temp = {} # Retraing settings changes # -------- S T A R T -------- prbType = request.POST.get('ProblemType') if prbType is None: prbType = request.POST.get('tempProblemType') # temp['ProblemType'] = request.POST.get('ProblemType') # request.session['Problem'] = request.POST.get('ProblemType') temp['ProblemType'] = prbType request.session['Problem'] = request.POST.get('ProblemType') # --------------------------- temp['ModelName'] = request.session['usecaseid'] temp['Version'] = str(request.session['ModelVersion']) temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') temp['dataLocation'] = str(request.session['datalocation']) onlinelearning=request.POST.get('onlineLearning',None) if (onlinelearning is not None): if onlinelearning.lower() == 'onlinelearning': configSettingsJson['basic']['onlineLearning'] = 'True' if onlinelearning.lower() == 'distributedlearning': configSettingsJson['basic']['distributedLearning'] = 'True' temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') temp['DateTimeFeatures'] = '' temp['IndexFeatures'] = '' for x in configSettingsJson['advance']['profiler']['normalization'].keys(): configSettingsJson['advance']['profiler']['normalization'][x] = 'False' configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'True' for x in configSettingsJson['advance']['profiler']['numericalFillMethod'].keys(): configSettingsJson['advance']['profiler']['numericalFillMethod'][x] = 'False' configSettingsJson['advance']['profiler']['numericalFillMethod']['Mean'] = 'True' if onlinelearning.lower() == 'distributedlearning': for x in configSettingsJson['advance']['profiler']['categoricalFillMethod'].keys(): configSettingsJson['advance']['profiler']['categoricalFillMethod'][x] = 'False' configSettingsJson['advance']['profiler']['categoricalFillMethod']['MostFrequent'] = 'True' for x in configSettingsJson['advance']['profiler']['categoryEncoding'].keys(): configSettingsJson['advance']['profiler']['categoryEncoding'][x] = 'False' configSettingsJson['advance']['profiler']['categoryEncoding']['OneHotEncoding'] = 'True' 
configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'False' for x in configSettingsJson['advance']['selector']['featureEngineering'].keys(): if x != 'numberofComponents': configSettingsJson['advance']['selector']['featureEngineering'][x] = 'False' elif prbType == 'llmFineTuning': if configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False': temp['InputFeatures'] = request.POST.getlist('IncInputFeatures') temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') contextFeatures = request.POST.getlist('contextFeatures') configSettingsJson['basic']['contextFeature'] = ",".join([model for model in contextFeatures]) temp['DateTimeFeatures'] = '' temp['IndexFeatures'] = '' if request.POST.get('promptfriendlyname') != '': configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = request.POST.get('promptfriendlyname') else: configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = 'Instruction' if request.POST.get('responsefriendlyname') != '': configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = request.POST.get('responsefriendlyname') else: configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = '' else: if request.session['datatype'] == 'LLM_Document': for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'].keys(): configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][x] = 'False' configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][request.POST.get('dataPreprocessing')] = 'True' if request.session['datatype'] == 'LLM_Code': for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'].keys(): configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][x] = 'False' configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][request.POST.get('llmObjective')] = 'True' for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'].keys(): configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][x] = 'False' configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][request.POST.get('dataPreprocessing')] = 'True' else: configSettingsJson['basic']['onlineLearning'] = 'False' configSettingsJson['basic']['distributedLearning'] = 'False' temp['InputFeatures'] = request.POST.getlist('InputFeatures') temp['TargetFeatures'] = request.POST.getlist('TargetFeatures') temp['DateTimeFeatures'] = request.POST.getlist('DateTimeFeatures') temp['IndexFeatures'] = request.POST.getlist('IndexFeatures') if (configSettingsJson['basic']['algorithms']['timeSeriesAnomalyDetection']['AutoEncoder'] == 'True'):#task 11997 if (request.POST.get('analysis') == 'MultiVariate'): configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True' #task 11997 configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False' #task 11997 else: #print(configSettingsJson) configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True' configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False' #task 11997 temp['UserID'] = '' temp['ItemID'] = '' temp['rating'] = '' temp['secondDocFeature'] = '' temp['firstDocFeature'] = '' temp['invoiceNoFeature'] = '' 
temp['itemFeature'] = '' model = '' if temp['ProblemType'].lower() == 'recommendersystem': model = request.POST.get('MachineLearningModels') if model == 'ItemRating': temp['ProblemType'] = 'RecommenderSystem' temp['MachineLearningModels'] = ['ItemRating'] temp['DeepLearningModels'] = '' temp['UserID'] = request.POST.get('UserID') temp['ItemID'] = request.POST.get('ItemID') temp['rating'] = request.POST.get('rating') temp['InputFeatures'] = [] temp['InputFeatures'].append(temp['UserID']) temp['InputFeatures'].append(temp['ItemID']) temp['InputFeatures'].append(temp['rating']) if model == 'TextSimilarity-Siamese': temp['ProblemType'] = 'recommenderSystem' temp['MachineLearningModels'] = ['TextSimilarity-Siamese'] temp['secondDocFeature'] = request.POST.get('secondDocFeature') temp['firstDocFeature'] = request.POST.get('firstDocFeature') temp['InputFeatures'] = [] temp['InputFeatures'].append(temp['secondDocFeature']) temp['InputFeatures'].append(temp['firstDocFeature']) if model == 'AssociationRules-Apriori': temp['ProblemType'] = 'recommenderSystem' temp['DeepLearningModels'] = '' temp['MachineLearningModels'] = ['AssociationRules-Apriori'] temp['invoiceNoFeature'] = request.POST.get('associationRuleInvoiceNo') temp['itemFeature'] = request.POST.get('associationRuleItem') temp['InputFeatures'] = [] temp['InputFeatures'].append(temp['invoiceNoFeature']) temp['InputFeatures'].append(temp['itemFeature']) temp['ScoringCriteria'] = request.POST.get('ScoringCriteria') if temp['ProblemType'].lower() not in ['recommendersystem','textsimilarity','associationrules','llmfinetuning']: temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels') temp['DeepLearningModels'] = request.POST.getlist('SelectDeepLearningModels') elif temp['ProblemType'].lower() == 'llmfinetuning': temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels') model = temp['MachineLearningModels'][0] supportedModelsSize = configSettingsJson['basic']['modelSize'][temp['ProblemType']][model] selectedModelSize = request.POST.get('modelSize') for x in supportedModelsSize.keys(): configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][x] = 'False' configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][selectedModelSize] = 'True' temp['noofforecasts'] = request.POST.get('noofforecasts') temp['inlierLabels'] = request.POST.get('inlierLabels') #temp['filterExpression'] = request.POST.get('filterExpression') if temp['ProblemType'].lower() in ['clustering','topicmodelling','similarityidentification','contextualsearch']: temp['TargetFeatures'] = '' configSettingsJson['basic']['modelName'] = temp['ModelName'] configSettingsJson['basic']['modelVersion'] = temp['Version'] configSettingsJson['basic']['dataLocation'] = str(temp['dataLocation']) configSettingsJson['basic']['deployLocation'] = DEPLOY_LOCATION if configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False': configSettingsJson['basic']['trainingFeatures'] = ",".join([model for model in temp['InputFeatures']]) configSettingsJson['basic']['dateTimeFeature'] = ",".join([model for model in temp['DateTimeFeatures']]) configSettingsJson['basic']['targetFeature'] = ",".join([model for model in temp['TargetFeatures']]) configSettingsJson['basic']['indexFeature'] = ",".join([model for model in temp['IndexFeatures']]) if filterjson == 'NA': configSettingsJson['basic']['filter'] = 'NA' else: configSettingsJson['basic']['filter'] = eval(filterjson) if timegroupingjson == 'NA': 
configSettingsJson['basic']['timegrouper'] = 'NA' else: configSettingsJson['basic']['timegrouper'] = eval(timegroupingjson) if groupingjson == 'NA': configSettingsJson['basic']['group'] = 'NA' else: configSettingsJson['basic']['group'] = eval(groupingjson) problemtyp = configSettingsJson['basic']['analysisType'] for i in list(problemtyp.keys()): configSettingsJson['basic']['analysisType'][i]='False' algorithm = configSettingsJson['basic']['algorithms'] for i in list(algorithm.keys()): for x in list(configSettingsJson['basic']['algorithms'][i].keys()): if x not in ['textSimilarityConfig','itemRatingConfig','associationRulesConfig','textSummarization']: configSettingsJson['basic']['algorithms'][i][x] = 'False' configSettingsJson['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True' # configSettingsJson['basic']['problem_type'] = temp['ProblemType'] scoring = configSettingsJson['basic']['scoringCriteria'] for i in list(scoring.keys()): for x in list(configSettingsJson['basic']['scoringCriteria'][i].keys()): configSettingsJson['basic']['scoringCriteria'][i][x] = 'False' if temp['ProblemType'].lower() in ["classification","regression","survivalanalysis","similarityidentification","timeseriesforecasting","contextualsearch"]: #task 11997 configSettingsJson['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True' # configSettingsJson['basic']['problem_type'] = temp['ProblemType'] # configSettingsJson['basic']['scoringCriteria'] = temp['ScoringCriteria'] configSettingsJson['basic']['noofforecasts'] = temp['noofforecasts'] configSettingsJson['basic']['inlierLabels'] = temp['inlierLabels'] #configSettingsJson['basic']['filterExpression'] = temp['filterExpression'] configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['userID'] = temp['UserID'] configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['itemID'] = temp['ItemID'] configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['rating'] = temp['rating'] configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature'] = temp['firstDocFeature'] configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature'] = temp['secondDocFeature'] configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] = temp['invoiceNoFeature'] configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'] = temp['itemFeature'] for x in temp['MachineLearningModels']: if temp['ProblemType'].lower() =='associationrules' or temp['ProblemType'].lower() == 'textsimilarity': temp['ProblemType'] = 'recommenderSystem' if request.POST.get('SearchType') != 'NAS' and request.POST.get('SearchType') != 'GoogleModelSearch'and request.POST.get('SearchType') != 'AutoGluon': configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][x] = 'True' #for y in temp['DeepLearningModels']: # configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][y] = 'True' configSettingsJson['basic']['output']['profilerStage'] = 'True' configSettingsJson['basic']['output']['selectorStage'] = 'True' for key in configSettingsJson['advance']['profiler']['textConversionMethod']: configSettingsJson['advance']['profiler']['textConversionMethod'][key] = 'False' if 
temp['ProblemType'].lower() != 'topicmodelling': configSettingsJson['advance']['profiler']['textConversionMethod']['TF_IDF'] ='True' else: configSettingsJson['advance']['profiler']['textConversionMethod']['CountVectors'] ='True' #print('============================') #print(temp['ProblemType'].lower()) #print('============================') if temp['ProblemType'].lower() == 'textsummarization': configSettingsJson['basic']['algorithms']['textSummarization']['Text Summarization'] = 'True' configSettingsJson['basic']['textSummarization']['KeyWords'] = str(request.POST.get('addKeywordsForSummarization')) configSettingsJson['basic']['textSummarization']['pathForKeywordFile'] = str(request.POST.get('DataFilePath')) if temp['ProblemType'].lower() not in ['recommendersystem','textsummarization','llmfinetuning']: if configSettingsJson['basic']['onlineLearning'] != 'True' and configSettingsJson['basic']['distributedLearning'] != 'True': jsonarr =request.POST.get('jsonarr') res = ast.literal_eval(jsonarr) for x in res: if x['type'].lower() == 'text': configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False' configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True' configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False' if len(request.POST.get('traindfeatures').split(',')) > 30: configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False' configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True' configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False' configSettingsJson['advance']['profiler']['featureDict'] = res configSettingsJson['basic']['indexFeature'] = request.POST.get('indexfeatures') configSettingsJson['basic']['trainingFeatures'] = request.POST.get('traindfeatures') configSettingsJson['basic']['dateTimeFeature'] = request.POST.get('datefeatures') if request.POST.get('SearchType') == 'GoogleModelSearch': configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]]['GoogleModelSearch_DNN'] = 'True' configSettingsJson['basic']['output']['profilerStage']= 'True' #---------- Time series Changes Task 13052 ----------------- if temp['ProblemType'].lower() == 'timeseriesforecasting': configSettingsJson = set_ts_preprocessing(request,configSettingsJson) status,msg= start_check(configSettingsJson) updatedConfigSettings = json.dumps(configSettingsJson) updatedConfigFile = request.session['config_json'] with open(updatedConfigFile, "w") as fpWrite: fpWrite.write(updatedConfigSettings) fpWrite.close() request.session['ModelStatus'] = 'Not Trained' selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] request.session['currentstate'] = 1 from appbe.telemetry import UpdateTelemetry UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'ProblemType',prbType) UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Configured') context = {'tab': 'configure', 'temp': temp,'advconfig': configSettingsJson, 'basic_status_msg': 'Configuration Done', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'], 'selected': 'modeltraning','training':True,'basic_help':ht.basic_help} # return render(request, 'basicconfig.html', context) if submittype == 
'BasicDefault': temp = {} temp['ModelName'] = request.session['UseCaseName'] temp['Version'] = request.session['ModelVersion'] dataLocation = str(request.session['datalocation']) df = pd.read_csv(dataLocation, encoding='latin1') featuresList = df.columns.values.tolist() datetimeFeatures = [] sequenceFeatures = [] unimportantFeatures = [] featuresRatio = {} for i in featuresList: check = ea.match_date_format(df[i]) if check == True: datetimeFeatures.append(i) unimportantFeatures.append(i) seq_check = ea.check_seq_feature(df[i]) if seq_check == True: sequenceFeatures.append(i) unimportantFeatures.append(i) ratio = ea.check_category(df[i]) if ratio != 0: featuresRatio[i] = ratio else: unimportantFeatures.append(i) targetFeature = min(featuresRatio, key=featuresRatio.get) unimportantFeatures.append(targetFeature) config = {} config['modelName'] = request.session['UseCaseName'] config['modelVersion'] = request.session['ModelVersion'] config['datetimeFeatures'] = datetimeFeatures config['sequenceFeatures'] = sequenceFeatures config['FeaturesList'] = featuresList config['unimportantFeatures'] = unimportantFeatures config['targetFeature'] = targetFeature request.session['currentstate'] = 1 context = {'tab': 'configure', 'temp': temp, 'config': config, 'currentstate': request.session['currentstate'], 'selected': 'modeltraning'} except Exception as e: print(e) import sys exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return status,msg,context def openbasicconf(request): # 10012:Decision Threshold related Changes data_is_under_RAM_threshold = True updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r+") configSettingsData = f.read() configSettingsJson = json.loads(configSettingsData) temp = {} # temp['ModelName'] = request.session['UseCaseName'] # temp['Version'] = request.session['ModelVersion'] if request.session['datatype'] == 'Video' or request.session['datatype'] == 'Image' or request.session['datatype'] == 'Document': folderLocation = str(request.session['datalocation']) dataFile = os.path.join(folderLocation, request.session['csvfullpath']) else: dataFile = str(request.session['datalocation']) # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- from appbe.dataIngestion import checkRAMThreshold data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) # ------------------------------------------------------ E N D ------------------------------------------------------ # Retraing settings changes # -------- S T A R T -------- IsReTrainingCase = False if request.session['IsRetraining'] == 'Yes': IsReTrainingCase = True IsSameFeatures = True # --------------------------- featuresList = configSettingsJson['basic']['featureList'] unimportantFeatures = [] modelfeatures = configSettingsJson['basic']['trainingFeatures'] for x in featuresList: if x not in modelfeatures: unimportantFeatures.append(x) config = {} config['ModelName'] = request.session['usecaseid'] config['Version'] = request.session['ModelVersion'] config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature'] # .split(",") if configSettingsJson['basic']['indexFeature']: config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature'] # .split(",") config['FeaturesList'] = featuresList config['unimportantFeatures'] = unimportantFeatures config['targetFeature'] = 
configSettingsJson['basic']['targetFeature'].split(",") problemtypes = configSettingsJson['basic']['analysisType'] onlineLearning = configSettingsJson['basic']['onlineLearning'] problem_type = "" for k in problemtypes.keys(): if configSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break #print('123',problem_type) config['ProblemType'] = problem_type # config['ProblemType'] = configSettingsJson['basic']['problem_type'] scoring = configSettingsJson['basic']['scoringCriteria'] scoringCriteria = "" for k in scoring.keys(): if configSettingsJson['basic']['scoringCriteria'][k] == 'True': scoringCriteria = k break config['ScoringCriteria'] = scoringCriteria # config['ProblemType'] = configSettingsJson['basic']['problem_type'] # config['ScoringCriteria'] = configSettingsJson['basic']['scoringCriteria'] selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] if 'NoOfRecords' in request.session: records = request.session['NoOfRecords'] else: records = 'NA' if request.session['finalstate'] <= 1: request.session['finalstate'] = 1 request.session['currentstate'] = 1 # dataFile = str(request.session['datalocation']) # df = pd.read_csv(dataFile,encoding='utf8') if 'NoOfRecords' in request.session: noofforecast = 20 else: noofforecast = 20 config['noofforecasts'] = noofforecast if 'numericFeature' in request.session: numericFeature = request.session['numericFeature'] else: numericFeature = '' problemType = 'classification' for key in configSettingsJson['basic']['analysisType']: if configSettingsJson['basic']['analysisType'][key] == 'True': problemType = key break scoringCreteria = 'NA' if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997 for key in configSettingsJson['basic']['scoringCriteria'][problemType]: if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True': scoringCreteria = key break selectAlgo = "" if problemType in ['classification','regression','timeSeriesForecasting', 'timeSeriesAnomalyDetection', 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997 for key in configSettingsJson['basic']['algorithms'][problemType]: if configSettingsJson['basic']['algorithms'][problemType][key] == 'True': if selectAlgo != "": selectAlgo += ',' selectAlgo += key modelSize = '' if problemType == 'llmFineTuning': for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys(): if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True': modelSize = key break featuresdict = [feature['feature'] for feature in configSettingsJson['advance']['profiler']['featureDict']] context = {'tab': 'tabconfigure','modelSize':modelSize,'featuresdict':featuresdict, 'configsettings': configSettingsJson, 'temp': temp, 'config': config,'numericFeature':numericFeature,'onlineLearning':onlineLearning, 'noOfRecords': records, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo, 'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'selected': 'modeltraning','IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase,'basic_help':ht.basic_help # 10012:Decision Threshold related changes , 
'DLCheckpoint':data_is_under_RAM_threshold} return context def gotoconf(request): selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] try: # 10012:Decision Threshold related Changes data_is_under_RAM_threshold = True ModelName = usecasedetails.objects.get(id=request.session['ModelName']) Version = request.session['ModelVersion'] import os if request.session['datatype'] in ['Video', 'Image','Document','Object']: folderLocation = str(request.session['datalocation']) dataFile = os.path.join(folderLocation, request.session['csvfullpath']) else: dataFile = str(request.session['datalocation']) # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- from appbe.dataIngestion import checkRAMThreshold data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) # ------------------------------------------------------ E N D ------------------------------------------------------ if request.session['datatype'] not in ['LLM_Document','LLM_Code']: from appbe.eda import ux_eda if 'delimiter' not in request.session: request.session['delimiter'] = ',' if 'textqualifier' not in request.session: request.session['textqualifier'] = '"' eda_obj = ux_eda(dataFile,request.session['delimiter'],request.session['textqualifier'],optimize=1) featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeatures = eda_obj.getFeatures() else: featuresList = [] featuresList.append('Instruction') datetimeFeatures=[] sequenceFeatures=[] constantFeature=[] textFeature=[] targetFeature='Response' numericCatFeatures = [] numericFeature=[] catFeatures=[] featuresListJson = [] for x in featuresList: featureOperation={} featureOperation['feature'] = x if x in datetimeFeatures: featureOperation['type'] = 'date' featureOperation['fillMethod'] = 'na' featureOperation['categoryEncoding'] = 'na' elif x in textFeature: featureOperation['type'] = 'text' featureOperation['fillMethod'] = 'na' featureOperation['categoryEncoding'] = 'na' elif x in sequenceFeatures: featureOperation['type'] = 'index' featureOperation['fillMethod'] = 'median' featureOperation['categoryEncoding'] = 'na' elif (x in catFeatures) or (x in constantFeature): featureOperation['type'] = 'categorical' featureOperation['fillMethod'] = 'mode' featureOperation['categoryEncoding'] = 'targetEncoding' else: featureOperation['type'] = 'numerical' featureOperation['fillMethod'] = 'medium' featureOperation['categoryEncoding'] = 'na' featureOperation['outlierDetection'] = 'disable' featureOperation['outlierOperation'] = 'nochange' featureOperation['normalizer'] = 'none' featuresListJson.append(featureOperation) request.session['numericFeature'] = numericFeature records = 0 import os if os.path.isfile(dataFile): for chunk in pd.read_csv(dataFile, chunksize=20000,encoding="utf-8",encoding_errors= 'replace'): records = records+len(chunk) request.session['NoOfRecords'] = records filetimestamp = str(int(time.time())) CONFIG_FILE_PATH = request.session['configfilepath'] config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json') outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json') request.session['outputfilepath'] = str(outputfile) modelname = request.session['usecaseid'] modelname = modelname.replace(" ", "_") DEPLOY_LOCATION = request.session['deploylocation'] 
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION, modelname,str(Version),'log','model_training_logs.log') request.session['config_json'] = config_json_filename #request.session['ModelVersion'] = Version request.session['ModelStatus'] = 'Not Trained' # p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained', # ConfigPath=config_json_filename, Version=Version, ModelName=ModelName, # TrainOuputLocation=outputfile) # p.save() # from AION_UX import telemetry # telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated') # request.session['modelid'] = p.id temp = {} temp['ModelName'] = request.session['usecaseid'] temp['Version'] = request.session['ModelVersion'] ''' featuresList = features #df.columns.values.tolist() datetimeFeatures = datetimeFeatures = [] sequenceFeatures = [] unimportantFeatures = [] featuresRatio = {} for i in featuresList: check = ea.match_date_format(df[i]) if check == True: datetimeFeatures.append(i) unimportantFeatures.append(i) seq_check = ea.check_seq_feature(df[i]) if seq_check == True: sequenceFeatures.append(i) unimportantFeatures.append(i) ratio = ea.check_category(df[i]) if ratio != 0: featuresRatio[i] = ratio else: unimportantFeatures.append(i) targetFeature = min(featuresRatio, key=featuresRatio.get) unimportantFeatures.append(targetFeature) ''' unimportantFeatures = list(datetimeFeatures) unimportantFeatures.extend(sequenceFeatures) #unimportantFeatures = list(set(unimportantFeatures) + set(sequenceFeatures)) unimportantFeatures.append(targetFeature) config = {} noofforecast = 20 config['ModelName'] = request.session['usecaseid'] config['Version'] = request.session['ModelVersion'] config['datetimeFeatures'] = datetimeFeatures config['sequenceFeatures'] = sequenceFeatures config['FeaturesList'] = featuresList config['unimportantFeatures'] = unimportantFeatures config['targetFeature'] = targetFeature config['noofforecasts'] = noofforecast DEFAULT_FILE_PATH = request.session['defaultfilepath'] # Retraing settings changes # -------- S T A R T -------- IsReTrainingCase = False if request.session['IsRetraining'] == 'Yes': id = request.session['ModelName'] p = usecasedetails.objects.get(id=id) model = Existusecases.objects.filter(ModelName=p) indexVal = model.count() - 1 configFile = str(model[indexVal].ConfigPath) # configFile = str(model[0].ConfigPath) # request.session['IsRetraining'] = 'No' IsReTrainingCase = True # --------------------------- else: configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json') f = open(configFile, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) # Retraing settings changes # -------- S T A R T -------- pickDefaultSettings = False IsSameFeatures = False if 'featureList' not in configSettingsJson['basic']: pickDefaultSettings = True IsSameFeatures = True else: if configSettingsJson['basic']['featureList'] == featuresList: pickDefaultSettings = False IsSameFeatures = True else: pickDefaultSettings = True if pickDefaultSettings: # --------------------------- configSettingsJson['basic']['featureList'] = featuresList configSettingsJson['basic']['dateTimeFeature'] = ",".join([feature for feature in datetimeFeatures]) configSettingsJson['basic']['indexFeature'] = sequenceFeatures trainingFeatures = list(set(featuresList) - set(unimportantFeatures)) configSettingsJson['basic']['trainingFeatures'] = ",".join([feature for feature in trainingFeatures]) configSettingsJson['basic']['targetFeature'] = targetFeature if 
request.session['datatype'].lower() in ['video','image','object','document','llm_document','llm_code']: for x in configSettingsJson['basic']['analysisType'].keys(): configSettingsJson['basic']['analysisType'][x] = 'False' configSettingsJson['basic']['folderSettings']['fileType'] = request.session['datatype'] configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['csvfullpath'] configSettingsJson['basic']['folderSettings']['fileExtension'] = request.session['fileExtension'] if request.session['datatype'] in ['LLM_Document','LLM_Code']: configSettingsJson['basic']['analysisType']['llmFineTuning'] = 'True' configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt']='Instruction' configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']='Response' configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] = 'True' elif request.session['datatype'] == 'Video': configSettingsJson['basic']['analysisType']['videoForecasting'] = 'True' elif request.session['datatype'] == 'Image': configSettingsJson['basic']['analysisType']['imageClassification'] = 'True' elif request.session['datatype'] == 'Object': configSettingsJson['basic']['analysisType']['objectDetection'] = 'True' elif request.session['datatype'].lower() == 'document': df = pd.read_csv(dataFile, encoding='utf8',sep=request.session['delimiter'],quotechar=request.session['textqualifier'],nrows=100) noOfEmotyLevels = 0 shape = df.shape if shape[1] == 2: noOfEmotyLevels = df['Label'].isnull().sum() #print(noOfEmotyLevels) if noOfEmotyLevels == 100: configSettingsJson['basic']['analysisType']['topicModelling'] = 'True' else: configSettingsJson['basic']['analysisType']['classification'] = 'True' else: if 'uploadfiletype' in request.session: configSettingsJson['basic']['folderSettings']['fileType'] = request.session['uploadfiletype'] configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['uploadLocation'] try: if isinstance(datetimeFeatures, list): if len(datetimeFeatures) != 0: configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile) elif isinstance(datetimeFeatures, str): if datetimeFeatures != '': configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile) except: pass # Retraing settings changes # -------- S T A R T -------- tot_count=len(numericCatFeatures) #task 11997 if (tot_count > 1): configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True' configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False' else: configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True' configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False' if 'delimiter' in request.session: configSettingsJson['basic']['fileSettings']['delimiters'] = request.session['delimiter'] else: configSettingsJson['basic']['fileSettings']['delimiters'] = ',' if 'textqualifier' in request.session: configSettingsJson['basic']['fileSettings']['textqualifier'] = request.session['textqualifier'] else: request.session['textqualifier'] = '"' configSettingsJson['advance']['profiler']['featureDict'] = featuresListJson configSettingsJson['basic']['onlineLearning'] = 'False' configSettingsJson['basic']['dataLocation'] = request.session['datalocation'] configSettingsJson['basic']['noOfRecords'] = 
request.session['NoOfRecords'] onlineLearning = configSettingsJson['basic']['onlineLearning'] updatedConfigSettings = json.dumps(configSettingsJson) with open(config_json_filename, "w") as fpWrite: fpWrite.write(updatedConfigSettings) fpWrite.close() ''' p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained', ConfigPath=config_json_filename, Version=Version, ModelName=ModelName, TrainOuputLocation=outputfile) p.save() ''' p = Existusecases.objects.get(ModelName=ModelName,Version=Version) p.DataFilePath = dataFile p.DeployPath = DEPLOY_LOCATION p.ConfigPath = config_json_filename p.TrainOuputLocation = outputfile p.save() #from appbe import telemetry #telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated') request.session['modelid'] = p.id # --------------------------- from appbe.compute import selectedInfratructure infra = selectedInfratructure() if infra.lower() in ['aws','gcp']: problemType = 'llmFineTuning' else: problemType = 'classification' #print(problemType) for key in configSettingsJson['basic']['analysisType']: if configSettingsJson['basic']['analysisType'][key] == 'True': problemType = key break scoringCreteria = 'NA' if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997 for key in configSettingsJson['basic']['scoringCriteria'][problemType]: if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True': scoringCreteria = key break selectAlgo = "" if problemType in ['classification','regression','timeSeriesForecasting','timeSeriesAnomalyDetection', 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997 for key in configSettingsJson['basic']['algorithms'][problemType]: if configSettingsJson['basic']['algorithms'][problemType][key] == 'True': if selectAlgo != "": selectAlgo += ',' selectAlgo += key modelSize = '' if problemType == 'llmFineTuning': for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys(): if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True': modelSize = key break movenext = True request.session['finalstate'] = 1 request.session['currentstate'] = 1 context = {'tab': 'tabconfigure','modelSize':modelSize,'tot_count':tot_count, 'temp': temp, 'configsettings': configSettingsJson, 'config': config,'numericFeature':numericFeature,'onlineLearning':onlineLearning, 'noOfRecords': records, 'selected_use_case': selected_use_case,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'movenext': movenext, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'selected': 'modeltraning','advance':True,'basic_help':ht.basic_help # Retraing settings changes ,'IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase # 10012:Decision Threshold related ,'DLCheckpoint':data_is_under_RAM_threshold} return context except UnicodeDecodeError as e: print(e) context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'File Reading Error: '+str(e)} return context except Exception as e: print(e) import sys,os exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) 
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'Config Error: '+str(e)} return context
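# --- Illustrative helper (added for clarity; not part of the original module) ---
# The branching in update_granularity() maps the time delta between the first two
# records to a granularity unit. The sketch below restates that mapping as a pure
# function so the thresholds are easy to read and test in isolation; the function
# name is hypothetical and is not referenced elsewhere in this module.
def infer_granularity_unit(time_delta_seconds: float) -> str:
    MINUTES = 60  # seconds per minute, matching the constant used in update_granularity()
    thresholds = [
        (1 * MINUTES, 'second'),
        (60 * MINUTES, 'minute'),
        (24 * 60 * MINUTES, 'hour'),
        (7 * 24 * 60 * MINUTES, 'day'),
        (30 * 24 * 60 * MINUTES, 'week'),
        (365 * 24 * 60 * MINUTES, 'month'),
    ]
    for limit, unit in thresholds:
        if time_delta_seconds < limit:
            return unit
    return 'year'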
advance_Config.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import json import os,sys from appbe import help_Text as ht def save(request): from appbe.dataPath import DEFAULT_FILE_PATH if request.method == 'POST': submittype = request.POST.get('AdvanceSubmit') if submittype != 'AdvanceDefault': configFile = request.session['config_json'] f = open(configFile, "r+") configSettingsData = f.read() configSettings = json.loads(configSettingsData) try: if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false': numericselectedmethod = request.POST.get('numericfillmethod') for x in list(configSettings['advance']['profiler']['numericalFillMethod'].keys()): configSettings['advance']['profiler']['numericalFillMethod'][x] = 'False' configSettings['advance']['profiler']['numericalFillMethod'][numericselectedmethod] = 'True' categoricalselectedmethod = request.POST.get('categorialfillmethod') for x in list(configSettings['advance']['profiler']['categoricalFillMethod'].keys()): configSettings['advance']['profiler']['categoricalFillMethod'][x] = 'False' configSettings['advance']['profiler']['categoricalFillMethod'][categoricalselectedmethod] = 'True' categoryEncodingMethod = request.POST.get('categoryencoding') for x in list(configSettings['advance']['profiler']['categoryEncoding'].keys()): configSettings['advance']['profiler']['categoryEncoding'][x] = 'False' configSettings['advance']['profiler']['categoryEncoding'][categoryEncodingMethod] = 'True' outlierDetection = request.POST.get('outlierDetection') for x in list(configSettings['advance']['profiler']['outlierDetection'].keys()): configSettings['advance']['profiler']['outlierDetection'][x] = 'False' if outlierDetection != 'Disable': configSettings['advance']['profiler']['outlierDetection'][outlierDetection] = 'True' #configSettings['advance']['profiler']['outlierDetectionStatus'] = request.POST.get('AnamolyDetectionStatus') #configSettings['advance']['profiler']['outlierDetectionMethod'] = request.POST.get('AnaTreatmentMethod') configSettings['advance']['profiler']['misValueRatio'] = request.POST.get('MisValueRatio') #configSettings['advance']['profiler']['categoricalToNumeric'] = request.POST.get('CategoricalToNumeric') configSettings['advance']['profiler']['numericFeatureRatio'] = request.POST.get('NumFeatureRatio') configSettings['advance']['profiler']['categoryMaxLabel'] = request.POST.get('CatMaxLabels') configSettings['advance']['selector']['categoryMaxLabel'] = request.POST.get('CatMaxLabels') normalizationtypes = configSettings['advance']['profiler']['normalization'] for k in normalizationtypes.keys(): configSettings['advance']['profiler']['normalization'][k] = 'False' if request.POST.get('NormalizationMethod').lower() != 'none': configSettings['advance']['profiler']['normalization'][request.POST.get('NormalizationMethod')] = 'True' #configSettings['advance']['profiler']['normalizationMethod'] = request.POST.get('NormalizationMethod') configSettings['advance']['profiler']['removeDuplicate'] = request.POST.get('removeDuplicate') # 
---------------------------------------------- Debiasing Changes ---------------------------------------------- configSettings['advance']['profiler']['deBiasing']['FeatureName'] = request.POST.get('InputFeature') configSettings['advance']['profiler']['deBiasing']['ClassName'] = request.POST.get('InputClass') configSettings['advance']['profiler']['deBiasing']['Algorithm'] = request.POST.get('InputAlgorithm') configSettings['advance']['profiler']['deBiasing']['TargetFeature'] = configSettings['basic']['targetFeature'] # ---------------------------------------------- ---------------------------------------------- problemtypes = configSettings['basic']['analysisType'] problem_type = "" for k in problemtypes.keys(): if configSettings['basic']['analysisType'][k] == 'True': problem_type = k break if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false' and configSettings['basic']['onlineLearning'].lower() == 'false' and configSettings['basic']['distributedLearning'].lower() == 'false': configSettings['advance']['profiler']['textCleaning']['removeNoise'] = request.POST.get('noiseStatus') # -------------------------------- 12301:Remove Noise Config related Changes S T A R T -------------------------------- if request.POST.get('noiseStatus') == 'True': configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = request.POST.get('DecodeHTML') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = request.POST.get('removeHyperlinks') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = request.POST.get('RemoveMentions') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = request.POST.get('removeHashtags') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = request.POST.get('removeEmoji') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = request.POST.get('unicodeToAscii') configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = request.POST.get('removeNonAscii') else: configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = "False" configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = "False" # ---------------------------------------------------------------- E N D ---------------------------------------------------------------- configSettings['advance']['profiler']['textCleaning']['expandContractions'] = request.POST.get( 'expandContractions') configSettings['advance']['profiler']['textCleaning']['normalize'] = request.POST.get('normalize') if (request.POST.get('normalizeMethod') == 'Lemmatization'): configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = "True" configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = "False" else: 
configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = "True" configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = "False" configSettings['advance']['profiler']['textCleaning']['replaceAcronym'] = request.POST.get('replaceAcronym') if request.POST.get('acronymDict') != '' and request.POST.get('acronymDict') != 'None': configSettings['advance']['profiler']['textCleaning']['acronymConfig']['acronymDict'] = eval(request.POST.get( 'acronymDict')) configSettings['advance']['profiler']['textCleaning']['correctSpelling'] = request.POST.get( 'correctSpelling') configSettings['advance']['profiler']['textCleaning']['removeStopwords'] = request.POST.get( 'removeStopwords') if (request.POST.get('ExtendOrReplace') == 'NA'): configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "False" configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "False" elif (request.POST.get('ExtendOrReplace') == 'Extend'): configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "True" configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "False" else: configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "False" configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "True" configSettings['advance']['profiler']['textCleaning']['stopWordsConfig'][ 'stopwordsList'] = request.POST.get('stopwordsList') configSettings['advance']['profiler']['textCleaning']['removePunctuation'] = request.POST.get( 'removePunctuation') configSettings['advance']['profiler']['textCleaning']['removePunctuationConfig'][ 'removePuncWithinTokens'] = request.POST.get('removePuncWithinTokens') configSettings['advance']['profiler']['textCleaning']['removeNumericTokens'] = request.POST.get( 'removeNumericTokens') configSettings['advance']['profiler']['textCleaning']['removeNumericConfig'][ 'removeNumeric_IncludeSpecialCharacters'] = request.POST.get('removeNumeric_IncludeSpecialCharacters') if (request.POST.get('tokenizationLib') == 'nltk'): configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'textblob'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'gensim'] = "False" elif (request.POST.get('tokenizationLib') == 'textblob'): configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'textblob'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'gensim'] = "False" elif (request.POST.get('tokenizationLib') == 'spacy'): configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'textblob'] = "False" 
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'gensim'] = "False" elif (request.POST.get('tokenizationLib') == 'keras'): configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'textblob'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'gensim'] = "False" else: configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][ 'textblob'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['gensim'] = "True" if (request.POST.get('lemmatizationLib') == 'nltk'): configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ 'textblob'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ 'spacy'] = "False" elif (request.POST.get('lemmatizationLib') == 'textblob'): configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ 'textblob'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ 'spacy'] = "False" else: configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][ 'textblob'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['spacy'] = "True" if (request.POST.get('stopwordsRemovalLib') == 'nltk'): configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'nltk'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'gensim'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'spacy'] = "False" elif (request.POST.get('stopwordsRemovalLib') == 'gensim'): configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'gensim'] = "True" configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'spacy'] = "False" else: configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'nltk'] = "False" configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'gensim'] = "False" 
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][ 'spacy'] = "True" configSettings['advance']['profiler']['textFeatureExtraction']['n_grams'] = request.POST.get('n_grams') configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][ 'min_n'] = int(request.POST.get('range_min_n')) configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][ 'max_n'] = int(request.POST.get('range_max_n')) configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags'] = request.POST.get('pos_tags') if (request.POST.get('pos_tags_lib') == 'nltk'): configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "True" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "False" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "False" elif (request.POST.get('pos_tags_lib') == 'textblob'): configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "False" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "True" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "False" else: configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "False" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "False" configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "True" textconvertionmethods = configSettings['advance']['profiler']['textConversionMethod'] for k in textconvertionmethods.keys(): configSettings['advance']['profiler']['textConversionMethod'][k] = 'False' if problem_type.lower() not in ['similarityidentification','contextualsearch']: configSettings['advance']['profiler']['textConversionMethod'][request.POST.get('textConvertionMethod')] = 'True' if 'embeddingSize' in configSettings['advance']['profiler']: glove = configSettings['advance']['profiler']['embeddingSize']['Glove'] for k in glove.keys(): configSettings['advance']['profiler']['embeddingSize']['Glove'][k] = 'False' configSettings['advance']['profiler']['embeddingSize']['Glove'][request.POST.get('txtglovedimensions')] = 'True' fastText = configSettings['advance']['profiler']['embeddingSize']['FastText'] for k in fastText.keys(): configSettings['advance']['profiler']['embeddingSize']['FastText'][k] = 'False' configSettings['advance']['profiler']['embeddingSize']['FastText'][request.POST.get('txtFastTextdimensions')] = 'True' if 'LatentSemanticAnalysis' in configSettings['advance']['profiler']['embeddingSize']: LatentSemanticAnalysis = configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] for k in LatentSemanticAnalysis.keys(): configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][k] = 'False' configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][request.POST.get('txttfidfdimensions')] = 'True' if 'TF_IDF' in configSettings['advance']['profiler']['embeddingSize']: configSettings['advance']['profiler']['embeddingSize']['TF_IDF']['maxFeatures'] = request.POST.get('tfidfmaxfeatures') if 'CountVectors' in configSettings['advance']['profiler']['embeddingSize']: configSettings['advance']['profiler']['embeddingSize']['CountVectors']['maxFeatures'] = request.POST.get('cvmaxfeatures') if problem_type.lower() == 'imageclassification': 
configSettings['advance']['image_config']['img_width'] = int(request.POST.get('img_width')) configSettings['advance']['image_config']['img_height'] = int(request.POST.get('img_height')) configSettings['advance']['image_config']['img_channel'] = int(request.POST.get('img_channel')) configSettings['advance']['image_config']['lr'] = float(request.POST.get('lr')) configSettings['advance']['image_config']['epochs'] = int(request.POST.get('epochs')) configSettings['advance']['image_config']['test_split_ratio'] = float(request.POST.get('test_split_ratio')) if problem_type.lower() == "llmfinetuning": configSettings = llmadvancesettings(configSettings,request) if problem_type.lower() == 'objectdetection' or problem_type.lower() == 'imageclassification': configSettings['advance']['ImageAugmentation']['Enable'] = request.POST.get('advance_ImageAugmentation_Enable') configSettings['advance']['ImageAugmentation']['KeepAugmentedImages'] = request.POST.get('advance_ImageAugmentation_keepAugmentedImages') configSettings['advance']['ImageAugmentation']['Noise']['Blur'] = request.POST.get('advance_ImageAugmentation_Noise_Blur') configSettings['advance']['ImageAugmentation']['Noise']['Brightness'] = request.POST.get('advance_ImageAugmentation_Noise_Brightness') configSettings['advance']['ImageAugmentation']['Noise']['Contrast'] = request.POST.get('advance_ImageAugmentation_Noise_Contrast') configSettings['advance']['ImageAugmentation']['Transformation']['Flip'] = request.POST.get('advance_ImageAugmentation_Transformation_Flip') configSettings['advance']['ImageAugmentation']['Transformation']['Rotate'] = request.POST.get('advance_ImageAugmentation_Transformation_Rotate') configSettings['advance']['ImageAugmentation']['Transformation']['Shift'] = request.POST.get('advance_ImageAugmentation_Transformation_Shift') configSettings['advance']['ImageAugmentation']['Transformation']['Crop'] = request.POST.get('advance_ImageAugmentation_Transformation_Crop') configSettings['advance']['ImageAugmentation']['configuration']['Blur']['noOfImages'] = request.POST.get('noofblurimages') configSettings['advance']['ImageAugmentation']['configuration']['Blur']['limit'] = request.POST.get('limitblurimage') configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['noOfImages'] = request.POST.get('noofbrightnessimages') configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['limit'] = request.POST.get('limitbrightnessimage') configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['noOfImages'] = request.POST.get('noofcontrastimages') configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['limit'] = request.POST.get('limitcontrastimage') configSettings['advance']['ImageAugmentation']['configuration']['Flip']['noOfImages'] = request.POST.get('noofflipimages') configSettings['advance']['ImageAugmentation']['configuration']['Rotate']['noOfImages'] = request.POST.get('noofrotateimages') configSettings['advance']['ImageAugmentation']['configuration']['Shift']['noOfImages'] = request.POST.get('noofshiftimages') configSettings['advance']['ImageAugmentation']['configuration']['Crop']['noOfImages'] = request.POST.get('noofcropimages') configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'False' configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'False' configSettings['advance']['selector']['featureSelection']['allFeatures'] = 'False' 
configSettings['advance']['selector']['featureSelection']['statisticalBased'] = 'False'
configSettings['advance']['selector']['featureSelection']['modelBased'] = 'False'
if (request.POST.get('selectionMethod') == 'FeatureSelection'):
    configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'True'
else:
    configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'True'
if request.POST.get('allFeatures'):
    configSettings['advance']['selector']['featureSelection']['allFeatures'] = request.POST.get('allFeatures')
if request.POST.get('statisticalBased'):
    configSettings['advance']['selector']['featureSelection']['statisticalBased'] = request.POST.get('statisticalBased')
if request.POST.get('modelBased'):
    configSettings['advance']['selector']['featureSelection']['modelBased'] = request.POST.get('modelBased')
dimentionalityreductionmethod = request.POST.get('dimentionalityreductionmethod')
for x in list(configSettings['advance']['selector']['featureEngineering'].keys()):
    if x != 'numberofComponents':
        configSettings['advance']['selector']['featureEngineering'][x] = 'False'
configSettings['advance']['selector']['featureEngineering'][dimentionalityreductionmethod] = 'True'
configSettings['advance']['selector']['featureEngineering']['numberofComponents'] = request.POST.get('numberofComponents')
#configSettings['advance']['selector']['categoricalFeatureRatio'] = request.POST.get('CatFeatureRatio')
configSettings['advance']['selector']['statisticalConfig']['correlationThresholdFeatures'] = request.POST.get('correlationThresholdFeatures')
configSettings['advance']['selector']['statisticalConfig']['correlationThresholdTarget'] = request.POST.get('correlationThresholdTarget')
configSettings['advance']['selector']['statisticalConfig']['pValueThresholdFeatures'] = request.POST.get('pValueThresholdFeatures')
configSettings['advance']['selector']['statisticalConfig']['pValueThresholdTarget'] = request.POST.get('pValueThresholdTarget')
configSettings['advance']['selector']['statisticalConfig']['varianceThreshold'] = request.POST.get('VarianceThreshold')
if problem_type.lower() == 'recommendersystem':
    configSettings['advance']['recommenderparam']['svd_params'] = eval(request.POST.get('svd_params'))
    configSettings['advance']['associationrule']['modelParams']['apriori'] = eval(request.POST.get('apriori'))
    configSettings['advance']['textSimilarityConfig'] = eval(request.POST.get('textsimilarity'))
if configSettings['basic']['distributedLearning'].lower() == 'true':
    configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classDistributedXGBoost'))
    configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classDistributedLightGBM'))
    # The regressor variants go under regressorModelParams so they do not overwrite the classifier settings.
    configSettings['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('DistributedXGBoostreg'))
    configSettings['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('DistributedLightGBMreg'))
if configSettings['basic']['onlineLearning'].lower() != 'true' and configSettings['basic']['distributedLearning'].lower() != 'true':
    if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression') or \
(problem_type.lower() == 'clustering') or (problem_type.lower() == 'topicmodelling'): if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Logistic Regression'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] = eval(request.POST.get('classification_LogisticRegression')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Naive Bayes'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] = eval(request.POST.get('classification_GaussianNB')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Support Vector Machine'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine'] = eval(request.POST.get('classification_SVC')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['K Nearest Neighbors'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] = eval(request.POST.get('classification_KNeighborsClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Decision Tree'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] = eval(request.POST.get('classification_DecisionTreeClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Random Forest'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] = eval(request.POST.get('classification_RandomForestClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Gradient Boosting'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] = eval(request.POST.get('classification_GradientBoostingClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Extreme Gradient Boosting (XGBoost)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classification_ExtremeGradientBoostingClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Light Gradient Boosting (LightGBM)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classification_LightGradientBoostingClassifier')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Categorical Boosting (CatBoost)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('classification_CategoricalBoostingClassifier')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Linear Regression'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] = eval(request.POST.get('regression_LinearRegression')) if 
problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Lasso'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Lasso'] = eval(request.POST.get('regression_Lasso')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Ridge'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Ridge'] = eval(request.POST.get('regression_Ridge')) if problem_type.lower() == 'topicmodelling' and configSettings['basic']['algorithms']['topicModelling']['LDA'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['topicModellingParams']['LDA']= eval(request.POST.get('topicmodeling_lda')) if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['KMeans'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['KMeans']= eval(request.POST.get('cluster_kmeans')) if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['DBSCAN'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['DBSCAN']= eval(request.POST.get('cluster_DBSCAN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Decision Tree'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] = eval(request.POST.get('regression_DecisionTreeRegressor')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Random Forest'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] = eval(request.POST.get('regression_RandomForestRegressor')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Extreme Gradient Boosting (XGBoost)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('regression_XGBoostRegressor')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Light Gradient Boosting (LightGBM)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('regression_LightGBMRegressor')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Categorical Boosting (CatBoost)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('regression_CatBoostRegressor')) configSettings['advance']['mllearner_config']['modelparamsfile'] = request.POST.get('ModelParamFile') configSettings['advance']['mllearner_config']['optimizationMethod'] = request.POST.get('OptimizationMethod') configSettings['advance']['mllearner_config']['optimizationHyperParameter'][ 'iterations'] = request.POST.get('iterations') configSettings['advance']['mllearner_config']['optimizationHyperParameter'][ 'trainTestCVSplit'] = request.POST.get('trainTestCVSplit') configSettings['advance']['mllearner_config']['thresholdTunning'] = request.POST.get('thresholdTunning') configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] = request.POST.get('EnsembleStacking') 
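# Illustrative sketch only (not part of the original flow): the algorithm-parameter fields
# posted by the advanced-config form are Python dict literals, which the statements above
# turn into dicts with eval(). ast.literal_eval parses the same kind of literal; the sample
# value below is an assumed shape, not a field read anywhere in this module.
import ast
sample_posted_value = "{'n_estimators': [100, 200], 'max_depth': [4, 8]}"
sample_params = ast.literal_eval(sample_posted_value)   # -> {'n_estimators': [100, 200], 'max_depth': [4, 8]}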
configSettings['advance']['mllearner_config']['Voting (Ensemble)'] = request.POST.get('EnsembleVoting') configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] = request.POST.get('ensemple_bagging_lr_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['param'] = eval(request.POST.get('classi_ensemple_bagging_lr_param')) configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] = request.POST.get('ensemple_bagging_naivebayes_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['param'] = eval(request.POST.get('classi_ensemple_bagging_naivebayes_param')) configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] = request.POST.get('ensemple_bagging_svm_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['param'] = eval(request.POST.get('classi_ensemple_bagging_svm_param')) configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] = request.POST.get('ensemple_bagging_knn_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['param'] = eval(request.POST.get('classi_ensemple_bagging_knn_param')) configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dt_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('classi_ensemple_bagging_dt_param')) configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] = request.POST.get('ensemple_bagging_rf_enable') if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['param'] = eval(request.POST.get('classi_ensemple_bagging_rf_param')) configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] = request.POST.get('ensemple_bagging_lir_enable') if 
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['param'] = eval(request.POST.get('reg_ensemple_bagging_lir_param')) configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dit_enable') if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('reg_ensemple_bagging_dit_param')) configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] = request.POST.get('ensemple_bagging_ridge_enable') if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['param'] = eval(request.POST.get('reg_ensemple_bagging_ridge_param')) if problem_type.lower() == 'classification': if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingClassifierparams')) if problem_type.lower() == 'regression': if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True': configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingRegressorparams')) configSettings['basic']['filterExpression'] = request.POST.get('filterExpression') #configSettings['advance']['mllearner_config']['trainPercentage'] = request.POST.get('trainPercentage') if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression'): configSettings['advance']['modelEvaluation']['smcStrategy'] = request.POST.get('smcStrategy') configSettings['advance']['modelEvaluation']['smcMaxDepth'] = request.POST.get('smcMaxDepth') configSettings['advance']['modelEvaluation']['smcCondition'] = request.POST.get('smcCondition') configSettings['advance']['modelEvaluation']['miCondition'] = request.POST.get('miCondition') if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Neural Network'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] = eval( request.POST.get('dl_classification_SNN')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network'] = eval( request.POST.get('dl_classification_RNN')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (GRU)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)'] = eval( request.POST.get('dl_classification_GRURNN')) if 
problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (LSTM)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] = eval( request.POST.get('dl_classification_LSTMRNN')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Convolutional Neural Network (1D)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)'] = eval( request.POST.get('dl_classification_CNN')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification'].get('Neural Architecture Search') == 'True': configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search'] = eval( request.POST.get('dl_classification_NAS')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Neural Network'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] = eval( request.POST.get('dl_regression_SNN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] = eval( request.POST.get('dl_regression_RNN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (GRU)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] = eval( request.POST.get('dl_regression_GRURNN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (LSTM)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] = eval( request.POST.get('dl_regression_LSTMRNN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Convolutional Neural Network (1D)'] == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)'] = eval( request.POST.get('dl_regression_CNN')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression'].get('Neural Architecture Search') == 'True': configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search'] = eval( request.POST.get('dl_regression_NAS')) #configSettings['advance']['dllearner_config']['optimizationMethod'] = request.POST.get('DLOptimizationMethod') else: if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Logistic Regression'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression'] = eval(request.POST.get('OnlineLogisticRegression')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Decision Tree Classifier'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier'] = eval(request.POST.get('OnlineDecisionTreeClassifier')) if 
problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Softmax Regression'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression'] = eval(request.POST.get('OnlineSoftmaxRegression')) if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online KNN Classifier'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier'] = eval(request.POST.get('OnlineKNNClassifier')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Linear Regression'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression'] = eval(request.POST.get('OnlineLinearRegression')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Decision Tree Regressor'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor'] = eval(request.POST.get('OnlineDecisionTreeRegressor')) if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online KNN Regressor'] == 'True': configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor'] = eval(request.POST.get('OnlineKNNRegressor')) configSettings['advance']['profiler']['targetEncodingParams'] = eval(request.POST.get('targetEncodingParams')) configSettings['advance']['profiler']['outlierDetectionParams'] = eval(request.POST.get('outlierDetectionParams')) if problem_type.lower() == 'objectdetection': configSettings['advance']['objectDetection']['pretrainedModel']= request.POST.get('objectdetectionpretrainedmodel') configSettings['advance']['objectDetection']['n_epoch'] = int(request.POST.get('objectDetection_n_epoch')) configSettings['advance']['objectDetection']['batch_size'] = int(request.POST.get('objectDetection_batch_size')) if problem_type.lower() == 'timeseriesforecasting': #task 11997 #task 13052 configSettings['advance']['timeSeriesForecasting']['fix_seasonality'] = request.POST.get('seasionality') # task 13052 configSettings['advance']['timeSeriesForecasting']['fix_stationarity'] =request.POST.get('stationarity') # task 13052 configSettings['advance']['timeSeriesForecasting']['modelParams']['ARIMA'] = eval(request.POST.get('ARIMA')) #task 11997 configSettings['advance']['timeSeriesForecasting']['modelParams']['FBPROPHET'] = eval(request.POST.get('FBPROPHET')) #task 11997 configSettings['advance']['timeSeriesForecasting']['modelParams']['LSTM'] = eval(request.POST.get('TSLSTM')) #task 11997 configSettings['advance']['timeSeriesForecasting']['modelParams']['Encoder_Decoder_LSTM_MVI_UVO'] = eval(request.POST.get('TSLSTMencoderdecoder')) configSettings['advance']['timeSeriesForecasting']['modelParams']['MLP'] = eval(request.POST.get('TSMLP')) #task 11997 if problem_type.lower() == 'timeseriesanomalydetection': configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['AutoEncoder'] = eval(request.POST.get('autoEncoderAD')) #task 11997 configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('dbscanAD')) #task 13316 if problem_type.lower() == 'anomalydetection': configSettings['advance']['anomalyDetection']['modelParams']['IsolationForest'] = 
eval(request.POST.get('IsolationForest')) configSettings['advance']['anomalyDetection']['modelParams']['oneclassSVM'] = eval(request.POST.get('oneclassSVM')) configSettings['advance']['anomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('DBScanAD')) updatedConfigSettingsJson = json.dumps(configSettings) f.seek(0) f.write(updatedConfigSettingsJson) f.truncate() f.close() errormsg = 'NA' request.session['ModelStatus'] = 'Not Trained' except Exception as e: import sys exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) errormsg = 'Input value error' print(e) if 'NoOfRecords' in request.session: records = request.session['NoOfRecords'] else: records = 'NA' if request.session['datatype'] in ['Video', 'Image','Document']: folderLocation = str(request.session['datalocation']) dataFilePath = os.path.join(folderLocation, request.session['csvfullpath']) else: dataFilePath = str(request.session['datalocation']) # dataFilePath = configSettings['basic']['dataLocation'] #df = pd.read_csv(dataFilePath, encoding='latin1') featuresList = configSettings['basic']['featureList'] config = {} config['modelName'] = configSettings['basic']['modelName'] config['modelVersion'] = configSettings['basic']['modelVersion'] config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature'] config['sequenceFeatures'] = configSettings['basic']['indexFeature'] config['FeaturesList'] = featuresList config['unimportantFeatures'] = list(set(featuresList) - set(configSettings['basic']['trainingFeatures'])) config['targetFeature'] = configSettings['basic']['targetFeature'] scoring = configSettings['basic']['scoringCriteria'] scoringCriteria = "" for k in scoring.keys(): if configSettings['basic']['scoringCriteria'][k] == 'True': scoringCriteria = k break config['scoringCriteria'] = scoringCriteria temp = {} temp['ModelName'] = configSettings['basic']['modelName'] temp['Version'] = configSettings['basic']['modelVersion'] selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] context = {'tab': 'advconfig', 'config': config, 'temp': temp, 'advconfig': configSettings, 'noOfRecords': records, 'advance_status_msg': 'Configuration Done', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'errormsg':errormsg, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'selected': 'modeltraining'} return context elif submittype == 'AdvanceDefault': try: MachineLearningModels = [] configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json') f = open(configFile, "r") configSettings = f.read() f.close() updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r+") configSettingsData = f.read() updateconfigSettingsJson = json.loads(configSettingsData) configSettingsJson = json.loads(configSettings) temp = {} temp['ModelName'] = request.session['UseCaseName'] temp['Version'] = request.session['ModelVersion'] config = {} config['modelName'] = request.session['UseCaseName'] config['modelVersion'] = request.session['ModelVersion'] config['datetimeFeatures'] = updateconfigSettingsJson['basic']['dateTimeFeature'] config['sequenceFeatures'] = updateconfigSettingsJson['basic']['indexFeature'] config['FeaturesList'] = updateconfigSettingsJson['basic']['trainingFeatures'] config['unimportantFeatures'] = '' config['targetFeature'] 
= updateconfigSettingsJson['basic']['targetFeature'] problemtypes = updateconfigSettingsJson['basic']['analysisType'] problem_type = "" for k in problemtypes.keys(): if updateconfigSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break selectAlgo = "" if problem_type in ['classification','regression','timeSeriesForecasting', 'timeSeriesAnomalyDetection', 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997 for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]: if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True': if selectAlgo != "": selectAlgo += ',' selectAlgo += key if problem_type not in ['classification','regression']: break for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]: if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True': MachineLearningModels.append(key) if problem_type == 'objectDetection': from AION import pretrainedModels ptmObj = pretrainedModels() obModels = ptmObj.get_info(selectAlgo) else: obModels = {} problemType = problem_type selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] request.session['currentstate'] = 2 if request.session['finalstate'] <= 2: request.session['finalstate'] = 2 outlierDetection = 'False' updateconfigSettingsJson['advance'] = configSettingsJson['advance'] for x in list(updateconfigSettingsJson['advance']['profiler']['outlierDetection'].keys()): if updateconfigSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True': outlierDetection = 'True' if outlierDetection == 'False': updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True' else: updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'False' updateconfigSettingsJson = advanceConfigfields(updateconfigSettingsJson) #print(configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier']) updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = 'None' normalizationtypes = updateconfigSettingsJson['advance']['profiler']['normalization'] for k in normalizationtypes.keys(): if updateconfigSettingsJson['advance']['profiler']['normalization'][k] == 'True': updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = k break #---------------- default Hypermarameter changes--- ----------Usnish-------------- hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json')) with open(hyperparamFile) as json_file: hyperparamConfig = json.load(json_file) context = {'tab': 'advconfig','temp': temp,'advconfig': updateconfigSettingsJson, 'config': config, 'selected_use_case': selected_use_case,'MachineLearningModels':MachineLearningModels, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"obModels":obModels,"problemType":problemType, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'selected': 'modeltraning','advance_help':ht.advance_help,'hyperparamConfig':hyperparamConfig} return context except Exception as e: print(e) def llmadvancesettings(configSettings,request): algo = '' for x in list(configSettings['basic']['algorithms']['llmFineTuning'].keys()): if 
configSettings['basic']['algorithms']['llmFineTuning'][x] == 'True': algo = x if algo == 'LLaMA-2': configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['fineTuningMethod'] = request.POST.get('llama2fullfinemethod') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['epochs'] = request.POST.get('llama2epochs') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['learning_rate'] = request.POST.get('llama2learningrate') if request.POST.get('llama2fullfinemethod') != 'Full Fine-Tuning': configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_rank'] = request.POST.get('llama2lorarank') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_alpha'] = request.POST.get('llama2loraalpha') if algo == 'LLaMA-2-Chat': configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['fineTuningMethod'] = request.POST.get('llama2chatfullfinemethod') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['epochs'] = request.POST.get('llmllama2chatepochs') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['learning_rate'] = request.POST.get('llama2chatlearningrate') if request.POST.get('llama2chatfullfinemethod') != 'Full Fine-Tuning': configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_rank'] = request.POST.get('llama2chatlorarank') configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_alpha'] = request.POST.get('llama2chatloraalpha') if algo == 'CodeLLaMA-2': configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['fineTuningMethod'] = request.POST.get('CodeLLaMA2fullfinemethod') configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['epochs'] = request.POST.get('CodeLLaMA2epochs') configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['learning_rate'] = request.POST.get('CodeLLaMA2learningrate') if request.POST.get('CodeLLaMA2fullfinemethod') != 'Full Fine-Tuning': configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_rank'] = request.POST.get('CodeLLaMA2lorarank') configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_alpha'] = request.POST.get('CodeLLaMA2loraalpha') if algo == 'Falcon': configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['fullFineTuning'] = request.POST.get('falconfullfinetuning') configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['epochs'] = request.POST.get('falconepochs') configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['learning_rate'] = request.POST.get('falconlearningrate') configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_rank'] = request.POST.get('falconlorarank') configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_alpha'] = request.POST.get('falconloraalpha') return configSettings def advanceConfigfields(configSettingsJson): try: configSettingsJson['advance']['mllearner_config']['EnsembleStacking'] = \ configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)'] configSettingsJson['advance']['mllearner_config']['EnsembleVoting'] = \ configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'LogisticRegression'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] 
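# The assignments that follow mirror the human-readable algorithm names used in the config
# (e.g. 'Naive Bayes', 'Support Vector Machine') onto alias keys without spaces or
# parentheses, so the advanced-configuration templates can look them up directly.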
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'Support Vector Machine'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'KNeighborsClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'DecisionTreeClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'RandomForestClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'GradientBoostingClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'ExtremeGradientBoostingClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'LightGradientBoostingClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'CategoricalBoostingClassifier'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams'][ 'Categorical Boosting (CatBoost)'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ 'Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ 'Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ 'Recurrent Neural Network (LSTM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] 
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'LogisticRegression'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'Logistic Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'NaiveBayes'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'Naive Bayes'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'SVM'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'Support Vector Machine'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'KNN'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'K Nearest Neighbors'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'DecisionTree'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'RandomForest'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'][ 'Random Forest'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network (LSTM)'] configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = \ configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = \ configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams'][ 'Dueling Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = \ configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = \ configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams'][ 'Dueling Deep Q Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ 'Convolutional Neural Network (1D)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] 
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ 'DecisionTreeRegressor'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ 'RandomForestRegressor'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ 'Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ 'Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams'][ 'Categorical Boosting (CatBoost)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ 'LinearRegression'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ 'Linear Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ 'DecisionTree'] = \ configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][ 'Decision Tree'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Neural Architecture Search'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'][ 'Neural Architecture Search'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Recurrent Neural Network (LSTM)'] 
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = \ configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'][ 'Convolutional Neural Network (1D)'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'OnlineLogisticRegression'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'Online Logistic Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'OnlineDecisionTreeClassifier'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'Online Decision Tree Classifier'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'OnlineSoftmaxRegression'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'Online Softmax Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'OnlineKNNClassifier'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams'][ 'Online KNN Classifier'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'OnlineLinearRegression'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'Online Linear Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'OnlineDecisionTreeRegressor'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'Online Decision Tree Regressor'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'OnlineKNNRegressor'] = \ configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams'][ 'Online KNN Regressor'] configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = \ configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = \ configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] if 'llmFineTuning' in configSettingsJson['advance']: configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = \ configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2'] configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = \ configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat'] configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = \ configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2'] configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = \ configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2'] configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = \ configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat'] configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = \ configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2'] configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \ configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2'] configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \ 
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat'] configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \ configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2'] if 'distributedlearner_config' in configSettingsJson['advance']: configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ 'DistributedXGBoost'] = \ configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ 'Distributed Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ 'DistributedLightGBM'] = \ configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][ 'Distributed Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ 'DistributedXGBoost'] = \ configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ 'Distributed Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ 'DistributedLightGBM'] = \ configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][ 'Distributed Light Gradient Boosting (LightGBM)'] problem_type = "" problemtypes = configSettingsJson['basic']['analysisType'] for k in problemtypes.keys(): if configSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break deepLearning = 'False' machineLearning = 'False' reinforcementLearning = 'False' selectAlgo = "" if problem_type.lower() in ['classification','regression']: for key in configSettingsJson['basic']['algorithms'][problem_type]: if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']: deepLearning = 'True' if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']: machineLearning = 'True' if key in ['Deep Q Network','Dueling Deep Q Network']: reinforcementLearning = 'True' elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611) machineLearning = 'True' configSettingsJson['basic']['deepLearning'] = deepLearning configSettingsJson['basic']['machineLearning'] = machineLearning configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning except Exception as e: print(e) return (configSettingsJson) def basicconfignex(request): #pemfilename = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','modelTraining','static','key','AION_GPU.pem')) try: updatedConfigFile = request.session['config_json'] f = open(updatedConfigFile, "r+") configSettingsData = f.read() configSettingsJson = json.loads(configSettingsData) #---------------- default Hypermarameter changes-------------Usnish-------------- hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json')) with open(hyperparamFile) as json_file: hyperparamConfig = 
json.load(json_file) #---------------- default Hypermarameter changes end-------------Usnish-------------- # ------------------ Debiasing Changes ------------------ categorical_features = [] class_list = [] MachineLearningModels = [] check_traget = configSettingsJson['basic']['targetFeature'] selectedDebiasingFeature = 'None' selectedDebiasingClass = 'None' selectedDebiasingAlgorithm = '' problemtypes = configSettingsJson['basic']['analysisType'] problem_type = "" for k in problemtypes.keys(): if configSettingsJson['basic']['analysisType'][k] == 'True': problem_type = k break if request.method == 'GET': for key in configSettingsJson['basic']['algorithms'][problem_type]: if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': MachineLearningModels.append(key) else: MachineLearningModels = request.POST.getlist('MachineLearningModels') if problem_type.lower() in ['classification','regression']: if check_traget != '': try: if 'deBiasing' in configSettingsJson['advance']['profiler']: deBiasing = configSettingsJson['advance']['profiler']['deBiasing'] selectedDebiasingFeature = deBiasing.get('FeatureName','None') selectedDebiasingClass = deBiasing.get('ClassName','None') selectedDebiasingAlgorithm = deBiasing.get('Algorithm','') if selectedDebiasingFeature != 'None': df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8',encoding_errors= 'replace') classeslist = [] classeslist = df[selectedDebiasingFeature].unique().tolist() for item in classeslist: class_list.append(item) else: class_list.append('None') except: pass feature_dict = configSettingsJson['advance']['profiler']['featureDict'] for feature_config in feature_dict: if feature_config.get('type', '') == 'categorical' and feature_config['feature'] != check_traget: categorical_features.append(feature_config['feature']) # ------------------ ------------------ #print(categorical_features) temp = {} temp['ModelName'] = request.session['UseCaseName'] temp['Version'] = request.session['ModelVersion'] config = {} config['modelName'] = request.session['UseCaseName'] config['modelVersion'] = request.session['ModelVersion'] config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature'] config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature'] config['FeaturesList'] = configSettingsJson['basic']['trainingFeatures'] config['unimportantFeatures'] = '' config['targetFeature'] = configSettingsJson['basic']['targetFeature'] deepLearning = 'False' machineLearning = 'False' reinforcementLearning = 'False' selectAlgo = "" print(problem_type) if problem_type.lower() in ['classification','regression']: for key in configSettingsJson['basic']['algorithms'][problem_type]: if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']: deepLearning = 'True' if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']: machineLearning = 'True' if key in ['Deep Q Network','Dueling Deep Q Network']: reinforcementLearning = 'True' elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611) 
machineLearning = 'True' configSettingsJson['basic']['deepLearning'] = deepLearning configSettingsJson['basic']['machineLearning'] = machineLearning configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning if problem_type in ['classification','regression','timeSeriesForecasting', 'timeSeriesAnomalyDetection', 'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997 for key in configSettingsJson['basic']['algorithms'][problem_type]: if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True': if selectAlgo != "": selectAlgo += ',' selectAlgo += key if problem_type not in ['classification','regression']: break if problem_type == 'objectDetection': from AION import pretrainedModels ptmObj = pretrainedModels() obModels = ptmObj.get_info(selectAlgo) else: obModels = {} problemType = problem_type selected_use_case = request.session['UseCaseName'] ModelVersion = request.session['ModelVersion'] ModelStatus = request.session['ModelStatus'] request.session['currentstate'] = 2 #configSettingsJson['advance']['remoteTraining']['ssh']['keyFilePath'] = pemfilename if request.session['finalstate'] <= 2: request.session['finalstate'] = 2 outlierDetection = 'False' for x in list(configSettingsJson['advance']['profiler']['outlierDetection'].keys()): if configSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True': outlierDetection = 'True' if outlierDetection == 'False': configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True' else: configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'False' if 'distributedLearning' not in configSettingsJson['basic']: configSettingsJson['basic']['distributedLearning'] = 'False' configSettingsJson['advance']['mllearner_config']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)'] configSettingsJson['advance']['mllearner_config']['EnsembleVoting']=configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['KNeighborsClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['DecisionTreeClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['RandomForestClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] 
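# The alias mappings continue below for the boosting classifiers, DL/RL learners, ensemble
# bagging members and the regressor parameters, mirroring the ones in advanceConfigfields.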
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LightGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['CategoricalBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Logistic Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['NaiveBayes'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Naive Bayes'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['SVM'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Support Vector Machine'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['KNN'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['K Nearest Neighbors'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['DecisionTree'] = 
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['RandomForest'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Random Forest'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network'] configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['DecisionTreeRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['RandomForestRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)'] 
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Linear Regression'] configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Decision Tree'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'].get('Neural Architecture Search') configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'].get('Neural Architecture Search') configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineLogisticRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineDecisionTreeClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineSoftmaxRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineKNNClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier'] 
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineLinearRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineDecisionTreeRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor'] configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineKNNRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor'] configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] if 'llmFineTuning' in configSettingsJson['advance']: configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2'] configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat'] configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2'] configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2'] configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat'] configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2'] configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \ configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2'] configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \ configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat'] configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \ configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2'] if 'distributedlearner_config' in configSettingsJson['advance']: configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedLightGBM'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedLightGBM'] = 
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Light Gradient Boosting (LightGBM)'] configSettingsJson['advance']['profiler']['normalizationMethod'] = 'None' normalizationtypes = configSettingsJson['advance']['profiler']['normalization'] for k in normalizationtypes.keys(): if configSettingsJson['advance']['profiler']['normalization'][k] == 'True': configSettingsJson['advance']['profiler']['normalizationMethod'] = k break context = {'temp': temp, 'advconfig': configSettingsJson, 'MachineLearningModels':MachineLearningModels,'hyperparamConfig':hyperparamConfig,'config': config, 'selected_use_case': selected_use_case, 'categorical_features': categorical_features, 'selectedDebiasingFeature': selectedDebiasingFeature, 'selectedDebiasingAlgorithm': selectedDebiasingAlgorithm, 'Class_list': class_list, 'selectedDebiasingClass': selectedDebiasingClass, #Debiasing Changes 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"obModels":obModels,"problemType":problemType, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'selected': 'modeltraning','advance_help':ht.advance_help} return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context={'erroradvance':'Fail to load advance config Json file'} return context
aionpipelinets.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import kfp import kfp.dsl as dsl import json from pathlib import Path class aionpipelinets(): containerRegistry = str() containerLabel = str() containerSecret = str() pipelineName = 'AION MLOps Pipeline {0}' exeCmd = 'python' codeFile = 'aionCode.py' mntPoint = '/aion' inputArg = '-i' msIP = '0.0.0.0' port = '8094' cachingStrategy = 'P0D' deafultVolume = '2Gi' volName = 'aion-pvc' volMode = 'ReadWriteMany' fileExt = '.tar.gz' fileName = 'aion_mlops_pipeline_{0}' containerMM = 'modelmonitoring' containerDI = 'dataingestion' containerDT = 'datatransformation' containerFE = 'featureengineering' containerMR = 'modelregistry' containerMS = 'modelserving' containerImage = '{0}/{1}:{2}' models = {} nameSeprator = '-' modelsLiteral = 'models' modelNameLiteral = 'modelname' msTemplate = '{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "{{workflow.name}}-{0}"}, "spec": {"containers": [{"name": "{0}", "image": "{1}", "command": ["python"], "args": ["aionCode.py", "-ip", "{2}", "-pn", "{3}"],"volumeMounts": [{"name": "aion-pvc", "mountPath": "{4}"}], "ports": [{"name": "http", "containerPort": {3}, "protocol": "TCP"}]}], "imagePullSecrets": [{"name": "{5}"}], "volumes": [{"name": "aion-pvc", "persistentVolumeClaim": {"claimName": "{{workflow.name}}-{6}"}}]}}' def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()): self.models = models self.containerRegistry = containerRegistry self.containerLabel = containerLabel self.containerSecret = containerSecret @dsl.pipeline( name=pipelineName.format(containerLabel), description=pipelineName.format(containerLabel), ) def aion_mlops(self, inputUri=str(), volSize=deafultVolume): vop = dsl.VolumeOp( name=self.volName + self.nameSeprator + self.containerLabel, resource_name=self.volName, modes=[self.volMode], size=volSize ) mm = dsl.ContainerOp( name=self.containerMM, image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, self.inputArg, inputUri, ], pvolumes={self.mntPoint: vop.volume} ) mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy di = dsl.ContainerOp( name=self.containerDI, image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: mm.pvolume} ) di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy dt = dsl.ContainerOp( name=self.containerDT, image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: di.pvolume} ) dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy fe = dsl.ContainerOp( name=self.containerFE, image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: dt.pvolume} ) 
fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy dictMT = {} listMTOps = [] for model in self.models[self.modelsLiteral]: modelName = model[self.modelNameLiteral] mt=dsl.ContainerOp( name=modelName, image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes={self.mntPoint: fe.pvolume}) mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy listMTOps.append(mt) dictMT[self.mntPoint]=mt.pvolume mr = dsl.ContainerOp( name=self.containerMR, image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel), command=self.exeCmd, arguments=[ self.codeFile, ], pvolumes=dictMT ).after(*tuple(listMTOps)) mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy msJson = self.msTemplate.replace(str({0}),self.containerMS).replace(str({1}),self.containerImage.format(self.containerRegistry,self.containerMS,self.containerLabel)).replace(str({2}),self.msIP).replace(str({3}),self.port).replace(str({4}),self.mntPoint).replace(str({5}),self.containerSecret).replace(str({6}),self.volName) ms = dsl.ResourceOp( name=self.containerMS + self.nameSeprator + self.containerLabel, k8s_resource=json.loads(msJson), ) ms.after(mr) def compilepl(self, targetPath=str()): filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt if targetPath != str(): filePath = Path(targetPath, filePath) kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath)) def executepl(self, kfhost=str()): client = kfp.Client(kfhost) client.create_run_from_pipeline_func(self.aion_mlops,arguments={})
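# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): shows how the
# aionpipelinets class above could be driven. The registry host, label and
# model names below are illustrative assumptions, not values from this repo.
if __name__ == '__main__':
    sample_models = {'models': [{'modelname': 'xgboost'}, {'modelname': 'lightgbm'}]}
    pl = aionpipelinets(sample_models, 'myregistry.example.com', 'v1', containerSecret='regcred')
    pl.compilepl()  # intended to write aion_mlops_pipeline_v1.tar.gz in the working directory
    # pl.executepl('http://kubeflow.example.local')  # submit the run to a Kubeflow Pipelines host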
llm_textdatalabelling.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import pandas as pd import requests import re import json import sys import time from appbe.aion_config import get_llm_data from appbe.dataPath import LOG_LOCATION from appbe.log_ut import logg import logging import openai import tiktoken openai.api_key = '' openai.api_base = '' openai.api_type = '' openai.api_version = '' deployment_name="Text-Datvinci-03" def generateLabelPerRecord(OrgData): OrgData['LabelFromGPT'] = OrgData['Head_Description'].apply(lambda x: \ generate_gpt3_response\ ("I am giving you the title and short description \ in the format [Title:Description], \ give me the related low level topics in one word in the \ format[Topic: your primary topic] along with top 5 important keywords in the \ format[Keywords: keywords]'{}' ".format(x))) #Cleaning the output as it is from ChatGPT OrgData['temp1'] = OrgData['LabelFromGPT'].apply(lambda x: (x.split('Topic:')[1]).replace(']','')) OrgData['LabelFromGPT'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[0]).replace(']','').rstrip()) OrgData['Keywords'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[1]).replace(']','')) OrgData = OrgData.drop(['temp1','Head_Description'], axis=1) return OrgData def generateLabelForChunkedRecords(OrgData): import io # OrgData = OrgData.head(120) Head_Description = {"Head_Description": [] } Head_Description2 = {"Head_Description": [] } Head_Description['Head_Description'] = OrgData['Head_Description'] strt_ind = 0 brk_ind = 0 # encoding = tiktoken.get_encoding('p50k_base') encoding = tiktoken.encoding_for_model("text-davinci-003") chunks = [] _cur_token_count = 0 _chunk_token_count = 0 for ind in Head_Description['Head_Description'].index: tokenized_text = encoding.encode(Head_Description['Head_Description'][ind]) _cur_token_count = len(tokenized_text) if _cur_token_count >= 600: OrgData['Head_Description'][ind] = OrgData['Head_Description'][ind][:1000] upto_ind = ind + 1 Head_Description2['Head_Description'] = OrgData['Head_Description'][brk_ind:ind] _chunk_token_count = encoding.encode(Head_Description2['Head_Description'].to_string()) if len(_chunk_token_count) >= 1200: brk_ind = ind # print(brk_ind) chunks.append(ind-1) _start_count = 0 if len(chunks) == 0: output = generate_gpt3_response("I am giving you datatable of text records \ for each record give me the related low level topics in one word as a data column called Topic\ and important top five keywords as a data column called Keywords. \ Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. 
'{}' ".format(Head_Description['Head_Description'])) out = io.StringIO(output[2:]) df = pd.read_csv(out, sep='\t') else: chunks.append(len(Head_Description['Head_Description'])) for ind_val in chunks: _cur_ind_val = ind_val _recordsSent = 0 Head_Description = {"Head_Description": [] } if _start_count == 0: Head_Description['Head_Description'] = OrgData['Head_Description'][strt_ind:_cur_ind_val].to_string() _recordsSent = len(OrgData['Head_Description'][strt_ind:_cur_ind_val]) else: Head_Description['Head_Description'] = OrgData['Head_Description'][_pre_ind_val:_cur_ind_val].to_string() _recordsSent = len(OrgData['Head_Description'][_pre_ind_val:_cur_ind_val]) _pre_ind_val = ind_val # if _start_count <= 5: output = generate_gpt3_response("I am giving you datatable of text records \ for each record give me the related low level topics in one word as a data column called Topic\ and important top five keywords as a data column called Keywords. \ Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. '{}' ".format(Head_Description['Head_Description'])) out = io.StringIO(output[2:]) if _start_count == 0: df = pd.read_csv(out, sep='\t') else: df_tmp = pd.read_csv(out, sep='\t') if len(df_tmp) > _recordsSent: df_tmp = df_tmp.head(_recordsSent) # df = df.append(df_tmp, ignore_index=True) df = pd.concat([df, df_tmp], ignore_index=True) _start_count += 1 OrgData['LabelFromGPT'] = df['Topic'] OrgData['Keywords'] = df['Keywords'] OrgData = OrgData.drop(['Head_Description'], axis=1) return OrgData # Text Data Labelling using LLM related changes # -------------------------------------------------------- def generateTextLabel(request, DATA_FILE_PATH): log = logging.getLogger('log_ux') key,url,api_type,api_version = get_llm_data() openai.api_key = key openai.api_base = url openai.api_type = api_type openai.api_version = api_version try: features = request.POST.getlist('InputFeatures') datapath = request.session['textdatapath'] OrgData = pd.read_csv(datapath) # OrgData = OrgData.head(2000) OrgData.fillna("", inplace = True) OrgData['Head_Description'] = OrgData[features[0]] if (len(features) > 1): for indx in range(len(features)): if (indx > 0): OrgData['Head_Description'] = OrgData['Head_Description'] + " "+ OrgData[features[indx]] # OrgData = generateLabelPerRecord(OrgData) OrgData = generateLabelForChunkedRecords(OrgData) df = OrgData filetimestamp = str(int(time.time())) datasetName = 'AION_TextLabelled' + filetimestamp+'.csv' dataFile = os.path.join(DATA_FILE_PATH,datasetName) df.to_csv(dataFile) request.session['texttopicdatapath'] = dataFile df_json = df.to_json(orient="records") df_json = json.loads(df_json) from appbe.dataPath import DATA_DIR from appbe.sqliteUtility import sqlite_db file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') newdata = {} newdata['datapath'] = [dataFile] newdata['datasetname'] = [datasetName] sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest') ################################################ context = {'data_topic':df_json, 'selected':'DataOperations'} return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() errormsg = str(e) if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg: errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. 
Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.' if 'Max retries exceeded with url' in errormsg: errormsg = 'Please make sure you have a good internet connection and access to the API endpoint for your resource.' fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'error': 'Failed to communicate with LLM','LLM' : 'openAI', 'selected':'DataOperations', 'errormessage':errormsg} log.info('generateTextLabel -- Error : Failed to generate Text-Label.. '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context #function to return the queried response def generate_gpt3_response(user_text, print_output=False): """ Query OpenAI GPT-3 for the specific key and get back a response :type user_text: str the user's text to query for :type print_output: boolean whether or not to print the raw output JSON """ time.sleep(2) completions = openai.Completion.create( engine=deployment_name, # Azure OpenAI deployment defined at module level; determines the quality, speed, and cost. temperature=0, # Level of creativity in the response prompt=user_text, # What the user typed in max_tokens=2000, # Maximum tokens in the prompt AND response n=1, # The number of completions to generate stop=None, # An optional setting to control response generation ) # Displaying the output can be helpful if things go wrong if print_output: print(completions) # Return the first choice's text # print(completions.choices[0].text) return completions.choices[0].text # --------------------------------------------------------
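# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, not part of the original module): once the
# OpenAI credentials have been loaded via get_llm_data() and assigned to the
# openai.* globals above, generate_gpt3_response() can be exercised directly.
# The prompt text is an illustrative example only.
if __name__ == '__main__':
    sample_prompt = ("I am giving you the title and short description in the format "
                     "[Title:Description], give me the related low level topics in one word "
                     "in the format [Topic: your primary topic]. "
                     "'Sensor outage:Temperature probe stopped reporting'")
    print(generate_gpt3_response(sample_prompt))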
gcsbuckets.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import os import rsa import boto3 #usnish import pandas as pd import time def add_new_GCSBucket(request): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) with open(file_path, 'r') as f: data = json.load(f) f.close() if data == '': data = [] except: data = [] print(request.POST["aionreferencename"]) print(request.POST["serviceaccountkey"]) print(request.POST["bucketname"]) if request.POST["aionreferencename"] =='' or request.POST["serviceaccountkey"] == '' or request.POST["bucketname"] == '' : return 'error' newdata = {} newdata['Name'] = request.POST["aionreferencename"] newdata['GCSServiceAccountKey'] = request.POST["serviceaccountkey"] newdata['GCSbucketname'] = request.POST["bucketname"] data.append(newdata) with open(file_path, 'w') as f: json.dump(data, f) f.close() return 'success' def get_gcs_bucket(): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] return data def read_gcs_bucket(name,filename,DATA_FILE_PATH): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] found = False print(data) for x in data: if x['Name'] == name: GCSServiceAccountKey = x['GCSServiceAccountKey'] GCSbucketname = x['GCSbucketname'] found = True break print(found) print(name) try: if found: import io from google.cloud import storage storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey) print(GCSServiceAccountKey) print(GCSbucketname) bucket = storage_client.get_bucket(GCSbucketname) blob = bucket.blob(filename) data = blob.download_as_string() df = pd.read_csv(io.BytesIO(data), encoding = 'utf-8', sep = ',',encoding_errors= 'replace') return 'Success',df except Exception as e: print(e) return 'Error', pd.DataFrame()
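# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption): 'my-gcs-ref' and 'data/train.csv' are
# illustrative values; the bucket reference must first be registered through
# add_new_GCSBucket() so it appears in config/gcsbuckets.conf.
if __name__ == '__main__':
    result = read_gcs_bucket('my-gcs-ref', 'data/train.csv', '/tmp')
    if result and result[0] == 'Success':
        print(result[1].head())
    else:
        print('Bucket reference not found or download failed')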
installPackage.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import shutil import subprocess import sys import glob from pathlib import Path import json from django.http import FileResponse from django.http import HttpResponse from importlib.metadata import version COMMON_PACKAGES = "'setuptools >=62.3.0','pandas==1.5.3','numpy==1.24.2','joblib==1.2.0','Cython==0.29.33','scipy==1.10.1',' scikit-learn==1.2.1','word2number==1.1','category_encoders==2.6.0'" DL_COMMON_PACKAGE = "'tensorflow==2.11.0'" TEXT_PACKAGES = "'spacy==3.5.0','nltk==3.8.1','textblob==0.15.3','demoji==1.1.0','bs4==0.0.1','text-unidecode==1.3','pyspellchecker==0.6.2','contractions==0.1.73','protobuf==3.19.6','lxml'" def createPackagePackage(request,id,version,usecasedetails,Existusecases): from appbe.pages import get_usecase_page #print('2') usecasedetail = usecasedetails.objects.get(id=id) models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version) modelid = models[0].id p = Existusecases.objects.get(id=modelid) deploymentfolder = str(p.DeployPath) modelname = p.ModelName.usecaseid version = p.Version deployed_code = 'AION' dockerimage = os.path.join(deploymentfolder,'publish','docker_image') dockersetup = os.path.join(deploymentfolder,'publish','docker_setup') tempPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'temp_'+modelname+'_'+str(version)) try: shutil.rmtree(tempPath,ignore_errors=True) except: pass shutil.copytree(deploymentfolder,tempPath) shutil.rmtree(os.path.join(tempPath,'publish'), ignore_errors=True) try: Path(os.path.join(deploymentfolder,'publish')).mkdir(parents=True, exist_ok=True) os.mkdir(dockersetup) except: shutil.rmtree(dockersetup,ignore_errors=True) os.mkdir(dockersetup) try: os.mkdir(dockerimage) except: shutil.rmtree(dockerimage,ignore_errors=True) os.mkdir(dockerimage) shutil.copytree(tempPath, os.path.join(dockersetup,deployed_code)) shutil.rmtree(tempPath) docker_setup = os.path.join(dockersetup,'AION') try: os.mkdir(dockerimage) except: pass requirementfilename = os.path.join(dockersetup,'requirements.txt') installfilename = os.path.join(dockersetup,'install.py') dockerfile = os.path.join(dockersetup,'Dockerfile') dockerdata='FROM python:3.10-slim-buster' dockerdata+='\n' dockerdata+='WORKDIR /app' dockerdata+='\n' dockerdata+='COPY AION AION' dockerdata+='\n' dockerdata+='''RUN apt-get update \ && apt-get install -y build-essential manpages-dev \ && apt-get install -y libgomp1 \ && python -m pip install --no-cache-dir -r AION/requirements.txt ''' f = open(dockerfile, "w") f.write(str(dockerdata)) f.close() try: try: import docker client = docker.from_env() client.containers.list() except: status,context,action = get_usecase_page(request,usecasedetails,Existusecases) context['Status'] = 'Error' context['Msg'] = 'Docker should be installed and running on your machine. 
To build the docker image manually, the setup script is available at the following location: \\n'+dockersetup.replace('\\', '/') return context command = 'docker pull python:3.10-slim-buster' os.system(command); subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+str(version),"."], cwd=dockersetup) subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+str(version)+".tar",modelname.lower()+":"+str(version)], cwd=dockersetup) dockerfilepath = os.path.join(dockersetup,modelname.lower()+"_"+str(version)+".tar") shutil.copyfile(dockerfilepath, os.path.join(dockerimage,modelname.lower()+"_"+str(version)+".tar")) shutil.rmtree(dockersetup) msg = 'Done' Status = 'SUCCESS' except Exception as e: msg = 'Error in docker images creation. To build manually docker image setup available in following location: '+dockersetup.replace('\\', '\\\\') Status = 'Fail' status,context,action = get_usecase_page(request,usecasedetails,Existusecases) context['Status'] = Status context['Msg'] = msg return context def downloadPackage(request,id,version,usecasedetails,Existusecases): try: if 'downloadstatus' in request.session: if request.session['downloadstatus'] == 'Downloading': return HttpResponse(json.dumps("Error Creating Package"), content_type="application/error") request.session['downloadstatus'] = 'Downloading' usecasedetail = usecasedetails.objects.get(id=id) models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version) modelid = models[0].id p = Existusecases.objects.get(id=modelid) deployPath = str(p.DeployPath) if os.path.isdir(os.path.join(deployPath,'publish','package')): for f in os.listdir(os.path.join(deployPath,'publish','package')): if f.endswith('whl'): os.remove(os.path.join(deployPath,'publish','package',f)) usecasename = p.ModelName.usecaseid Version = p.Version deployed_code = usecasename targetname = usecasename+'_'+str(Version) whl_dir_name = 'WHEEL_'+usecasename+'_'+str(Version) deployLocation = os.path.join (deployPath,'..',whl_dir_name) try: os.makedirs(deployLocation) except OSError as e: shutil.rmtree(deployLocation) os.makedirs(deployLocation) shutil.copytree(deployPath,os.path.join(deployLocation,deployed_code)) initstring = 'import os' initstring += '\n' initstring += 'import sys' initstring += '\n' initstring += 'sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))' filename = os.path.join(deployLocation,deployed_code,'__init__.py') f = open(filename, "w") f.write(str(initstring)) f.close() textdata=0 learner_type = 'ml' requirementfile = os.path.join(deployPath,'requirements.txt') install_requires = '' if os.path.exists(requirementfile): fileobj = open(requirementfile, 'r') requirePackages = fileobj.readlines() fileobj.close() for package in requirePackages: if install_requires != '': install_requires = install_requires+',' install_requires = install_requires+'\''+package.strip()+'\'' setup_string = 'from setuptools import setup,find_packages' setup_string += '\n' setup_string += 'setup(name=\''+deployed_code+'\',' setup_string += '\n' setup_string += 'version=\'1\',' setup_string += '\n' setup_string += 'packages = find_packages(),' setup_string += '\n' setup_string += 'install_requires = ['+install_requires+'],' setup_string += '\n' setup_string += 'package_data={"'+deployed_code+'.pytransform":["*.*"],"'+deployed_code+'":["*.sav","*.json"],"":["*","*/*","*/*/*"]}' setup_string += '\n' setup_string += ')' filename = os.path.join(deployLocation,'setup.py') f = open(filename, "w") 
f.write(str(setup_string)) f.close() subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"], cwd=deployLocation) shutil.copytree(os.path.join(deployLocation,'dist'),os.path.join(deployPath,'publish','package'),dirs_exist_ok=True) shutil.rmtree(deployLocation) if os.path.isdir(os.path.join(deployPath,'publish','package')): for f in os.listdir(os.path.join(deployPath,'publish','package')): if f.endswith('whl'): package = f zip_file = open(os.path.join(deployPath,'publish','package',package), 'rb') request.session['downloadstatus'] = 'Done' return FileResponse(zip_file) except Exception as e: print(e) request.session['downloadstatus'] = 'Done' return HttpResponse(json.dumps("Error Creating Package"), content_type="application/error") def installPackage(model,version,deployedPath): deployedPath = os.path.join(deployedPath,'publish','package') whlfilename='na' if os.path.isdir(deployedPath): for file in os.listdir(deployedPath): if file.endswith(".whl"): whlfilename = os.path.join(deployedPath,file) if whlfilename != 'na': subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model]) subprocess.check_call([sys.executable, "-m", "pip", "install","--no-dependencies",whlfilename]) status,pid,ip,port = checkModelServiceRunning(model) if status == 'Running': stopService(pid) startService(model,ip,port) return('Success') else: return('Installation Package not Found') def getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases): usecasedetail = usecasedetails.objects.get(id=id) models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version) return(models[0].id) def stopService(pid): import psutil p = psutil.Process(int(pid)) p.terminate() def checkModelServiceRunning(package_name): from os.path import expanduser home = expanduser("~") if platform.system() == 'Windows': modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services') else: modelServices = os.path.join(home,'HCLT','AION','target','services') filename = package_name+'_service.py' modelservicefile = os.path.join(modelServices,filename) status = 'Not Initialized' ip = '' port = '' pid = '' if os.path.exists(modelservicefile): status = 'Not Running' import psutil for proc in psutil.process_iter(): pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections']) if 'python' in pinfo['name']: if filename in pinfo['cmdline'][1]: status = 'Running' pid = pinfo['pid'] for x in pinfo['connections']: ip = x.laddr.ip port = x.laddr.port return(status,pid,ip,port) def startService(package_name,ip,portNo): file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','bin','model_service.py')) from os.path import expanduser home = expanduser("~") if platform.system() == 'Windows': modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services') else: modelServices = os.path.join(home,'HCLT','AION','target','services') if not os.path.isdir(modelServices): os.makedirs(modelServices) filename = package_name+'_service.py' modelservicefile = os.path.join(modelServices,filename) status = 'File Not Exist' if os.path.exists(modelservicefile): status = 'File Exist' r = ([line.split() for line in subprocess.check_output("tasklist").splitlines()]) for i in range(len(r)): if filename in r[i]: status = 'Running' if status == 'File Not Exist': shutil.copy(file_path,modelservicefile) with open(modelservicefile, 'r+') as file: content = file.read() file.seek(0, 0) line = 'from '+package_name+' import aion_performance' file.write(line+"\n") line = 'from 
'+package_name+' import aion_drift' file.write(line+ "\n") line = 'from '+package_name+' import featureslist' file.write(line+ "\n") line = 'from '+package_name+' import aion_prediction' file.write(line+ "\n") file.write(content) file.close() status = 'File Exist' if status == 'File Exist': command = "python "+modelservicefile+' '+str(portNo)+' '+str(ip) os.system('start cmd /c "'+command+'"') def checkInstalledPackge(package_name): import importlib.util spec = importlib.util.find_spec(package_name) if spec is None: return('Not Installed','','') else: if len(spec.submodule_search_locations) > 0: displaypath = os.path.join(spec.submodule_search_locations[0],'etc','display.json') with open(displaypath) as file: config = json.load(file) file.close() if 'usecasename' in config: modelName = config['usecasename'] else: modelName = 'NA' if 'version' in config: version = config['version'] else: version = 'NA' return('Installed',modelName,version)
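# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption): 'myusecase' is an illustrative package
# name; a real value would be the use case id of a trained model whose wheel
# has been built by downloadPackage() above.
if __name__ == '__main__':
    state, model_name, model_version = checkInstalledPackge('myusecase')
    print(state, model_name, model_version)
    if state == 'Installed':
        print(checkModelServiceRunning('myusecase'))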
leaderboard.py
import pandas as pd import numpy as np def get_leaderboard(file_content): matched_lines = [line.replace('Model:-', '') for line in file_content.split('\n') if "Model:-" in line] df = pd.DataFrame(columns = ['Model', 'Iterations', 'Score (%)', 'Score Type', 'Best Score (%)']) import re try: for line in matched_lines: if 'Model Name::' in line: MODEL = line.split('::') model = MODEL[1] if 'ScoringType::' in line: S = line.split('::') #SC = ScorTyp[1] if 'make_scorer'in line: ST = line.split('make_scorer') ScorTyp = ST[1] df['Score Type'] = np.where(df['Model'] == model, ScorTyp,df['Score Type']) if 'Validation Score::' in line: BS = line.split('::') BestSc = round(float(BS[1]), 4)*100 BestSc = abs(BestSc) df['Best Score (%)'] = np.where(df['Model'] == model, BestSc, df['Best Score (%)']) if 'Iteration::' in line: l = line.split('::') word = re.findall(r'\[(.*?)\]', l[1]) if ';, score=' in line: sc = line.split('score=') SCR = sc[1].split(' ') Score = round(float(SCR[0]), 4)*100 Score = abs(Score) # df = df.concat({'Model': model, 'Iterations': word,'Score (%)': Scor,'Score Type': '', 'Best Score (%)': 0}, ignore_index=True) newdf = pd.DataFrame([{'Model': model, 'Iterations': word,'Score (%)': Score,'Score Type': '', 'Best Score (%)': 0}]) df = pd.concat([df,newdf],axis=0, ignore_index=True) LIST = [] for i in range(int(len(df['Score (%)'])/5)): l = (sum(df['Score (%)'][5*i:5*(i+1)])/5) #LIST.concat(l) LIST.append(l) for i in range(len(LIST)): df['Score (%)'][5*i:5*(i+1)]=LIST[i] CL = [line.replace('------->Type of Model :classification', 'Model :classification') for line in file_content.split('\n') if "------->Type of Model :classification" in line] for l in CL: if 'Model :classification' in l: df = df.sort_values(by = ['Best Score (%)'], ascending=False) RE = [line.replace('------->Type of Model :regression', 'Model :regression') for line in file_content.split('\n') if "------->Type of Model :regression" in line] for l in RE: if 'Model :regression' in l: df = df.sort_values(by = ['Best Score (%)']) except Exception as e: print(e) return df if __name__ == "__main__": file_path = r"C:\Users\richard.mochahari\AppData\Local\Programs\HCLTech\AION\data\target\AI0335\1\log\model_training_logs.log" my_file = open(file_path, 'r') file_content = my_file.read() my_file.close() print(get_leaderboard(file_content))
publishDataBase.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
encryptedPackage.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os.path import time import subprocess import sys from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from appbe import installPackage from appbe import compute from appbe.models import getusercasestatus import json import pandas as pd import ntpath import shutil import platform from pathlib import Path from appbe.dataPath import DATA_DIR LOG_FILE_PATH = os.path.join(DATA_DIR,'logs') def encrptpackage_command(request,Existusecases,usecasedetails): command = request.POST.get('encryptedsubmit') kafkaSetting = kafka_setting() ruuningSetting = running_setting() computeinfrastructure = compute.readComputeConfig() modelID = request.POST.get('modelID') p = Existusecases.objects.get(id=modelID) usecasename = p.ModelName.UsecaseName usecaseid = p.ModelName.usecaseid runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename) installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename) try: tacking_url =request.get_host() except Exception as e: tacking_url = '127.0.0.1' usecasedetail = usecasedetails.objects.get(id=p.ModelName.id) usecase = usecasedetails.objects.all() models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS') for model in models: model.scoringCreteria = 'NA' model.score = 'NA' model.deploymodel = 'NA' if os.path.isdir(str(model.DeployPath)): modelPath = os.path.join(str(model.DeployPath),'etc','output.json') try: with open(modelPath) as file: outputconfig = json.load(file) file.close() if outputconfig['status'] == 'SUCCESS': model.scoringCreteria = outputconfig['data']['ScoreType'] model.score = outputconfig['data']['BestScore'] model.deploymodel = outputconfig['data']['BestModel'] model.modelType = outputconfig['data']['ModelType'] model.maacsupport = 'True' model.flserversupport = 'False' supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"] if model.deploymodel in supportedmodels: model.flserversupport = 'True' else: model.flserversupport = 'False' supportedmodels = ["Logistic Regression", "Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge","Extreme Gradient Boosting (XGBoost)","Light Gradient Boosting (LightGBM)","Categorical Boosting (CatBoost)"] if model.deploymodel in supportedmodels: model.maacsupport = 'True' else: model.maacsupport = 'False' supportedmodels = ["Extreme Gradient Boosting (XGBoost)"] if model.deploymodel in supportedmodels: model.encryptionsupport = 'True' else: model.encryptionsupport = 'False' except Exception as e: pass if command.lower() == 'secureclient': try: encryptedclient = os.path.join(str(p.DeployPath),'publish','SecureClient') shutil.rmtree(encryptedclient, ignore_errors=True) logPath = os.path.join(encryptedclient,'logs') scriptPath = os.path.join(encryptedclient,'script') modelPath = os.path.join(encryptedclient,'model') Path(modelPath).mkdir(parents=True, exist_ok=True) 
Path(encryptedclient).mkdir(parents=True, exist_ok=True) Path(logPath).mkdir(parents=True, exist_ok=True) Path(scriptPath).mkdir(parents=True, exist_ok=True) encryptedclientOrg = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','encryptedPackage')) modelProfiler = os.path.normpath(os.path.join(str(p.DeployPath),'script','inputprofiler.py')) modelselector = os.path.normpath(os.path.join(str(p.DeployPath),'aion_predict.py')) preprocessmodel = os.path.normpath(os.path.join(str(p.DeployPath),'model','preprocess_pipe.pkl')) # shutil.copy2(modelProfiler,scriptPath) # shutil.copy2(modelselector,scriptPath) ## For bug 15975 if os.path.exists(modelProfiler): shutil.copy2(modelProfiler,scriptPath) if os.path.exists(modelselector): shutil.copy2(modelselector,scriptPath) if os.path.exists(preprocessmodel): shutil.copy2(preprocessmodel,modelPath) if model.modelType.lower() == 'classification': try: opfile = os.path.normpath(os.path.join(encryptedclientOrg,'Readme.txt')) shutil.copy2(opfile,encryptedclient) opfile = os.path.normpath(os.path.join(encryptedclientOrg,'requirements.txt')) shutil.copy2(opfile,encryptedclient) except: #failed to copy readme,requirements.txt files pass opfile = os.path.normpath(os.path.join(encryptedclientOrg,'client','heMulticlass.py')) shutil.copy2(opfile,scriptPath) opfile = os.path.normpath(os.path.join(encryptedclientOrg,'client','aion_hemulticlient.py')) shutil.copy2(opfile,encryptedclient) os.rename(os.path.join(encryptedclient,'aion_hemulticlient.py'),os.path.join(encryptedclient,'aion_sclient.py')) elif model.modelType.lower() == 'regression': try: opfile = os.path.normpath(os.path.join(encryptedclientOrg,'Readme.txt')) shutil.copy2(opfile,encryptedclient) opfile = os.path.normpath(os.path.join(encryptedclientOrg,'requirements.txt')) shutil.copy2(opfile,encryptedclient) except Exception as e: print(e) #failed to copy readme,requirements.txt files pass opfile = os.path.normpath(os.path.join(encryptedclientOrg,'client','heRegression.py')) shutil.copy2(opfile,scriptPath) opfile = os.path.normpath(os.path.join(encryptedclientOrg,'client','aion_heregressionclient.py')) shutil.copy2(opfile,encryptedclient) os.rename(os.path.join(encryptedclient,'aion_hemulticlient.py'),os.path.join(encryptedclient,'aion_sclient.py')) except Exception as e: Status = 'Error' Msg = 'Secure client error: Check log file for more details' Status = 'SUCCESS' Msg = 'Secure Client Code Generated at '+encryptedclient path= encryptedclient #Task 9981 elif command.lower() == 'secureserver': try: configPath = os.path.join(str(p.DeployPath),'etc','secure_config.json') modelpath = usecasename+'_'+str(p.Version)+'.sav' config = {'model_name':modelpath} with open(configPath, "w") as outfile: json.dump(config, outfile) encryptedclientOrg = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','utilities','encryptedPackage')) if model.modelType.lower() == 'classification': opfile = os.path.normpath(os.path.join(encryptedclientOrg,'server','heMulticlass.py')) shutil.copy2(opfile,str(p.DeployPath)) try: os.remove(os.path.join(str(p.DeployPath),'aion_spredict.py')) except OSError: pass os.rename(os.path.join(str(p.DeployPath),'heMulticlass.py'),os.path.join(str(p.DeployPath),'aion_spredict.py')) Status = 'SUCCESS' Msg = 'Secure rest end point enabled http://'+str(tacking_url)+'/api/spredict?usecaseid='+usecaseid+'&version='+str(p.Version) except Exception as e: Status = 'Error' Msg = 'Secure rest end point error: Check log file for more details' 
nouc = 0 from appbe.pages import get_usecase_page status,context,action = get_usecase_page(request,usecasedetails,Existusecases) context['Status'] = Status context['Msg'] = Msg if command.lower() == 'secureclient': #Task 9981 context['path'] = path ''' selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) context = {'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'pid':pid,'ip':ip,'port':port,'usecaseid':p.ModelName.id,'Status':Status,'Msg':Msg} ''' return(context) def download_sclient(request,context): #Task 9981 import os from django.http import HttpResponse, Http404 try: file_name = 'SecureClient_'+request.POST.get('modelsignature') path = context['path'] file_path = shutil.make_archive(file_name, 'zip', path) if os.path.exists(file_path): with open(file_path, 'rb') as fh: response = HttpResponse(fh.read(),content_type='application/x-zip-compressed') response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path) os.remove(file_path) return response except: raise Http404
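# ---------------------------------------------------------------------------
# Hedged summary (descriptive comments added for orientation):
# encrptpackage_command() above reacts to two commands. 'secureclient' stages
# the client-side scripts (heMulticlass.py or heRegression.py, plus
# inputprofiler.py, aion_predict.py and preprocess_pipe.pkl when present)
# under <DeployPath>/publish/SecureClient, and download_sclient() later zips
# that folder with shutil.make_archive and streams it back as an HTTP
# response. 'secureserver' (classification models) copies the server-side
# heMulticlass.py into the deployment as aion_spredict.py and reports the
# /api/spredict endpoint for the use case.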
labels.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os from pathlib import Path def label_filename(request): filename = 'LabeledData.csv' labelPath = os.path.join(request.session['datalocation'],'AION','Labels') Path(labelPath).mkdir(parents=True, exist_ok=True) filePath = os.path.join(labelPath,filename) return filePath
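# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption): label_filename() only reads
# request.session['datalocation'], so a SimpleNamespace stands in for the
# Django request here; '/tmp/aion_data' is an illustrative path.
if __name__ == '__main__':
    from types import SimpleNamespace
    fake_request = SimpleNamespace(session={'datalocation': '/tmp/aion_data'})
    print(label_filename(fake_request))  # e.g. /tmp/aion_data/AION/Labels/LabeledData.csv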
checkConfiguration.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import pandas as pd def get_true_option(d, default_value=None): if isinstance(d, dict): for k, v in d.items(): if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): return k return default_value def get_true_options(d): options = [] if isinstance(d, dict): for k, v in d.items(): if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): options.append(k) return options def check_datetime(config): dateTime = config['basic']['dateTimeFeature'] if dateTime == '' or dateTime.lower()=='na': return False return True def check_dtype(d): flag= 1 for item in d: if item["type"].lower() != "text" and item["type"].lower() != "index": flag = 0 break return flag def check_text(d): #task 12627 flag= 0 for item in d: if item["type"].lower() == "text": flag = 1 break return flag def check_labelencoding(ftr_dict_list, target_ftr): for ftr_dict in ftr_dict_list: if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding'].lower()!='labelencoding': return False return True class timeseries(): def __init__(self,config): self.config=config if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true': #task 11997 self.problemType = 'timeSeriesForecasting' elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': self.problemType = 'timeSeriesAnomalyDetection' #task 11997 def validate_basic_config(self,status='pass',msg=None): #task 12627 date_time_status = check_datetime(self.config) text_status = check_text(self.config['advance']['profiler']['featureDict']) if not date_time_status and text_status: msg = 'For time series problem,\\n* One feature should be in datetime format\\n* Text feature not supported ' return 'error', msg elif not date_time_status: msg = 'For time series problem, one feature should be in datetime format' return 'error', msg elif text_status: msg = 'For time series problem, text feature not supported ' return 'error', msg selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997 if isinstance(self.config['basic']['targetFeature'],str): targetFeature = list(self.config['basic']['targetFeature'].split(',')) if self.problemType=='timeSeriesForecasting': #task 11997 if len(targetFeature) > 1: if 'ARIMA' in selected_algos: status = 'error' msg = "ARIMA is not supported for multilabel (target) feature" return status, msg if "FBPROPHET" in selected_algos: status = 'error' msg = "FBPROPHET is not supported for multiLabel (target) feature" return status, msg if 'MLP' in selected_algos: status = 'error' msg = "MLP is not supported for multiLabel (target) feature" return status, msg if len(targetFeature) == 1 and 'VAR' in selected_algos: status = 'error' msg = "VAR is not supported for singleLabel (target) feature" return status, msg elif self.problemType=='timeSeriesAnomalyDetection': #task 11997 anomChecker = anomaly(self.config) status, msg = 
anomChecker.validate_basic_config() return status, msg class anomaly(): def __init__(self,config): self.config = config if self.config['basic']['analysisType']['anomalyDetection'].lower()=='true': #task 11997 self.problemType = 'anomalyDetection' elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': #task 11997 self.problemType = 'timeSeriesAnomalyDetection' def validate_basic_config(self,status='pass',msg=None): #task 12627 date_time_status = check_datetime(self.config) targetFeature = self.config['basic']['targetFeature'] if self.problemType=='anomalyDetection' and date_time_status: status = 'error' msg = 'Date feature detected. For anomaly detection on time series change problem type to Time Series Anomaly Detection or drop Date feature' return status, msg if targetFeature.lower()!= 'na' and targetFeature!= "" and self.config['basic']['inlierLabels'] == '': status = 'error' msg = 'Please provide inlier label in case of supervised anomaly detection' return status, msg return status, msg # fall through: no rule violated, keep default 'pass' class survival(): def __init__(self,config): self.config = config self.problemType= 'survivalAnalysis' def validate_basic_config(self): dateTimeStatus = check_datetime(self.config) labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature']) if not dateTimeStatus and not labelencoding_status: msg = 'For survival analysis problem,\\n* One feature should be in datetime format\\n* Encoding of categorical features should be of label encoding ' return 'error', msg elif not dateTimeStatus: msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature' return 'error', msg elif not labelencoding_status: msg = 'Categorical features are expected to be label encoded for survival analysis problem. Please select it from feature encoding' return 'error', msg else: return 'pass', " " class associationrule(): def __init__(self,config): self.config=config def validate_basic_config(self,status='pass', msg=None): if self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == 'na' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == 'na': return "error","Make sure to configure invoice feature and item feature" elif self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] == self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']: return "error","Make sure the invoice feature and item feature are configured correctly" else: return "pass", " " class itemrating(): #task 6081 def __init__(self,config): self.config = config def validate_basic_config(self): data_loc = self.config['basic']['dataLocation'] data_length = len(pd.read_csv(data_loc)) if data_length >= 1000000: return 'error', "Recommender System can handle data up to 1 million records. Please try with a smaller dataset."
else: return "pass"," " class documentsimilarity(): def __init__(self,config): self.config=config def validate_basic_config(self,status='pass', msg=None): flag = check_dtype(self.config['advance']['profiler']['featureDict']) if flag == 1: return "pass", " " else: msg="Make sure to change the feature type from Categorical to Text and drop Numerical features for document similarity" return "error", msg def validate(config): try: problem_type = get_true_option(config['basic']['analysisType']) status = 'pass' msg = '' if 'timeseries' in problem_type.lower(): #task 11997 obj = timeseries(config) elif problem_type.lower() == 'survivalanalysis': obj = survival(config) elif problem_type.lower() == 'anomalydetection': obj = anomaly(config) elif problem_type.lower() in ['similarityidentification','contextualsearch']: obj = documentsimilarity(config) elif problem_type.lower() == 'recommendersystem': if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true': obj = associationrule(config) elif config['basic']['algorithms']['recommenderSystem']['ItemRating'].lower() == 'true': #task 6081 obj = itemrating(config) else: return 'pass',"" else: return 'pass',"" status,msg= obj.validate_basic_config() print(status, msg, 'io') return(status,msg) except Exception as e: print(e) def start_check(config): return validate(config)
service_url.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys def read_service_url_params(request): hosturl =request.get_host() url='http://'+hosturl+'/api/' return url def read_monitoring_service_url_params(request): hosturl =request.get_host() file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) file = open(file_path, "r") data = file.read() file.close() service_url = '127.0.0.1' service_port='60050' for line in data.splitlines(): if 'aion_service_url=' in line: service_url= line.split('=',1)[1] if 'aion_service_port=' in line: service_port= line.split('=',1)[1] url='http://'+hosturl+'/api/' return url def read_performance_service_url_params(request): hosturl =request.get_host() file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) file = open(file_path, "r") data = file.read() file.close() service_url = '127.0.0.1' service_port='60050' for line in data.splitlines(): if 'aion_service_url=' in line: service_url= line.split('=',1)[1] if 'aion_service_port=' in line: service_port= line.split('=',1)[1] url='http://'+hosturl+'/api/' return url def read_pattern_anomaly_url_params(request): hosturl =request.get_host() file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) file = open(file_path, "r") data = file.read() file.close() service_url = '127.0.0.1' service_port='60050' for line in data.splitlines(): if 'aion_service_url=' in line: service_url= line.split('=',1)[1] if 'aion_service_port=' in line: service_port= line.split('=',1)[1] url='http://'+hosturl+'/api/pattern_anomaly_predict/' return url def read_pattern_anomaly_setting_url_params(request): hosturl =request.get_host() file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config')) file = open(file_path, "r") data = file.read() file.close() service_url = '127.0.0.1' service_port='60050' for line in data.splitlines(): if 'aion_service_url=' in line: service_url= line.split('=',1)[1] if 'aion_service_port=' in line: service_port= line.split('=',1)[1] url='http://'+hosturl+'/api/pattern_anomaly_settings/' return url
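The read_* helpers above each repeat the same aion.config scan; a small sketch of that parsing step on its own (assuming the same key=value line format and the same defaults the functions use) could look like this:

    def read_service_settings(file_path, default_url='127.0.0.1', default_port='60050'):
        # Parse aion.config for the two service keys the helpers above look for.
        service_url, service_port = default_url, default_port
        with open(file_path, 'r') as f:
            for line in f.read().splitlines():
                if 'aion_service_url=' in line:
                    service_url = line.split('=', 1)[1]
                if 'aion_service_port=' in line:
                    service_port = line.split('=', 1)[1]
        return service_url, service_port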
data_io.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #Standard Library modules import sqlite3 import pandas as pd from pathlib import Path class sqlite_writer(): def __init__(self, target_path): self.target_path = Path(target_path) database_file = self.target_path.stem + '.db' self.db = sqlite_db(self.target_path, database_file) def file_exists(self, file): if file: return self.db.table_exists(file) else: return False def read(self, file): return self.db.read(file) def write(self, data, file): self.db.write(data, file) def close(self): self.db.close() class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name,condition=''): if condition == '': return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) else: return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def update(self,table_name,updates,condition): update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close()
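A usage sketch for the sqlite_writer wrapper above, assuming the target path is a directory in which the .db file is created (that is how sqlite_db joins location and database name); all names here are illustrative:

    from pathlib import Path
    import pandas as pd

    target = Path('output/run1')
    target.mkdir(parents=True, exist_ok=True)    # sqlite3.connect needs the folder to exist
    writer = sqlite_writer(target)               # backing file becomes output/run1/run1.db
    df = pd.DataFrame({'id': [1, 2], 'score': [0.9, 0.7]})
    if not writer.file_exists('predictions'):
        writer.write(df, 'predictions')          # first write creates the table
    stored = writer.read('predictions')          # returns a pandas DataFrame
    writer.close()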
help_Text.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' # def exploratorory_help(): # # # # return (data_overview_tip, feature_importance_tip, correlation_analysis_tip, exploratory_analysis_tip, data_deep_drive_tip, drift_tip) drift_tip = 'A data distribution represents a list of all of the possible values of each of the variables as provided in the data. Based on how the data values are distributed, it can be mapped to some well-known distribution curves so that the nature of the distribution can be shown.' data_overview_tip = 'Data Overview gives users a quick understanding of the distribution of values across the features and provides summary statistics of the features. It helps to uncover several uncommon and common issues such as unexpected feature values, missing feature values and data skew.' timeseries_analysis_tip = "Time Series Analysis provides information about the stationarity and seasonality of each of the features in the ingested data." feature_importance_tip = 'Feature Importance lists the features and grades them on a scale of relative importance' correlation_analysis_tip = 'Correlation Analysis provides the strength of relationships among various features. Values range from 0 (least correlation) to 1 (highest correlation). A high correlation means that two or more variables have a strong relationship with each other, while a weak correlation means that the variables are hardly related.' exploratory_analysis_tip = 'This provides an unsupervised clustering view of the data and provides insights on how the data is distributed. It helps profile the attributes of different clusters, gives insight into underlying patterns of different clusters and finds similarities in the data points.' data_deep_drive_tip = 'Data Deep Dive provides an interactive interface for exploring the relationship between data points across all the different features of a dataset. Each individual item in the visualization represents a data point. Data can be grouped and binned in multiple dimensions based on their feature values.' pair_graph_tip = 'It is used to present the correlations between two selected features.' fair_metrics_tip = 'It provides an interface to detect the bias in data associated with a sensitive or protected attribute that is used for training.' hopkins_tip =['Since the value is in between (0.0, 0.3), it indicates that the data has a high tendency to cluster.','Since the value is around 0.5, it indicates that the data distribution is random.','Since the value is in between (0.7, 0.99), it indicates that the data is regularly spaced.'] basic_help={'RowFiltering':'You can easily filter rows based on whether the column matches a condition or not'} advance_help = {'NumericFillMethod':'This is used to handle the null values present in the numerical dataset.','NumericFillMethod_Median':'Replace with middle value of the data set. Efficient and not affected by outliers.','NumericFillMethod_Mean':'Replace with average value of the columns. 
Affected by outliers.','NumericFillMethod_Max':'Replace all nulls with the maximum value in the column.','NumericFillMethod_KNN':'This implements the KNN algorithm to replace the null values','NumericFillMethod_Zero':'Replace the null values with 0','NumericFillMethod_Drop':'To remove all the null values in the dataset','NumericFillMethod_Min':'Replace all nulls with the minimum value present in the column','CategoricalFillMethod':'This is used to handle the null values present in the categorical dataset.','CategoricalFillMethod_Mode':'Replace with the most common values in the dataset. Suggested for categorical columns.','CategoricalFillMethod_Zero':'Replace the null values with 0.','CategoricalFillMethod_KNN':'This implements the KNN algorithm to replace the null values','CategoricalFillMethod_Drop':'To remove all the null values in the dataset.','OutlierDetection':'An unusual data point that differs significantly from other data points.','OutlierDetection_IQR':'Identifying the outliers with the interquartile range by dividing the data into quartiles.','OutlierDetection_Zscore':'If the z score of a data point is more than 3, it indicates that the data point is an outlier.','OutlierDetection_Isolation':'Randomly sub-sampled data is processed in a tree structure based on randomly selected features.','MissValueRatio':'Permitted missing value ratio, i.e. the number of missing values divided by the total number of observations. If the proportion of missing values in a column is more than this ratio, the column will be assumed to be an empty column','NumericFeatureRatio':'Used when a column is a mix of numeric and text values. If the ratio of numeric values to the number of rows is greater than the value mentioned, the column is assumed to be numeric and the remaining rows that have text values are removed','NormalStandard':'Standardize features by removing the mean and scaling to unit variance.','NormalMinMax':'This scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.','NormalLogNormal':'When a feature does not follow a linear distribution, this helps minimize skewness and map any distribution to a normal one as close as possible.','RemoveNoise':'Used to remove the noise present in the text data. Noise like special characters, unicode, emojis, hyperlinks, hashtags, html parameters etc.','ExpandContractions':'Contractions are words or combinations of words that are shortened by dropping letters and replacing them by an apostrophe.','Normalize':'Normalization is the process of converting a token into its base form. In the normalization process, the inflectional form of a word is removed so that the base form can be obtained.','Lemmatization':'It is a more effective option than stemming because it converts the word into its root word, rather than just stripping the suffixes.','Stemming':'It refers to the removal of suffixes, like ing, ly, s etc. by a simple rule-based approach.','NGrams':'The combination of multiple words used together.','PosTags':'The process of classifying words into their parts of speech and labeling them accordingly is known as part-of-speech tagging, or simply POS-tagging.','FeatureSelection':'Feature selection is for filtering irrelevant or redundant features from your dataset. 
The key difference between feature selection and extraction is that feature selection keeps a subset of the original features while feature extraction creates brand new ones.','FeatureEngineering':'Feature extraction is for creating a new, smaller set of features that still captures most of the useful information. Again, feature selection keeps a subset of the original features while feature extraction creates new ones.','PCA':'Principal Component Analysis (PCA) is a common feature extraction method in data science. Technically, PCA finds the eigenvectors of a covariance matrix with the highest eigenvalues and then uses those to project the data into a new subspace of equal or fewer dimensions.','StatisticalBased':'Features are selected on the basis of statistical measures. This method does not depend on the learning algorithm and chooses the features as a pre-processing step. The filter method filters out the irrelevant features and redundant columns from the model by using different metrics through ranking.','ModelBased':'Different tree-based methods of feature selection help us with feature importance to provide a way of selecting features. Here, feature importance specifies which feature has more importance in model building or has a great impact on the target variable.','CorrelationThreshold':'Correlation threshold for Statistical Based Feature Selection. Correlation analysis is done on the input features vs the target feature, and features having a correlation value greater than the threshold are picked for training','PValue':'P value threshold, also for Statistical Based Feature Selection','Variance':'For Feature Selection, features should have a variance higher than the threshold.','Normalization':'The goal of normalization is to change the values of numeric columns in the dataset to use a common scale, without distorting differences in the ranges of values or losing information.','SVD':'The singular value decomposition (SVD) provides another way to factorize a matrix into singular vectors and singular values. The SVD allows us to discover some of the same kind of information as the eigendecomposition.','ReplaceAcro':'Replace any abbreviation with its full form, e.g. {"DM":"DirectMessage"}', 'Factoranalysis':' This algorithm creates factors from the observed variables to represent the common variance i.e. variance due to correlation among the observed variables.','ICA':'ICA stands for Independent Components Analysis and it is a linear dimension reduction method, which transforms the dataset into columns of independent components.','optimizationmethod':'Optimization is the process where we train the model iteratively, searching for the minimum or maximum of the evaluation function.','Random':'Random search is a method in which random combinations of hyperparameters are selected and used to train a model. The best random hyperparameter combinations are used. Random search bears some similarity to grid search.','Grid':'Grid search is essentially an optimization algorithm which lets you select the best parameters for your optimization problem from a list of provided parameter options, hence automating the trial-and-error method.','Bays':'Bayesian optimization, in turn, takes into account past evaluations when choosing the hyperparameter set to evaluate next. This approach typically requires fewer iterations to get to the optimal set of hyperparameter values.','Stopwords':'Stop words are commonly used words that carry very little useful information and are therefore eliminated. 
They are passed in a list ["Stopword1","Stopword2"]','Tokenization':'It is essentially splitting a phrase, sentence, paragraph, or an entire text document into smaller units, such as individual words or terms. Choose the library for tokenization','Lemma':'In lemmatization, the transformation uses a dictionary to map different variants of a word back to its root format.','Stopwords1':'Stop words are commonly used words that carry very little useful information and are therefore eliminated. Select from the below libraries to remove them', 'Genetic':'The genetic algorithm repeatedly modifies a population of individual solutions. At each step, the genetic algorithm selects individuals at random from the current population to be parents and uses them to produce the children for the next generation. Over successive generations, the population evolves toward an optimal solution.','CV':'Cross-validation is a resampling procedure used to evaluate machine learning models on a limited data sample. The procedure has a single parameter called k that refers to the number of groups that a given data sample is to be split into.','Ensemble':'Ensemble learning is a general meta approach to machine learning that seeks better predictive performance by combining the predictions from multiple models.','EnsembleStatus':'Enable or disable according to the preference','TargetEncoding':'Target encoding is the process of replacing a categorical value with the mean of the target variable','OneHotEndoding':'Encode categorical features as a one-hot numeric array.','LabelEncoding':'Encode target labels with values between 0 and n_classes-1.','SMCStrategy':'A most_frequent model - The default. In regression the prediction is equal to the mean value, in classification the prediction is equal to the most common value.\n A uniform model - In regression, selects a random value from the y range. In classification, selects one of the labels at random.\n A stratified model - Draws the prediction from the distribution of the labels in the training data.\n A tree model - Trains a simple decision tree with a given depth. The depth can be customized using the max_depth parameter.','SMCGain':'The gain is calculated as:\ngain = (model score - simple score)/(perfect score - simple score)','SMCTreeDepth':'The max depth of the tree (used only if the simple model type is tree).','MIcondition':'Measure model average inference time (in seconds) per sample'}
create_dummy_dataset.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import random import string from sklearn import datasets import pandas as pd import names # pip install names import time import numpy as np import argparse import json import os import platform import time import sys from appbe.dataPath import CONFIG_FILE_PATH def randStr(chars = 'XYZABCDE', N=2): return ''.join(random.choice(chars) for _ in range(N)) def load_json_config(file): with open(file, 'r') as openfile: json_object = json.load(openfile) for key, value in json_object.items(): print(key, value) return json_object def gen_data_classification(number_samples=10000, number_numerical_features=25, file_name='file_class.csv', number_categorical_features=2, number_text_features=2, missing_proportion=0.1, number_informative=20, number_class=2, weights=[0.5,0.5], shift=0.0, value_range_dict={0:(1, 2)}): # TO-DO: need to add min max vlinear/non-linear try: features, output = datasets.make_classification( n_samples=number_samples, n_features=number_numerical_features, n_informative=number_informative, n_classes=number_class, weights = weights, # 20% of the targets will be 0, 80% will be 1. default is 50/50 shift=shift, ) columns = [] # Numerical Features for i in range(number_numerical_features): columns.append('Feature_' + str(i)) features = pd.DataFrame(features, columns=columns) # Setting min max value for features for col_name in features.columns: for key, value in value_range_dict.items(): if (str(features.columns.get_loc(col_name)) == key): for item in features[col_name].values: if item < value[0]: features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) if item > value[1]: features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) df_list = [] df_list.append(features) # Add Categorical Features for j in range(number_categorical_features): categorical_feature1_list = [] number_categories_per_feature = random.randint(2, 5) for i in range(number_categories_per_feature): categorical_feature1_list.append(randStr(N=3)) print("Categories of Categorical Feature " + str(j) + ": ", categorical_feature1_list) categorical_feature1 = [] for k in range(number_samples): categorical_feature1.append(random.choice(categorical_feature1_list)) categorical_feature1 = pd.DataFrame(categorical_feature1, columns=['Categorical'+str(j)]) df_list.append(categorical_feature1) # Add Text Features for l in range(number_text_features): text_feature = [] for k in range(number_samples): text_feature.append(names.get_full_name()) # text_feature.append(r.get_random_word()) text_feature = pd.DataFrame(text_feature, columns=['Name'+str(l)]) # text_feature = pd.DataFrame(text_feature, columns=['Word' + str(l)]) df_list.append(text_feature) output = pd.DataFrame(output, columns=['Target']) df_list.append(output) df_final = pd.concat(df_list, axis=1) for col in df_final.columns: # df_final.loc[df_final.sample(frac=0.1).index, col] = np.NaN df_final.loc[df_final[col].sample(frac=missing_proportion).index, col] = np.NaN # Check to see proportion 
of NaN values: # df.isnull().sum() / len(df) df_final.to_csv(file_name) return True except Exception as e: print(e) return False def gen_data_regression( number_samples=10000, number_numerical_features=25, file_name='file_regress.csv', number_categorical_features=2, number_text_features=2, missing_proportion=0.1, number_informative=10, number_target=1, bias=0.0, noise=0.0, value_range_dict={1:(5, 10)} ): try: features, output = datasets.make_regression( n_samples=number_samples, n_features=number_numerical_features, n_informative=number_informative, n_targets=number_target, bias=bias, noise=noise, ) columns = [] for i in range(number_numerical_features): columns.append('Feature_' + str(i)) features = pd.DataFrame(features, columns=columns) for col_name in features.columns: for key, value in value_range_dict.items(): if (str(features.columns.get_loc(col_name)) == key): for item in features[col_name].values: if item < value[0]: features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) if item > value[1]: features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) df_list = [] df_list.append(features) for j in range(number_categorical_features): categorical_feature1_list = [] number_categories_per_feature = random.randint(2, 5) for i in range(number_categories_per_feature): categorical_feature1_list.append(randStr(N=3)) print("Categories of Categorical Feature " + str(j) + ": ", categorical_feature1_list) categorical_feature1 = [] for k in range(number_samples): categorical_feature1.append(random.choice(categorical_feature1_list)) categorical_feature1 = pd.DataFrame(categorical_feature1, columns=['Categorical' + str(j)]) df_list.append(categorical_feature1) for l in range(number_text_features): text_feature = [] for k in range(number_samples): text_feature.append(names.get_full_name()) text_feature = pd.DataFrame(text_feature, columns=['Name'+str(l)]) df_list.append(text_feature) output = pd.DataFrame(output, columns=['Target']) df_list.append(output) df_final = pd.concat(df_list, axis=1) for col in df_final.columns: # df_final.loc[df_final.sample(frac=0.1).index, col] = np.NaN df_final.loc[df_final[col].sample(frac=missing_proportion).index, col] = np.NaN # Check to see proportion of NaN values: # df.isnull().sum() / len(df) df_final.to_csv(file_name) return True except Exception as e: print(e) return False def gen_data_series(univariate="True", start_time='2000-01-01 00:00', end_time='2022-12-31 00:00', number_samples=10000, number_numerical_features=25, file_name='file_regress.csv', number_categorical_features=2, # number_text_features=2, missing_proportion=0.1, number_informative=10, number_target=1, bias=0.0, noise=0.0, value_range_dict={1:(5, 10)} ): try: if univariate == "True": number_numerical_features = 1 number_categorical_features = 0 features, output = datasets.make_regression( n_samples=number_samples, n_features=number_numerical_features, n_informative=number_informative, n_targets=number_target, bias=bias, noise=noise, ) columns = [] # Numerical Features for i in range(number_numerical_features): columns.append('Feature_' + str(i)) features = pd.DataFrame(features, columns=columns) # Setting min max value for features for col_name in features.columns: for key, value in value_range_dict.items(): if (str(features.columns.get_loc(col_name)) == key): for item in features[col_name].values: if item < value[0]: features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) if item > value[1]: 
features.loc[features[col_name] == item, col_name] = random.uniform(value[0],value[1]) df_list = [] df_list.append(features) # Add Categorical Features for j in range(number_categorical_features): categorical_feature1_list = [] number_categories_per_feature = random.randint(2, 5) for i in range(number_categories_per_feature): categorical_feature1_list.append(randStr(N=3)) print("Categories of Categorical Feature " + str(j) + ": ", categorical_feature1_list) categorical_feature1 = [] for k in range(number_samples): categorical_feature1.append(random.choice(categorical_feature1_list)) categorical_feature1 = pd.DataFrame(categorical_feature1, columns=['Categorical'+str(j)]) df_list.append(categorical_feature1) # df2['date'] = pd.date_range(start='1890-01-01', freq="sec",periods=len(df2)) time_feature = pd.date_range(start=start_time, end=end_time, periods=number_samples) #freq="1sec" time_feature = pd.DataFrame(time_feature, columns=['Date']) # df_list.append(time_feature) df_list.insert(0, time_feature) output = pd.DataFrame(output, columns=['Feature_' + str(number_numerical_features)]) if univariate != "True": df_list.append(output) df_final = pd.concat(df_list, axis=1) for col in df_final.columns: # df_final.loc[df_final.sample(frac=0.1).index, col] = np.NaN df_final.loc[df_final[col].sample(frac=missing_proportion).index, col] = np.NaN # Check to see proportion of NaN values: # df.isnull().sum() / len(df) df_final.to_csv(file_name) return True except Exception as e: print(e) return False def data_generated_csv(): datajson = os.path.join(CONFIG_FILE_PATH, 'data_generated.json') with open(datajson, 'r+') as f: dictionary = json.load(f) # f.close() if dictionary.get('problemType') == 'classification': number_samples = dictionary.get("number_samples") number_numerical_features = dictionary.get("number_numerical_features") number_categorical_features = dictionary.get("number_categorical_features") number_text_features = dictionary.get("number_text_features") missing_proportion = dictionary.get("missing_proportion") number_informative = dictionary.get("number_informative") number_class = dictionary.get("number_class") weights = dictionary.get("weights") shift = dictionary.get("shift") data_path = dictionary.get("data_path") value_range_dict = dictionary.get("value_range_dict") gen_data_classification(number_samples=number_samples, number_numerical_features=number_numerical_features, file_name=data_path, number_categorical_features=number_categorical_features, number_text_features=number_text_features, missing_proportion=missing_proportion, number_informative=number_informative, number_class=number_class, weights=weights, shift=shift, value_range_dict=value_range_dict) elif dictionary.get('problemType') == 'regression': number_samples = dictionary.get("number_samples") number_numerical_features = dictionary.get("number_numerical_features") number_categorical_features = dictionary.get("number_categorical_features") number_text_features = dictionary.get("number_text_features") missing_proportion = dictionary.get("missing_proportion") number_informative = dictionary.get("number_informative") number_target = dictionary.get("number_target") bias = dictionary.get("bias") noise = dictionary.get("noise") data_path = dictionary.get("data_path") value_range_dict = dictionary.get("value_range_dict") gen_data_regression(number_samples=number_samples, number_numerical_features=number_numerical_features, file_name=data_path, number_categorical_features=number_categorical_features, 
number_text_features=number_text_features, missing_proportion=missing_proportion, number_informative=number_informative, number_target=number_target, bias=bias, noise=noise, value_range_dict=value_range_dict) elif dictionary.get('problemType') == 'timeseriesforecasting': #task 11997 data_path = dictionary.get("data_path") is_univariate = dictionary.get("univariate") number_samples = dictionary.get("number_samples") number_numerical_features = dictionary.get("number_numerical_features") number_categorical_features = dictionary.get("number_categorical_features") missing_proportion = dictionary.get("missing_proportion") number_informative = dictionary.get("number_informative") number_target = dictionary.get("number_target") bias = dictionary.get("bias") noise = dictionary.get("noise") value_range_dict = dictionary.get("value_range_dict") gen_data_series(univariate=is_univariate, number_samples=number_samples, number_numerical_features=number_numerical_features, file_name=data_path, number_categorical_features=number_categorical_features, # number_text_features=2, missing_proportion=missing_proportion, number_informative=number_informative, number_target=number_target, bias=bias, noise=noise, value_range_dict=value_range_dict) if __name__ == "__main__": data_generated_csv()
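A direct-call sketch of the classification generator above; all values are illustrative, and note that value_range_dict keys are matched against str(column index), so string keys are needed for the clamping branch to trigger:

    gen_data_classification(
        number_samples=1000,
        number_numerical_features=10,
        file_name='demo_class.csv',
        number_categorical_features=2,
        number_text_features=1,
        missing_proportion=0.05,
        number_informative=6,
        number_class=2,
        weights=[0.7, 0.3],
        shift=0.0,
        value_range_dict={'0': (1, 2)},   # clamp Feature_0 into the range 1..2
    )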
gcsbucketsDB.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import sqlite3 from pathlib import Path import json import os import rsa import boto3 #usnish import pandas as pd import time import sqlite3 class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() def table_exists(self, name): query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() return len(listOfTables) > 0 def read_data(self, table_name): query = f"SELECT * FROM {table_name}" row = self.cursor.execute(query).fetchall() return list(row) #return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def delete_record(self,table_name,col_name, col_value): try: query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'" self.conn.execute(query) self.conn.commit() return 'success' except Exception as e : print(str(e)) print("Deletion Failed") return 'error' def get_data(self,table_name,col_name,col_value): query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'" row = self.cursor.execute(query).fetchone() if(row == None): return [] return list(row) def write_data(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def close(self): self.conn.close() def add_new_GCSBucket(request): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') print(request.POST["aionreferencename"]) print(request.POST["serviceaccountkey"]) print(request.POST["bucketname"]) if request.POST["aionreferencename"] =='' or request.POST["serviceaccountkey"] == '' or request.POST["bucketname"] == '' : return 'error' newdata = {} newdata['Name'] = [request.POST["aionreferencename"]] newdata['GCSServiceAccountKey'] = [request.POST["serviceaccountkey"]] newdata['GCSbucketname'] = [request.POST["bucketname"]] name = request.POST["aionreferencename"] if sqlite_obj.table_exists("gcsbucket"): if(len(sqlite_obj.get_data("gcsbucket",'Name',name))>0): return 'error1' sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcsbucket') except: return 'error' def get_gcs_bucket(): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = 
sqlite_db(file_path,'config.db') temp_data = sqlite_obj.read_data('gcsbucket') data = [] for x in temp_data: data_dict = {} data_dict['Name'] = x[0] data_dict['GCSServiceAccountKey'] = x[1] data_dict['GCSbucketname'] = x[2] data.append(data_dict) except Exception as e: print(e) data = [] return data def read_gcs_bucket(name,filename,DATA_FILE_PATH): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') data = sqlite_obj.get_data("gcsbucket",'Name',name) except: data = [] found = False if len(data)!=0: GCSServiceAccountKey = data[1] GCSbucketname = data[2] found = True #print(found) #print(name) try: if found: import io from google.cloud import storage #print(GCSServiceAccountKey) #print(GCSbucketname) try: storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey) bucket = storage_client.get_bucket(GCSbucketname) blob = bucket.blob(filename) data = blob.download_as_string() df = pd.read_csv(io.BytesIO(data), encoding = 'utf-8', sep = ',',encoding_errors= 'replace') except Exception as e: return "Error",str(e), pd.DataFrame() return 'Success',"",df except Exception as e: print(e) return 'Error',"Please check bucket configuration",pd.DataFrame() def remove_gcs_bucket(name): from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') return sqlite_obj.delete_record('gcsbucket','Name',name)
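A usage sketch for the GCS helpers above, assuming a bucket reference named 'my_gcs_ref' was registered earlier through add_new_GCSBucket and that its service-account key points at valid credentials (bucket, file and folder names are illustrative):

    status, msg, df = read_gcs_bucket('my_gcs_ref', 'input/sales.csv', '/tmp/aion_data')
    if status == 'Success':
        print(df.head())
    else:
        print('GCS read failed:', msg)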
dataIngestion.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import pandas as pd import requests from io import StringIO import json import time import shutil import sys from appbe import compute from appbe.aion_config import kafka_setting from appbe.aion_config import running_setting from appbe.s3bucketsDB import get_s3_bucket from appbe.gcsbucketsDB import get_gcs_bucket from appbe.azureStorageDB import get_azureStorage from appbe.aion_config import eda_setting from appbe.s3bucketsDB import read_s3_bucket from appbe.gcsbucketsDB import read_gcs_bucket from appbe.azureStorageDB import read_azureStorage from appbe.validatecsv import csv_validator import time from appbe.dataPath import LOG_LOCATION from appbe.dataPath import DATA_FILE_PATH from appbe.log_ut import logg import logging def langchain_splittext(filename): try: from langchain.document_loaders import PyPDFLoader from langchain.text_splitter import RecursiveCharacterTextSplitter loader = PyPDFLoader(filename) pages = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50) texts = text_splitter.split_documents(pages) return(texts) except Exception as e: print(e) def pd_lanfchain_textsplitter(datalocation,data): try: document=[] for i in range(len(data)): filename = os.path.join(datalocation,data.loc[i,"File"]) out = langchain_splittext(filename) for doc in out: print(doc.page_content) document.append(doc.page_content) my_data = pd.DataFrame({'instruction': document}) n = 1 my_data["response"] = my_data["instruction"].tolist()[n:] + my_data["instruction"].tolist()[:n] filetimestamp = str(int(time.time())) filename = os.path.join(DATA_FILE_PATH, 'LLMTuning_' + filetimestamp+'.csv') my_data.to_csv(filename,index=False) return(filename) except Exception as e: print(e) def getimpfeatures(dataFile, numberoffeatures,delimiter,textqualifier): imp_features = [] if numberoffeatures > 20: try: from appbe.eda import ux_eda eda_obj = ux_eda(dataFile,delimiter,textqualifier,optimize=1) if eda_obj.getNumericFeatureCount() >= 2: pca_map = eda_obj.getPCATop10Features() imp_features = pca_map.index.values.tolist() except Exception as e: print(e) pass return imp_features def pdf2text(inpFileName): try: from pypdf import PdfReader reader = PdfReader(inpFileName) number_of_pages = len(reader.pages) text="" OrgTextOutputForFile="" for i in range(number_of_pages) : page = reader.pages[i] text1 = page.extract_text() text=text+text1 import nltk tokens = nltk.sent_tokenize(text) for sentence in tokens: sentence=sentence.replace("\n", " ") if len(sentence.split()) < 4 : continue if len(str(sentence.split(',')).split()) < 8 : continue if any(chr.isdigit() for chr in sentence) : continue OrgTextOutputForFile= OrgTextOutputForFile+str(sentence.strip()) #print("\n\n\n\nOrgTextOutputForFile------------->\n\n\n",OrgTextOutputForFile) return (OrgTextOutputForFile) except Exception as e: print("Encountered exception. 
{0}".format(e)) def getcommonfields(): computeinfrastructure = compute.readComputeConfig() from appbe.aion_config import settings usecasetab = settings() kafkaSetting = kafka_setting() ruuningSetting = running_setting() context = {'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab,'azurestorage':get_azureStorage()} return context def getusercasestatus(request): if 'UseCaseName' in request.session: selected_use_case = request.session['UseCaseName'] else: selected_use_case = 'Not Defined' if 'ModelVersion' in request.session: ModelVersion = request.session['ModelVersion'] else: ModelVersion = 0 if 'ModelStatus' in request.session: ModelStatus = request.session['ModelStatus'] else: ModelStatus = 'Not Trained' return selected_use_case,ModelVersion,ModelStatus def delimitedsetting(delimiter='',textqualifier='',other=''): if delimiter != '': if delimiter.lower() == 'tab' or delimiter.lower() == '\t': delimiter = '\t' elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';': delimiter = ';' elif delimiter.lower() == 'comma' or delimiter.lower() == ',': delimiter = ',' elif delimiter.lower() == 'space' or delimiter.lower() == ' ': delimiter = ' ' elif delimiter.lower() == 'other' or other.lower() != '': if other != '': delimiter = other else: delimiter = ',' elif delimiter != '': delimiter = delimiter else: delimiter = ',' else: delimiter = ',' if textqualifier == '': textqualifier = '"' return delimiter,textqualifier def multipleZipExtraction(data,DATA_FILE_PATH): from zipfile import ZipFile try: import glob filetimestamp = str(int(time.time())) extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp) os.mkdir(extracted_data) with ZipFile(data, 'r') as zObject: zObject.extractall(extracted_data) csv_files = glob.glob(r'{}\*.{}'.format(extracted_data,'csv')) df_csv_append = pd.DataFrame() for file in csv_files: df = pd.read_csv(file) df_csv_append = df_csv_append.append(df, ignore_index=True) for f in os.listdir(extracted_data): os.remove(os.path.join(extracted_data, f)) #os.mkdir(extracted_data) combined_data = os.path.join(extracted_data,filetimestamp+'.csv') df_csv_append.to_csv(combined_data) return combined_data except Exception as e: if os.path.exists(extracted_data): shutil.rmtree(extracted_data) #print (e) return '' def tarFileExtraction(data,DATA_FILE_PATH): try: import tarfile filetimestamp = str(int(time.time())) extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp) os.mkdir(extracted_data) if data.endswith('tar'): file = tarfile.open(data) file.extractall(extracted_data) file.close() for f in os.listdir(extracted_data): if f.endswith('csv') or f.endswith('tsv'): dataFile = os.path.join(extracted_data,f) return dataFile except Exception as e: if os.path.exists(extracted_data): shutil.rmtree(extracted_data) print (e) return '' # ------ changes for the bug 10379 starts---------------- By Usnish ------ def checkRamAfterLoading(dataPath): import psutil availableRam = psutil.virtual_memory()[1]/1e9 filesize = os.path.getsize(dataPath)/1e9 return availableRam < 2*filesize def checkRamBeforeLoading(dataPath): import psutil filesize = os.path.getsize(dataPath)/1e9 totalRam = psutil.virtual_memory()[0] / 1e9 if( filesize > 0.8 * totalRam): return "File size is larger than the 80% of Total RAM." 
return "" # ------ changes for the bug 10379 ends---------------- By Usnish ------ # ---------- 10012:Decision Threshold related Changes S T A R T ---------- # This method is used to check If -> # 80% of available RAM size is greater than ingested data (or not). def checkRAMThreshold(dataPath): import psutil availableRam = psutil.virtual_memory()[1]/1e9 filesize = os.path.getsize(dataPath)/1e9 return (0.8 * availableRam) > filesize # ---------------------- E N D ---------------------- # Text Data Labelling using LLM related changes # -------------------------------------------------------- def ingestTextData(request, DATA_FILE_PATH): log = logging.getLogger('log_ux') try: Datapath = request.FILES['DataFilePath'] from appbe.eda import ux_eda ext = str(Datapath).split('.')[-1] request.session['uploadfiletype'] = 'Local' request.session['datatype'] = 'Normal' filetimestamp = str(int(time.time())) if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) else: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) with open(dataFile, 'wb+') as destination: for chunk in Datapath.chunks(): destination.write(chunk) destination.close() dataPath = dataFile request.session['textdatapath'] = dataPath # import pdb # pdb.set_trace() # check_df = pd.read_csv(dataPath) eda_obj = ux_eda(dataPath) check_df = eda_obj.getdata() df_top = check_df.head(10) df_json = df_top.to_json(orient="records") df_json = json.loads(df_json) # featuresList = check_df.columns.tolist() features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures() noTextFeature = False if len(textFeature) == 0: noTextFeature = True context = {'raw_data':df_json, 'featuresList':textFeature, 'selected':'DataOperations', 'noTextFeature':noTextFeature} return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'error': 'Failed to read data','emptycsv' : 'emptycsv'} log.info('Text Data Ingestion -- Error : Failed to read data, '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context # ---------------------- E N D --------------------------- def ingestDataFromFile(request,DATA_FILE_PATH): log = logging.getLogger('log_ux') delimiter,textqualifier = delimitedsetting(request.POST.get('delimiters'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value')) request.session['delimiter'] = delimiter request.session['textqualifier'] = textqualifier context = getcommonfields() selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,}) try: t1 = time.time() request.session['uploadfiletype'] = '' request.session['uploadLocation'] = '' data_is_large = False check_df = pd.DataFrame() if request.method == 'POST': if 'ModelVersion' in request.session: ModelVersion = request.session['ModelVersion'] else: ModelVersion = 0 if 'ModelName' not in request.session: movenext = False request.session['currentstate'] = 0 context.update({'tab': 'tabconfigure', 'error': 'Please Create/Select the Use Case First', 'movenext': movenext,'currentstate': request.session['currentstate']}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' 
: ' + '0 ' + 'sec' + ' : ' + 'Error : Please Create/Select the Use Case First') return context else: type = request.POST.get("optradio") if type == "s3Bucket": try: request.session['uploadfiletype'] = 'S3Bucket' bucketname = request.POST.get('s3bucketname') fileName = request.POST.get('s3file') if fileName != '': status,msg,check_df = read_s3_bucket(bucketname,fileName,DATA_FILE_PATH) if status == 'Success': filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile, index=False) request.session['datalocation'] = dataFile else : request.session['currentstate'] = 0 #usnish context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : ' + str(msg)) return context else: #usnish request.session['currentstate'] = 0 context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') return context except Exception as e: request.session['currentstate'] = 0 context.update({'error': str(e),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+ str(e)) return context '''request.session['datalocation'] = "S3"''' # -------------------------------- Graviton-Integration Changes S T A R T -------------------------------- elif type == "graviton": try: dataServiceId = request.POST.get('dataservice') metadataId = request.POST.get('metadata') data = [] from appbe.aion_config import get_graviton_data graviton_url,graviton_userid = get_graviton_data() gravitonURL = graviton_url gravitonUserId = graviton_userid # url = 'https://xenius.azurewebsites.net/api/getdata?userid=1&dataserviceid='+str(dataserviceId) +'&metadataid=' +str(metadataId) url = gravitonURL + 'getdata?userid=' + gravitonUserId +'&dataserviceid='+str(dataServiceId) +'&metadataid=' +str(metadataId) print(url) response = requests.get(url) statuscode = response.status_code if statuscode == 200: json_dictionary = json.loads(response.content) data = json_dictionary['result'] firstElement = next(iter(data[0].keys())) check_df = pd.DataFrame.from_dict(data[0][firstElement]) filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile, index=False) request.session['uploadfiletype'] = 'Graviton' request.session['datalocation'] = str(dataFile) except Exception as e: print(e) request.session['currentstate'] = 0 context.update({'error':'Check log file for more details','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error :'+str(e)) return context # ------------------------------------------------ E N D ------------------------------------------------- elif type == "azurestorage": try: request.session['uploadfiletype'] = 'AzureStorage' azurename = request.POST.get('azurename') directoryname = request.POST.get('azuredirectory') if directoryname != '': status,msg,check_df = read_azureStorage(azurename,directoryname,DATA_FILE_PATH) if status == 'Success': filetimestamp = str(int(time.time())) dataFile = 
os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile, index=False) '''request.session['datalocation'] = "S3"''' request.session['datalocation'] = dataFile else : request.session['currentstate'] = 0 #usnish context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(msg)) return context else: #usnish request.session['currentstate'] = 0 context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') return context except Exception as e: print(e) request.session['currentstate'] = 0 context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, '+str(e)) return context elif type == "googleBucket": try: request.session['uploadfiletype'] = 'GCPBucket' bucketname = request.POST.get('gcpbucketname') fileName = request.POST.get('file1') if fileName != '': status,msg,check_df = read_gcs_bucket(bucketname,fileName,DATA_FILE_PATH) if status == 'Success': filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile, index=False) '''request.session['datalocation'] = "S3"''' request.session['datalocation'] = dataFile else : request.session['currentstate'] = 0 #usnish context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(msg)) return context else: #usnish request.session['currentstate'] = 0 context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name') return context except Exception as e: request.session['currentstate'] = 0 context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e)) return context elif type == "url": try: request.session['uploadfiletype'] = 'URL' url_text = request.POST.get('urlpathinput') log.info('Data ingesttion from URL..') request.session['uploadLocation'] = url_text url = url_text check_df = pd.read_csv(url) filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile,index=False) request.session['datalocation'] = dataFile except Exception as e: request.session['currentstate'] = 0 e = str(e) print(e) if e.find("tokenizing")!=-1: error = "This is not an open source URL to access data" context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'}) elif e.find("connection")!=-1: error = "Can not access the URL through HCL network, please try with other network" context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'}) else: error = 'Please provide a correct URL' 
context.update({'error': error,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+error + ', '+str(e)) return context elif type == "nifi": try: request.session['uploadfiletype'] = 'Nifi' log.info('Data ingesttion from Nifi..') url_text = request.POST.get('nifiurlpathinput') request.session['uploadLocation'] = url_text response = requests.get(url_text) csv_str = response.content.decode('utf-8') check_df = pd.read_csv(StringIO(csv_str)) filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile,index=False) request.session['datalocation'] = dataFile except requests.exceptions.ConnectionError: request.session['currentstate'] = 0 context.update({'error': 'Connection Error','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:Connection Error') return context except Exception as e: print(e) request.session['currentstate'] = 0 e = str(e) context.update({'error': e,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+str(e)) return context elif type == "tblaiondata": try: dataset = request.POST.get('datasetname') print('dataset',dataset) from appbe.dataPath import DATA_DIR from appbe.sqliteUtility import sqlite_db file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') temp_data = sqlite_obj.read_data('dataingest') dataFile = '' for x in temp_data: if x[1] == dataset: dataFile = x[0] check_df = pd.read_csv(dataFile) request.session['datalocation'] = dataFile except Exception as e: request.session['currentstate'] = 0 context.update({'error': 'Failed to read data','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e)) return context else: if request.FILES: Datapath = request.FILES['DataFilePath'] if Datapath.size > 31457280: context.update({'tab': 'tabconfigure','error': 'Upload limit is 30 MB only, use local file option for larger file','currentstate': request.session['currentstate'], 'ModelVersion': ModelVersion}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Upload limit is 30 MB only, use local file option for larger file') return context ext = str(Datapath).split('.')[-1] request.session['uploadfiletype'] = 'Local' request.session['datatype'] = 'Normal' filetimestamp = str(int(time.time())) if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) else: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) with open(dataFile, 'wb+') as destination: for chunk in Datapath.chunks(): destination.write(chunk) destination.close() dataPath = dataFile else: dataPath = request.POST.get('localfilePath') #print(os.path.getsize(dataPath)) # 10012:Decision Threshold related Changes - S T A R T #removed few lines related to the check to not allow data to be ingested # E N D if request.POST.get('optfiletype') == 'avro': try: import pandavro as pdx if os.path.isdir(dataPath): for f in os.listdir(dataPath): if 
f.endswith('avro'): processed_df = pdx.read_avro(f) if not df.empty: df = df.append(processed_df, ignore_index=True) else: df = pd.DataFrame(processed_df) elif os.path.isfile(dataPath): import pandavro as pdx df = pdx.read_avro(dataPath) filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') df.to_csv(dataFile, index=False) request.session['datalocation'] = str(dataFile) except Exception as e: print(e) elif request.POST.get('optfiletype') == 'parquet': if os.path.isdir(dataPath): for f in os.listdir(dataPath): if f.endswith('parquet'): processed_df = pd.read_parquet(f, engine='pyarrow') if not df.empty: df = df.append(processed_df, ignore_index=True) else: df = pd.DataFrame(processed_df) elif os.path.isfile(dataPath): df = pd.read_parquet(dataPath, engine='pyarrow') filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') df.to_csv(dataFile, index=False) request.session['datalocation'] = str(dataFile) elif request.POST.get('optfiletype') == 'dilimeted': if os.path.isdir(dataPath): for f in os.listdir(dataPath): if f.endswith('csv') or f.endswith('tsv'): processed_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') if not df.empty: df = df.append(processed_df, ignore_index=True) else: df = pd.DataFrame(processed_df) filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') df.to_csv(dataFile, index=False,sep=delimiter,quotechar=textqualifier) request.session['datalocation'] = str(dataFile) else: dataFile = dataPath request.session['uploadfiletype'] = 'Local' request.session['datatype'] = 'Normal' FileReadingstatus = True request.session['currentstate'] = 0 if dataPath.endswith('tar'): dataFile = tarFileExtraction(dataPath,DATA_FILE_PATH) if dataPath.endswith('zip'): dataFile = multipleZipExtraction(dataPath,DATA_FILE_PATH) if dataFile == '': FileReadingstatus = False msg = 'Please provide a file name' elif dataFile.endswith(".xls") or dataFile.endswith(".xlsx"): FileReadingstatus = False msg = 'Please provide a dilimited file' elif not os.path.isfile(dataFile): FileReadingstatus = False msg = 'File does not exist' else: check_df = pd.DataFrame(); try: try: cvobj = csv_validator() valid_header, validrows, rownumbers = cvobj.validate_header(dataFile,delimiter,textqualifier) request.session['datalocation'] = str(dataFile) if not validrows: FileReadingstatus = False msg = 'Data Format issue' else: if valid_header: check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100,encoding_errors= 'replace') request.session['datalocation'] = str(dataFile) else: check_df = pd.read_csv(dataFile, header=None, encoding='utf8', prefix='X',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace') filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') check_df.to_csv(dataFile, index=False) request.session['datalocation'] = str(dataFile) except Exception as e: print(e) check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100) request.session['datalocation'] = str(dataFile) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(e)) except UnicodeDecodeError: FileReadingstatus = False msg = 
'Only utf8 file encoding supported' log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:'+msg) except pd.errors.EmptyDataError: FileReadingstatus = False msg = 'File is empty' log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:'+msg) except pd.errors.ParserError: FileReadingstatus = False msg = 'File Parsng Error' log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg) except FileNotFoundError: FileReadingstatus = False msg = 'File does not exist' request.session['currentstate'] = 0 log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg) except Exception as e: msg = 'File Read Error' FileReadingstatus = False print(e) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str( ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + msg+', '+str(e)) if check_df.empty and FileReadingstatus: FileReadingstatus = False msg = 'Date file is empty' if not FileReadingstatus: context.update({'tab': 'tabconfigure','error': msg,'currentstate': request.session['currentstate'], 'ModelVersion': ModelVersion}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+msg) return context # -------------------------------- 10012:Decision Threshold related Changes S T A R T ------------------------------- data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation']) msg = "" if data_is_under_RAM_threshold == False: msg = "AION will not be able to train on data set provided as it is bigger than available RAM, Please choose distributed learning for further processing." 
# ------------------------------------------------------ E N D ------------------------------------------------------ check_df.rename(columns=lambda x: x.strip(), inplace=True) featuresList = check_df.columns.tolist() numberoffeatures = len(featuresList) imp_features = getimpfeatures(dataFile,numberoffeatures,delimiter,textqualifier) samplePercentage = 100 samplePercentval = 0 showRecommended = False sample_size = int(eda_setting()) dflength = len(check_df) if dflength > sample_size: samplePercentage = round(float((sample_size/dflength) * 100),2) samplePercentval = samplePercentage / 100 showRecommended = True df_top = check_df.head(10) df_json = df_top.to_json(orient="records") df_json = json.loads(df_json) statusmsg = 'Data File Uploaded Successfully ' request.session['currentstate'] = 0 request.session['finalstate'] = 0 request.session['datatype'] = 'Normal' records = check_df.shape[0] request.session['NoOfRecords'] = records statusmsg = 'Data File Uploaded Successfully' t2 = time.time() log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str( round(t2 - t1)) + ' sec' + ' : ' + 'Success') # EDA Subsampling changes context.update({'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList,'tab': 'tabconfigure', 'data': df_json, 'status_msg': statusmsg, 'selected': 'modeltraning','imp_features':imp_features,'numberoffeatures':numberoffeatures, 'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'], 'exploratory': False}) if msg!="": context.update({'data_size_alert': msg}) return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) request.session['currentstate'] = 0 context.update({'error': 'Failed to read data','emptycsv' : 'emptycsv'}) log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Failed to read data, '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context
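The avro/parquet/delimited directory branches above merge every matching file under the upload path into a single DataFrame before writing it back out as one CSV. A minimal, self-contained sketch of that merge step, using pd.concat and the full file path (the function name and defaults below are illustrative, not part of the application):

import os
import pandas as pd

def merge_directory_files(data_dir, ext='parquet', delimiter=',', textqualifier='"'):
    """Merge every file with the given extension under data_dir into one DataFrame."""
    frames = []
    for name in os.listdir(data_dir):
        if not name.endswith(ext):
            continue
        full_path = os.path.join(data_dir, name)  # read via the full path, not the bare file name
        if ext == 'parquet':
            frames.append(pd.read_parquet(full_path, engine='pyarrow'))
        else:
            frames.append(pd.read_csv(full_path, encoding='utf8', sep=delimiter,
                                      quotechar=textqualifier, skipinitialspace=True,
                                      encoding_errors='replace'))
    # pd.concat avoids appending to an undefined DataFrame and the deprecated DataFrame.append
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

# merged = merge_directory_files(r'C:/uploads/batch1', ext='parquet')  # hypothetical path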
llm_generateQnA.py
import os import re import json import time import sys import tiktoken import openai import requests from appbe.aion_config import get_llm_data import logging import pdfplumber from docx import Document openai.api_key = '' openai.api_base = '' openai.api_type = '' openai.api_version = '' deployment_name="GPT-35-Turbo" model_name='gpt-3.5-turbo' set_tokens_limit = 500 set_tokens_limit_offline = 400 set_prompt="You are an expert user generating questions and answers. You will be passed a page extracted from a documentation. Generate a numbered list of questions as Q. and equivelant answer as A. for every question based *solely* on the given text." # QnA Generator using LLM related changes # -------------------------------------------------------- def ingestDataForQA(request, DATA_FILE_PATH): log = logging.getLogger('log_ux') try: Datapath = request.FILES['DataFileQnA'] from appbe.eda import ux_eda ext = str(Datapath).split('.')[-1] request.session['uploadfiletype'] = 'Local' request.session['datatype'] = 'Normal' filetimestamp = str(int(time.time())) if ext.lower() in ['txt','pdf','docx']: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext) else: dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp) with open(dataFile, 'wb+') as destination: for chunk in Datapath.chunks(): destination.write(chunk) destination.close() dataPath = dataFile request.session['textdatapathQA'] = dataPath llm_choice = request.POST.get("llm_choice") _result = '' # if llm_choice == 'Haystack': # _result = generateQA_Haystack(request, DATA_FILE_PATH) if llm_choice == 'Offline': _result = generateQA_Offline(request, DATA_FILE_PATH) else: _result = generateQA_OpenAI(request, DATA_FILE_PATH) return _result except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'error': 'Failed to read data','emptytxt' : 'emptytxt'} log.info('Text Data Ingestion -- Error : Failed to read data, '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context # ---------------------- E N D --------------------------- def generateQA_OpenAI(request, DATA_FILE_PATH): log = logging.getLogger('log_ux') try: file_path = request.session['textdatapathQA'] # Read the file content if file_path.endswith('.pdf'): pdf_file=pdfplumber.open(file_path) file_content = " ".join([x.extract_text() for x in pdf_file.pages]) elif file_path.endswith('.docx'): doc_file=Document(file_path) file_content = " \n".join([x.text for x in doc_file.paragraphs]) else: with open(file_path, "r", encoding="utf-8",errors = "ignore") as file: file_content = file.read() text = file_content.strip() #text = text.strip() extracted_QnA = [] chunk_counter = 0 num_tokens_text = count_tokens_text(text) if num_tokens_text > set_tokens_limit: for sub_text in split_text(text): chunk_counter = chunk_counter + 1 _result = extract_questions_from_splittedtext(sub_text) print(f"Currently executed chunk no is - {chunk_counter}.") extracted_QnA.extend(_result) else: _prompt = set_prompt msg = [ {"role": "system", "content": _prompt}, {"role": "user", "content": text} ] extracted_QnA = run_model(msg) quesCount = len(extracted_QnA) context = {'extracted_QnA':extracted_QnA, 'quesCount':quesCount} filetimestamp = str(int(time.time())) output_filepath = os.path.join(DATA_FILE_PATH,'AION_QnA' + filetimestamp+'.txt') # Save the extracted questions as a JSON file with open(output_filepath, 
'w') as output_file: json.dump(extracted_QnA, output_file, indent=4) print(f"QnAs have been saved to {output_filepath}.") request.session['QnAfilepath'] = output_filepath return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() errormsg = str(e) if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg: errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.' if 'The API type provided in invalid' in errormsg: errormsg = "The API type provided is invalid. Please select one of the supported API types:'azure', 'azure_ad' or 'open_ai'" if 'Max retries exceeded with url' in errormsg: errormsg = 'Please make sure you have good internet connection and access to API endpoint for your resource.' fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'error': 'Failed to generate QnA List using openAI','LLM' : 'openAI', 'selected':'DataOperations', 'errormessage':errormsg} log.info('generateQA_OpenAI -- Error : Failed to generate QnA List using openAI.. '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context def run_model(msg): key,url,api_type,api_version = get_llm_data() openai.api_key = key openai.api_base = url openai.api_type = api_type openai.api_version = api_version completions = openai.ChatCompletion.create(engine=deployment_name, temperature=0.0, max_tokens=2000, n=1, stop=None, messages=msg) # return completions.choices[0].message.content _questionList = completions.choices[0].message.content question_pattern = re.compile(r"^Q\s*\d+\.\s*(.+)$", re.MULTILINE) questions = question_pattern.findall(_questionList) answer_pattern = re.compile(r"^A\s*\d+\.\s*(.+)$", re.MULTILINE) answers = answer_pattern.findall(_questionList) if (len(questions) > 0) and (not re.search(r"[.!?)]$", questions[-1].strip())): print(f"WARNING: Popping incomplete question: '{questions[-1]}'") questions.pop() extracted_QnA = [] for question, answer in zip(questions, answers): extracted_QnA.append({'question': question, 'answer': answer}) return extracted_QnA def count_tokens_text(text): import tiktoken model_type = model_name encoding = tiktoken.encoding_for_model(model_type) encoded_text = encoding.encode(text) return len(encoded_text) def extract_questions_from_splittedtext(text): _prompt = set_prompt msg = [ {"role": "system", "content": _prompt}, {"role": "user", "content": text} ] _ques_ans_List = run_model(msg) return _ques_ans_List def split_text(text): lines = text.split('\n') current_section = '' sections = [] _lastsection = 0 for line in lines: num_tokens_text = count_tokens_text(''.join([current_section,line])) if num_tokens_text < set_tokens_limit: current_section = ''.join([current_section,line]) else: sections.append(current_section) current_section = line _lastsection = 1 if _lastsection == 1: sections.append(current_section) return sections # --------------------------------------------------------------------------------- # def generateQA_Haystack(request, DATA_FILE_PATH): file_path = request.session['textdatapathQA'] # Read the file content with open(file_path, "r", encoding="utf-8") as file: file_content = file.read() text = file_content.strip() text = text.strip() docs = [] num_tokens_text = count_tokens_text(text) if 
num_tokens_text > set_tokens_limit: for sub_text in split_text(text): docs.append({"content": sub_text}) else: docs = [{"content": text}] from pprint import pprint from tqdm.auto import tqdm from haystack.nodes import QuestionGenerator, BM25Retriever, FARMReader # from haystack.document_stores import ElasticsearchDocumentStore from haystack.document_stores import InMemoryDocumentStore # from haystack.document_stores import PineconeDocumentStore from haystack.pipelines import ( QuestionGenerationPipeline, RetrieverQuestionGenerationPipeline, QuestionAnswerGenerationPipeline, ) from haystack.utils import print_questions document_store = InMemoryDocumentStore(use_bm25=True) document_store.write_documents(docs) question_generator = QuestionGenerator() # reader = FARMReader("deepset/roberta-base-squad2") # reader.save("my_local_roberta_model") reader_local = FARMReader(model_name_or_path="my_local_roberta_model_1") qag_pipeline = QuestionAnswerGenerationPipeline(question_generator, reader_local) extracted_QnA = [] for idx, document in enumerate(tqdm(document_store)): print(f"\n * Generating questions and answers for document {idx}: {document.content[:100]}...\n") result = qag_pipeline.run(documents=[document]) print_questions(result) answers = [] questions = result['queries'] answerList = result["answers"] for _answers in answerList: for answer in _answers: ans = answer.answer answers.append(ans) for question, answer in zip(questions, answers): extracted_QnA.append({'question': question, 'answer': answer}) quesCount = len(extracted_QnA) context = {'extracted_QnA':extracted_QnA, 'quesCount':quesCount} filetimestamp = str(int(time.time())) output_filepath = os.path.join(DATA_FILE_PATH,'AION_QnA' + filetimestamp+'.txt') # Save the extracted questions as a JSON file with open(output_filepath, 'w') as output_file: json.dump(extracted_QnA, output_file, indent=4) print(f"QnAs have been saved to {output_filepath}.") request.session['QnAfilepath'] = output_filepath return context # --------------------------------------------------------------------------------- # def generateQA_Offline(request, DATA_FILE_PATH): log = logging.getLogger('log_ux') try: file_path = request.session['textdatapathQA'] if file_path.endswith('.pdf'): pdf_file=pdfplumber.open(file_path) file_content = " ".join([x.extract_text() for x in pdf_file.pages]) elif file_path.endswith('.docx'): doc_file=Document(file_path) file_content = " \n".join([x.text for x in doc_file.paragraphs]) else: with open(file_path, "r", encoding="utf-8",errors = "ignore") as file: file_content = file.read() # # Read the file content # with open(file_path, "r", encoding="utf-8") as file: # file_content = file.read() text = file_content.strip() # text = text.strip() docs = [] # num_tokens_text = count_tokens_text(text) # if num_tokens_text > set_tokens_limit: # for sub_text in split_text(text): # docs.append(sub_text) # else: # docs.append(text) model_name = "valhalla/t5-base-qg-hl" num_tokens_text = count_tokens_text_offline(text, model_name) if num_tokens_text > set_tokens_limit_offline: for sub_text in split_text_for_Offline(text, model_name): docs.append(sub_text) else: docs.append(text) from question_generation.pipelines import pipeline extracted_QnA = [] extracted_QnAList = [] nlp = pipeline("question-generation", model = model_name) # nlp = pipeline("question-generation", model="valhalla/t5-base-e2e-qg") # nlp = pipeline("e2e-qg", model="valhalla/t5-base-qg-hl") # nlp = pipeline("multitask-qa-qg", model="valhalla/t5-base-qa-qg-hl") for _text in docs: 
res = nlp(_text) print(res) extracted_QnAList.extend(res) for _record in extracted_QnAList: extracted_QnA.append({'question': _record['question'], 'answer': _record['answer'].replace('<pad>', '')}) quesCount = len(extracted_QnA) context = {'extracted_QnA':extracted_QnA, 'quesCount':quesCount} filetimestamp = str(int(time.time())) output_filepath = os.path.join(DATA_FILE_PATH,'AION_QnA' + filetimestamp+'.txt') # Save the extracted questions as a JSON file with open(output_filepath, 'w') as output_file: json.dump(extracted_QnA, output_file, indent=4) print(f"T5 based QnAs have been saved to {output_filepath}.") request.session['QnAfilepath'] = output_filepath return context except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() errormsg = str(e) fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) context = {'error': 'Failed to generate QnA List using T5','LLM' : 'T5', 'selected':'DataOperations', 'errormessage':errormsg} log.info('generateQA_Offline -- Error : Failed to generate QnA List using T5.. '+str(e)) log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return context def split_text_for_Offline(text, model_name): lines = text.split('\n') current_section = '' sections = [] _lastsection = 0 for line in lines: num_tokens = count_tokens_text_offline(''.join([current_section,line]), model_name) if num_tokens < set_tokens_limit_offline: current_section = ''.join([current_section,line]) else: sections.append(current_section) current_section = line _lastsection = 1 if _lastsection == 1: sections.append(current_section) return sections def count_tokens_text_offline(text, model_name): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer(text, return_tensors="pt") input_ids = inputs["input_ids"] _token_count = len(input_ids[0]) return _token_count
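Both the OpenAI and offline paths above gate the prompt size with a token count and fall back to chunking when the document exceeds the limit. A small usage sketch of that tiktoken-based chunking (the sample text and the 500-token limit are illustrative; the module itself reads its limits from set_tokens_limit / set_tokens_limit_offline, and unlike split_text's _lastsection flag this sketch always keeps the trailing chunk):

import tiktoken

def count_tokens(text, model='gpt-3.5-turbo'):
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))

long_text = '\n'.join(f'Paragraph {i}: some documentation content.' for i in range(200))
token_limit = 500  # stands in for set_tokens_limit

chunks, current = [], ''
for line in long_text.split('\n'):
    candidate = ''.join([current, line])
    if count_tokens(candidate) < token_limit:
        current = candidate
    else:
        chunks.append(current)
        current = line
if current:
    chunks.append(current)
print(len(chunks), 'chunks, each under', token_limit, 'tokens')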
onlineLearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import shutil import subprocess import sys import time import glob import re from appbe.pages import get_usecase_page import json from django.http import FileResponse def startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH): try: modelid = request.POST.get('modelid') #incfilepath = request.POST.get('incfilepath') Datapath = request.FILES['incfilepath'] filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv') with open(dataFile, 'wb+') as destination: for chunk in Datapath.chunks(): destination.write(chunk) # destination.close()#bugfix 11656 incfilepath = dataFile p = Existusecases.objects.get(id=modelid) deployPath = str(p.DeployPath) scriptPath = os.path.abspath(os.path.join(deployPath,'aion_inclearning.py')) request.session['IsRetraining'] = 'No' if not os.path.exists(scriptPath): status,context,action = get_usecase_page(request,usecasedetails,Existusecases) context['Msg'] = 'Incremental/Online learning not supported for this model.For online training select Online Training in basic configuration page and provide with training' else: outputStr = subprocess.check_output([sys.executable, scriptPath, incfilepath]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() decoded_data = json.loads(outputStr) status,context,action = get_usecase_page(request,usecasedetails,Existusecases) if decoded_data['status'] == 'SUCCESS': msg = decoded_data['Msg'] context['Status'] = 'SUCCESS' context['Msg'] = msg else: msg = decoded_data['Msg'] context['Status'] = 'SUCCESS' context['Msg'] = msg except Exception as e: print(e) try: status,context,action = get_usecase_page(request,usecasedetails,Existusecases) except Exception as msg: context['errorMsg'] = msg return action,context
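startIncrementallearning above shells out to the deployed aion_inclearning.py script and recovers its result from an "aion_learner_status:" line in stdout. A minimal sketch of that invoke-and-parse step (the script path and the example call are placeholders):

import json
import re
import subprocess
import sys

def run_incremental_learning(script_path, incremental_file):
    """Run the deployed incremental-learning script and return its decoded status payload."""
    output = subprocess.check_output([sys.executable, script_path, incremental_file]).decode('utf-8')
    match = re.search(r'aion_learner_status:(.*)', output, re.IGNORECASE)
    if not match:
        raise ValueError('aion_learner_status marker not found in script output')
    return json.loads(match.group(1).strip())

# result = run_incremental_learning(r'C:/deploy/usecase/1/aion_inclearning.py', 'new_batch.csv')
# print(result['status'], result.get('Msg'))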
llmTesting.py
import os import openai from langchain.llms import AzureOpenAI from sentence_transformers.SentenceTransformer import SentenceTransformer import time import datetime import pandas as pd import sys import subprocess import importlib from appbe.aion_config import get_llm_data from appbe.dataPath import DATA_FILE_PATH remote_data_dir = "/home/aion/data/storage/llm_testing_data" remote_data_processeddata_dir = '/home/aion/data/storage/processed_data' remote_config_dir = '/home/aion/data/config' sh_file_path = '/home/aion/llm/sbin/llm_testing.sh' prompt_command = '/home/aion/llm/sbin/llm_testing.sh' PRE_CONTEXT = "Answer the following question in a concise manner.\n" DEFAULT_PARAMS = { 'OPENAI_API_TYPE' : "azure", 'OPENAI_API_BASE' : "", 'OPENAI_API_KEY' : "", 'OPENAI_API_VERSION' : "2023-03-15-preview" } faq="" def getAMIDetails(config,selectedAMI): y = {} for x in config: print(x) if x['id'] == selectedAMI: return x return y class test_LLM(): def __init__(self, deployment_name='Text-Datvinci-03', params=DEFAULT_PARAMS, transformer=None, sentence_txfr_model='sentence-transformers/paraphrase-mpnet-base-v2'): self.deployment_name=deployment_name self.set_params( params) self.transformer = transformer self.sentence_txfr_model = sentence_txfr_model def fiddlerAuditorCheck(self): status = importlib.util.find_spec('auditor') if not status: subprocess.check_call([sys.executable, "-m", "pip","uninstall", "-q","-y","notebook"]) subprocess.check_call([sys.executable, "-m", "pip", "install","-q", "notebook==6.4.5" ]) subprocess.check_call([sys.executable, "-m", "pip", "install","-q","fiddler-auditor==0.0.2"]) subprocess.check_call([sys.executable, "-m", "pip", "install","-q","notebook==7.0.2"]) status = importlib.util.find_spec('auditor') return status def set_params(self, params={}): valid_params = ['OPENAI_API_TYPE','OPENAI_API_KEY','OPENAI_API_BASE','OPENAI_API_VERSION'] for key, value in params.items(): if 'OPENAI_API_TYPE' == key: openai.api_type = value os.environ['OPENAI_API_TYPE'] = openai.api_type elif 'OPENAI_API_KEY' == key: openai.api_key = value os.environ['OPENAI_API_KEY'] = openai.api_key elif 'OPENAI_API_BASE' == key: openai.api_base = value os.environ['OPENAI_API_BASE'] = openai.api_base elif key in valid_params: os.environ[key] = value def run(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT): if not self.fiddlerAuditorCheck(): raise ValueError('Fiddler-auditor is not instlled "python -m pip install fiddler-auditor==0.0.2"') openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key) from auditor.perturbations import Paraphrase from auditor.evaluation.expected_behavior import SimilarGeneration from auditor.evaluation.evaluate import LLMEval # For Azure OpenAI, it might be the case the api_version for chat completion # is different from the base model so we need to set that parameter as well. 
if self.transformer: azure_perturber = self.transformer else: azure_perturber = Paraphrase( model="GPT-35-Turbo", api_version="2023-03-15-preview", num_perturbations=perturbations_per_sample, ) sent_xfmer = SentenceTransformer(self.sentence_txfr_model) similar_generation = SimilarGeneration( similarity_model=sent_xfmer, similarity_threshold=similarity_threshold,) llm_eval = LLMEval( llm=openai_llm, expected_behavior=similar_generation, transformation=azure_perturber,) test_result = llm_eval.evaluate_prompt_correctness( prompt=prompts, pre_context=pre_context, reference_generation=reference_generation, perturbations_per_sample=perturbations_per_sample ) return test_result def runmultiple(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT,faq=faq): if not self.fiddlerAuditorCheck(): raise ValueError('Fiddler-auditor is not instlled "python -m pip install fiddler-auditor==0.0.2"') from auditor.evaluation.expected_behavior import SimilarGeneration from auditor.evaluation.evaluate import LLMEval openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key) from auditor.perturbations import Paraphrase # For Azure OpenAI, it might be the case the api_version for chat completion # is different from the base model so we need to set that parameter as well. if self.transformer: azure_perturber = self.transformer else: azure_perturber = Paraphrase( model="GPT-35-Turbo", api_version="2023-03-15-preview", num_perturbations=perturbations_per_sample, ) sent_xfmer = SentenceTransformer(self.sentence_txfr_model) similar_generation = SimilarGeneration( similarity_model=sent_xfmer, similarity_threshold=similarity_threshold,) llm_eval = LLMEval( llm=openai_llm, expected_behavior=similar_generation, transformation=azure_perturber,) rows = faq.shape[0] prompts = list(faq['Question']) listofDf = [] for i in range(rows): test_result = llm_eval.evaluate_prompt_robustness( prompt=prompts[i], pre_context=pre_context, ) try: now = datetime.datetime.now().strftime("%H%M%S") name = str(i)+str(now)+'.html' test_result.save(name) df_iter=pd.read_html(name) df_actual = df_iter[0] listofDf.append(df_actual) except: pass perturbatedDF = pd.concat(listofDf) return perturbatedDF def run_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,isfinetuned): from appbe.compute import readComputeConfig from appbe.prediction import get_instance cloud_infra = readComputeConfig() dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv') remoteFile = os.path.join(remote_data_dir, 'prompt.csv') if not reference_generation: reference_generation = '' prompt = pd.DataFrame([{'prompts':prompts, 'reference_generation':reference_generation}]) prompt.to_csv(dataFile, index=False) hypervisor, instanceid, region, image = get_instance(usecasename) key, url, api_type, api_version = get_llm_data() if hypervisor == 'AWS': aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] currentDirectory = os.path.dirname(os.path.abspath(__file__)) LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm')) if image != '' and image != 'NA': amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) else: amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) if region == '' or region == 'NA': region = amiDetails['regionName'] from 
llm.aws_instance_api import start_instance # print(aws_access_key_id, aws_secret_key, instanceid, region) status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) if status.lower() == 'success': pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath']) username = amiDetails['ssh']['userName'] # cope file to server for sinfle prompt from AION.llm.ssh_command import copy_files_to_server copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir) if isfinetuned: command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \ + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \ + str(perturbations_per_sample) + \ ' '+ str(key) + \ ' '+ str(url) + \ ' '+ str(api_type) + \ ' '+ str(api_version)+ \ ' '+ str("single") else: command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \ + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \ + str(perturbations_per_sample) + \ ' '+ str(key) + \ ' '+ str(url) + \ ' '+ str(api_type) + \ ' '+ str(api_version)+ \ ' '+ str("single") from llm.ssh_command import run_ssh_cmd buf = run_ssh_cmd(ip, pem_file, username, '', '', command) print(buf) return buf def run_multiple_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, faq,isfinetuned): dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv') remoteFile = os.path.join(remote_data_dir, 'prompt.csv') faq.to_csv(dataFile, index=False) print("This is done") from appbe.compute import readComputeConfig from appbe.prediction import get_instance cloud_infra = readComputeConfig() hypervisor, instanceid, region, image = get_instance(usecasename) key, url, api_type, api_version = get_llm_data() if hypervisor == 'AWS': aws_access_key_id = cloud_infra['awsCredentials']['accessKey'] aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey'] currentDirectory = os.path.dirname(os.path.abspath(__file__)) LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm')) if image != '' and image != 'NA': amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image) else: amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid) if region == '' or region == 'NA': region = amiDetails['regionName'] from llm.aws_instance_api import start_instance # print(aws_access_key_id, aws_secret_key, instanceid, region) status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region) if status.lower() == 'success': pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath']) username = amiDetails['ssh']['userName'] #print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir) from AION.llm.ssh_command import copy_files_to_server copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir) if isfinetuned: command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \ + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \ + str(perturbations_per_sample) + \ ' '+ str(key) + \ ' '+ str(url) + \ ' '+ str(api_type) + \ ' '+ str(api_version)+ \ ' '+ str("multiple") else: command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \ + ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \ + str(perturbations_per_sample) + \ ' '+ str(key) + \ ' '+ str(url) + \ ' '+ str(api_type) + \ ' '+ str(api_version)+ \ ' '+ str("multiple") from llm.ssh_command import run_ssh_cmd buf = run_ssh_cmd(ip, pem_file, username, '', '', command) print(buf) return 
buf
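For orientation, a hypothetical single-prompt call to the test_LLM wrapper above could look like the following; the endpoint, key, deployment, prompt, and reference answer are placeholders, and it presupposes a valid Azure OpenAI subscription plus a successful fiddler-auditor install:

params = {
    'OPENAI_API_TYPE': 'azure',
    'OPENAI_API_BASE': 'https://<your-resource>.openai.azure.com/',
    'OPENAI_API_KEY': '<your-key>',
    'OPENAI_API_VERSION': '2023-03-15-preview',
}
tester = test_LLM(deployment_name='Text-Datvinci-03', params=params)
test_result = tester.run(
    modelName='gpt-35-turbo',
    temperature=0.0,
    similarity_threshold=0.85,
    perturbations_per_sample=3,
    prompts='Which continent has the largest land area?',
    reference_generation='Asia is the largest continent by land area.',
)
# test_result is a fiddler-auditor evaluation report; the multi-prompt path above saves
# comparable robustness reports to HTML via .save('<name>.html').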
train_output.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import time
import subprocess
import sys
import json
import pandas as pd

def getDataSetRecordsCount(datalocation):
    try:
        records = 0
        if os.path.isfile(datalocation):
            for chunk in pd.read_csv(datalocation, chunksize=20000):
                records = records + len(chunk)
        if records == 0:
            records = 'NA'
    except Exception as e:
        print(e)
        records = 'NA'
    return records

def get_train_model_details(deploy_location, request):
    updatedConfigFile = request.session['config_json']
    f = open(updatedConfigFile, "r")
    configSettings = f.read()
    f.close()
    usename = request.session['usecaseid'].replace(" ", "_")
    outputfile = os.path.join(deploy_location, usename, str(request.session['ModelVersion']), 'etc', 'output.json')
    if os.path.isfile(outputfile):
        f1 = open(outputfile, "r+", encoding="utf-8")
        outputStr = f1.read()
        f1.close()
        resultJsonObj = json.loads(outputStr)
        trainingStatus = resultJsonObj['status']
        if trainingStatus.lower() == 'success':
            details = resultJsonObj['data']
            modelType = details['ModelType']
            bestModel = details['BestModel']
            return trainingStatus, modelType, bestModel
        else:
            return trainingStatus, 'NA', 'NA'
    else:
        return 'Not Trained', 'NA', 'NA'
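get_train_model_details reads only a handful of keys from the etc/output.json written by training. An illustrative shape of that file (all values are placeholders; only the keys consumed by this helper are shown, plus deployLocation, which the training-status helpers read elsewhere):

# Illustrative etc/output.json payload consumed by get_train_model_details.
example_output = {
    "status": "SUCCESS",
    "data": {
        "ModelType": "classification",
        "BestModel": "Random Forest",
        "deployLocation": "C:/AION/target/usecase_1/1",  # placeholder path
    },
}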
generate_json_config.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import os import platform import time import sys from os.path import expanduser from pathlib import Path import ast import pandas as pd from appbe.dataPath import CONFIG_FILE_PATH def generate_json_config(request): from appbe.create_dummy_dataset import gen_data_classification from appbe.create_dummy_dataset import gen_data_regression from appbe.create_dummy_dataset import gen_data_series try: problem_type = request.POST.get('ProblemType') datadict1 = request.POST.get('rangedict') datadict = eval(datadict1) if request.POST.get('univariate') == "True": features = request.POST.get('features') features = '1' catfeatures = request.POST.get('catfeatures') catfeatures = '0' informative = request.POST.get('informative') informative = '1' elif request.POST.get('univariate') == "False": features = request.POST.get('features') catfeatures = request.POST.get('catfeatures') informative = request.POST.get('informative') data_path = request.POST.get('dataypath') number_informative = int(request.POST.get('informative')) number_numerical_features = int(request.POST.get('features')) if os.path.isdir(data_path): raise Exception('Incorrect path. Please include filename. Eg: C:/AION/data.csv') if os.path.isfile(data_path): raise ValueError('The file ({}) exists.'.format(os.path.basename(data_path))) if number_informative > number_numerical_features: raise ValueError('The No. numerical features ({}) must larger than No. 
informative features ({}).'.format(number_numerical_features, number_informative)) if problem_type == 'classification': status = gen_data_classification(int(request.POST.get('samples')),int(request.POST.get('features')),request.POST.get('dataypath'),int(request.POST.get('catfeatures')),int(request.POST.get('txtfeatures')),float(request.POST.get('proportion')),int(request.POST.get('informative')),int(request.POST.get('class')),[float(val) for val in request.POST.get('weights').split(",")],float(request.POST.get('shift')),datadict) elif problem_type == 'regression': status = gen_data_regression(int(request.POST.get('samples')),int(request.POST.get('features')),request.POST.get('dataypath'),int(request.POST.get('catfeatures')),int(request.POST.get('txtfeatures')),float(request.POST.get('proportion')),int(request.POST.get('informative')),int(request.POST.get('target')),float(request.POST.get('bias')),float(request.POST.get('noise')),datadict) elif problem_type == 'timeseriesforecasting': #task 11997 status = gen_data_series(request.POST.get('univariate'),request.POST.get('starttime'),request.POST.get('endtime'),int(request.POST.get('samples')),int(features),request.POST.get('dataypath'),int(catfeatures),float(request.POST.get('proportion')),int(informative),int(request.POST.get('target')),float(request.POST.get('bias')),float(request.POST.get('noise')),datadict) else: raise Exception("Unsupperted Problem Type.") if status: from appbe.dataPath import DATA_DIR from appbe.sqliteUtility import sqlite_db file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if request.POST["dataypath"] =='' or request.POST["dataset"] == '': return 'error' newdata = {} newdata['datapath'] = [request.POST.get('dataypath')] newdata['datasetname'] = [request.POST.get('dataset')] sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'dataingest') else: raise Exception("Data Genration failed.") except Exception as e: print(e) raise Exception(str(e)) if __name__ == "__main__": generate_json_config('classification') generate_json_config('regression') generate_json_config('timeseriesforecasting') #task 11997
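Once generation succeeds, the dataset path and name are registered in the dataingest table so the 'tblaiondata' ingestion option can resolve them later. A sketch of that register-and-look-up round trip using plain sqlite3/pandas (the appbe.sqliteUtility helper wraps the same idea; the database path, file path, and dataset name below are placeholders):

import sqlite3
import pandas as pd

conn = sqlite3.connect('config.db')  # the app keeps this under DATA_DIR/sqlite
pd.DataFrame({'datapath': ['C:/AION/generated_data.csv'],
              'datasetname': ['demo_dataset']}).to_sql('dataingest', conn,
                                                       if_exists='append', index=False)

# Later, a dataset name is resolved back to its file path before ingestion.
rows = conn.execute('SELECT datapath, datasetname FROM dataingest').fetchall()
data_file = next((path for path, name in rows if name == 'demo_dataset'), '')
print(data_file)
conn.close()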
training.py
import json import os import sys import re import numpy as np def check_unsupported_col(config): #bugId14444 unsupported_chars = '[]<>#{}@&' try: featureList = config['basic']['featureList'] return any([x in y for x in unsupported_chars for y in featureList]) except Exception as e: print(str(e)) return False def check_granularity(configSettingsJson,datapath=None): try: from AION.appbe.utils import get_true_option import pandas as pd from pathlib import Path seconds_per_unit = {'second':1,'minute':60,'hour':60 * 60,'day':24 * 60 * 60,'week':7 * 24 * 60 * 60,'month':30 * 24 * 60 * 60,'year':365 * 24 * 60 * 60} if not get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['type']): return '' if isinstance( configSettingsJson['basic']['dateTimeFeature'], list): datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0] else: datetime_feature = configSettingsJson['basic']['dateTimeFeature'] if get_true_option(configSettingsJson['basic']['analysisType']) == 'timeSeriesForecasting' and datetime_feature: if not datapath: datapath = configSettingsJson['basic']['dataLocation'] if Path( datapath).exists(): df = pd.read_csv(datapath, nrows=2) datetime = pd.to_datetime(df[ datetime_feature]) if len(datetime) > 1: source_time_delta = (datetime[1] - datetime[0]).total_seconds() granularity_unit = get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit']) size = int(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['size']) target_time_delta = size * seconds_per_unit[granularity_unit] amplify = int(source_time_delta / target_time_delta) if amplify > 20: return f'Current Granularity setting will amplify the data approx {amplify} times. Depending on your system configuration, this may cause Memory error' return '' except Exception as e: return '' def getStatusCount(matched_lines,total_steps): stepsdone = 0 leaner = True #print(matched_lines) for line in matched_lines: if 'AION feature transformation completed' in line: stepsdone = stepsdone + 1 elif 'AION feature engineering completed' in line: stepsdone = stepsdone + 1 elif 'AION Association Rule completed' in line: stepsdone = stepsdone + 1 elif 'AION Image Classification completed' in line: stepsdone = stepsdone + 1 elif 'AION Association Rule completed' in line: stepsdone = stepsdone + 1 elif 'AION State Transition completed' in line: stepsdone = stepsdone + 1 elif 'AION SurvivalAnalysis completed' in line: stepsdone = stepsdone + 1 elif 'AION Recommender completed' in line: stepsdone = stepsdone + 1 elif 'AION Gluon Stop' in line: stepsdone = stepsdone + 1 elif 'AION Evaluation Stop' in line: stepsdone = stepsdone + 1 elif 'AION Object Detection completed' in line: stepsdone = stepsdone + 1 elif ('training completed' in line) and leaner: stepsdone = stepsdone + 1 leaner = False elif 'Prediction Service completed' in line: stepsdone = stepsdone + 1 elif 'AION TimeSeries Forecasting started' in line: #task 11997 stepsdone = stepsdone + 1 elif 'Distributed Learning Completed' in line: stepsdone = stepsdone + 4 elif 'AION Batch Deployment completed' in line: stepsdone = stepsdone + 2 match_lines = [] for line in matched_lines: count = len(line)-len(line.lstrip()) uline = line.split('...') uline = uline[1] if count == 0: uline = '|... <span style="border: 1px solid black; line-height:2; padding: 2px">'+uline+'</span>' elif count == 8 or count == 1: uline = ' |... 
<span style="border: 1px dashed darkblue; line-height:2; padding: 2px">'+uline+'</span>' elif count == 16 or count == 2: uline = ' |... <span style="border: 1px dotted darkgray; line-height:2; padding: 2px">'+uline+'</span>' elif count == 32 or count == 3: uline = ' |... <span style="border: 1px dotted lightgray ; line-height:2; padding: 2px">'+uline+'</span>' else: uline = line match_lines.append(uline) stepline = '<b>Stage: ' + str(stepsdone) + '/' + str(total_steps) + ' Complete</b>' match_lines.insert(0, stepline) #print(match_lines) output = "\n".join([status_text for status_text in match_lines]) output = "<pre>{}</pre>".format(output) #print(output) return(output) def calculate_total_interations(config): try: noOfIterations = 0 problemtypes = config['basic']['analysisType'] problem_type = "" for key in problemtypes: if config['basic']['analysisType'][key] == 'True': problem_type = key break if problem_type.lower() in ['classification','regression']: algorithms = config['basic']['algorithms'][problem_type] for key in algorithms: if config['basic']['algorithms'][problem_type][key] == 'True': if key not in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Deep Q Network','Dueling Deep Q Network']: if problem_type.lower() == 'classification': configparam = config['advance']['mllearner_config']['modelParams']['classifierModelParams'][key] else: configparam = config['advance']['mllearner_config']['modelParams']['regressorModelParams'][key] param = paramDefine(configparam,config['advance']['mllearner_config']['optimizationMethod']) interationsum = 1 for x in param.values(): interationsum = interationsum*len(x) if config['advance']['mllearner_config']['optimizationMethod'].lower() == 'random': if interationsum > int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations']): interationsum = int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations']) noOfIterations = noOfIterations+interationsum else: if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: if problem_type.lower() == 'classification': configparam = config['advance']['dllearner_config']['modelParams']['classifierModelParams'][key] else: configparam = config['advance']['dllearner_config']['modelParams']['regressorModelParams'][key] interationsum = 1 for j in list(configparam.keys()): if isinstance(configparam[j],(list,dict,tuple,str)): x = configparam[j].split(',') interationsum = interationsum*len(x) noOfIterations = noOfIterations+interationsum elif key in ['Deep Q Network','Dueling Deep Q Network']: if problem_type.lower() == 'classification': configparam = config['advance']['rllearner_config']['modelParams']['classifierModelParams'][key] interationsum = 1 for j in list(configparam.keys()): if isinstance(configparam[j],(list,dict,tuple,str)): x = configparam[j].split(',') interationsum = interationsum*len(x) noOfIterations = noOfIterations+interationsum elif problem_type.lower() in ['llmfinetuning']: algorithms = config['basic']['algorithms'][problem_type] for key in algorithms: if config['basic']['algorithms'][problem_type][key] == 'True': noOfIterations = configparam = config['advance']['llmFineTuning']['modelParams'][key]['epochs'] break else: noOfIterations= 'NA' except Exception as e: print(e) noOfIterations = 'NA' pass return(noOfIterations) def paramDefine(paramSpace, 
method): paramDict = {} for j in list(paramSpace.keys()): inp = paramSpace[j] try: isLog = False isLin = False isRan = False isList = False isString = False try: # check if functions are given as input and reassign paramspace v = paramSpace[j] if 'logspace' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isLog = True elif 'linspace' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isLin = True elif 'range' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isRan = True elif 'list' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isList = True elif '[' and ']' in paramSpace[j]: paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "") isList = True x = paramSpace[j].split(',') except: x = paramSpace[j] str_arg = paramSpace[j] # check if arguments are string try: test = eval(x[0]) except: isString = True if isString: paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x}) else: res = eval(str_arg) if isLin: y = eval('np.linspace' + str(res)) paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) elif isLog: y = eval('np.logspace' + str(res)) paramDict.update( {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y}) elif isRan: y = eval('np.arange' + str(res)) paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) # check datatype of argument elif isinstance(eval(x[0]), bool): y = list(map(lambda i: eval(i), x)) paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y}) elif isinstance(eval(x[0]), float): res = eval(str_arg) if len(str_arg.split(',')) == 3 and not isList: y = eval('np.linspace' + str(res)) #print(y) paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) else: y = list(res) if isinstance(res, tuple) else [res] paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) else: res = eval(str_arg) if len(str_arg.split(',')) == 3 and not isList: y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res)) else: y = list(res) if isinstance(res, tuple) else [res] paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) except Exception as inst: print(inst) return paramDict def calculate_total_activities(config): req_step = 0 problemtypes = config['basic']['analysisType'] problem_type = "" for key in problemtypes: if config['basic']['analysisType'][key] == 'True': problem_type = key break Modelproblem = problem_type if Modelproblem.lower() in ['classification','regression','clustering','anomalydetection','topicmodelling']: req_step = req_step+4 if Modelproblem.lower() in ['timeseriesforecasting','imageclassification','objectdetection','multilabelprediction','similarityidentification','contextualsearch']: #task 11997 req_step = req_step+2 if Modelproblem.lower() in ['survivalanalysis']: req_step = req_step+3 if Modelproblem.lower() in ['recommendersystem']: if config['basic']['algorithms']['recommenderSystem']['ItemRating'] == 'True': req_step = req_step+3 if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'] == 'True': req_step = req_step+1 if Modelproblem.lower() in ['statetransition']: req_step = req_step+1 return (req_step) def getModelStatus(Existusecases,modelid): model = Existusecases.objects.get(id=modelid) return(model.Status) def 
changeModelStatus(Existusecases,modelid,status,problemType,deployPath): model = Existusecases.objects.get(id=modelid) model.Status = status model.ProblemType = problemType model.DeployPath = deployPath model.save() def checkversionrunningstatus(modelid,usecasedetails,Existusecases): modelx = Existusecases.objects.get(id=modelid) ConfigPath = str(modelx.ConfigPath) status = 'Running' try: if os.path.exists(ConfigPath): with open(ConfigPath, 'r') as json_file: data = json.load(json_file) json_file.close() deployPath = str(data['basic']['deployLocation']) modelName = data['basic']['modelName'] modelVersion = data['basic']['modelVersion'] modelName = modelName.replace(" ", "_") logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log') print(logfile) if os.path.exists(logfile): with open(logfile) as f: contents = f.read() f.close() contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1) contents = contents.strip() print(contents) if contents != '': resultJsonObj = json.loads(contents) odataFile = str(modelx.TrainOuputLocation) with open(odataFile, 'w') as json_file: json.dump(resultJsonObj, json_file) json_file.close() modelx.Status = resultJsonObj['status'] status = modelx.Status if resultJsonObj['status'] == 'SUCCESS': modelx.DeployPath = str(resultJsonObj['data']['deployLocation']) if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']: modelx.ProblemType = 'unsupervised' else: modelx.ProblemType = 'supervised' modelx.save() except Exception as e: pass return status def updateLLM_Model_training_logs(deployPath,modelName,modelVersion,model,configPath): from appbe.prediction import get_instance hypervisor,instanceid,region,image = get_instance(modelName+'_'+str(modelVersion)) from llm.llm_tuning import llm_logs cloudconfig = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json')) llm_logs(configPath,cloudconfig,instanceid,hypervisor,model) def checkModelUnderTraining(request,usecasedetails,Existusecases): try: models = Existusecases.objects.filter(Status='Running') for model in models: ConfigPath = str(model.ConfigPath) try: if os.path.exists(ConfigPath): with open(ConfigPath, 'r') as json_file: data = json.load(json_file) json_file.close() deployPath = str(data['basic']['deployLocation']) modelName = data['basic']['modelName'] modelVersion = data['basic']['modelVersion'] modelName = modelName.replace(" ", "_") if data['basic']['analysisType']['llmFineTuning'] == 'True': mlmodels ='' algorihtms = data['basic']['algorithms']['llmFineTuning'] for k in algorihtms.keys(): if data['basic']['algorithms']['llmFineTuning'][k] == 'True': if mlmodels != '': mlmodels += ', ' mlmodels += k updateLLM_Model_training_logs(deployPath,modelName,modelVersion,mlmodels,ConfigPath) logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log') if os.path.exists(logfile): with open(logfile,encoding="utf-8") as f: contents = f.read() f.close() contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1) contents = contents.strip() if contents != '': resultJsonObj = json.loads(contents) odataFile = str(model.TrainOuputLocation) with open(odataFile, 'w') as json_file: json.dump(resultJsonObj, json_file) json_file.close() modelx = Existusecases.objects.get(id=model.id) modelx.Status = resultJsonObj['status'] if resultJsonObj['status'] == 'SUCCESS': modelx.DeployPath = str(resultJsonObj['data']['deployLocation']) if 
resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']: modelx.ProblemType = 'unsupervised' else: modelx.ProblemType = 'supervised' modelx.save() except Exception as e: print(ConfigPath) print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) pass except Exception as e: print(e)
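calculate_total_interations above estimates training effort as the product of candidate-value counts per hyperparameter, capped at the configured budget when random search is selected. A compact sketch of that estimate (the parameter names and values are illustrative):

from functools import reduce

def estimate_iterations(param_space, optimization_method, iteration_budget):
    """Product of candidate counts per parameter, capped for random search."""
    grid_size = reduce(lambda acc, values: acc * len(values), param_space.values(), 1)
    if optimization_method.lower() == 'random' and grid_size > iteration_budget:
        return iteration_budget
    return grid_size

space = {'n_estimators': [50, 100, 200], 'max_depth': [4, 8], 'criterion': ['gini', 'entropy']}
print(estimate_iterations(space, 'grid', 20))   # 3 * 2 * 2 = 12 combinations
print(estimate_iterations(space, 'random', 5))  # capped at the 5-iteration budget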
codeclonedetection.py
# -*- coding: utf-8 -*- import os import glob, os import pandas as pd from openai.embeddings_utils import cosine_similarity import numpy as np from openai.embeddings_utils import get_embedding import tiktoken import openai import importlib.util from sklearn.preprocessing import StandardScaler import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics import time from tqdm import tqdm import concurrent.futures from openai.error import RateLimitError, Timeout try: import chromadb from chromadb.api.types import Documents, Embeddings except: #Looks no chromadb installed,just proceed to use csv embedd pass from openai.embeddings_utils import get_embedding import json from openai.embeddings_utils import cosine_similarity from langchain.schema import Document from langchain.vectorstores import Chroma import warnings import logging warnings.simplefilter(action='ignore', category=FutureWarning) """Code clone detection parent class, based on user input data,the class will detect similar code snippets in the python file """ class CodeCloneDetection: #Constructor for base inputs def __init__(self,rootdir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId): self.rootdir=rootdir self.embedd_storage_path=embedd_storage_path self.openai_baseurl=openai_baseurl self.openai_key=openai_key self.openai_api_type=openai_api_type self.openai_api_version=openai_api_version self.ccdreportpath = os.path.join(self.embedd_storage_path, "codeCloneReport") self.generativeai_chat_model=generativeai_chat_model self.generativeai_embedding_engine = generativeai_embedding_engine self.generativeai_embedding_model = generativeai_embedding_model self.generativeai_deploymentId = generativeai_deploymentId try: os.makedirs(self.ccdreportpath, exist_ok = True) except OSError as error: print("Directory 'codeclonedetection' can not be created",self.ccdreportpath) try: self.logpath = os.path.join(self.ccdreportpath,'codeclonelog.log') logging.basicConfig(level=logging.INFO,filename=self.logpath,filemode='w',format='%(message)s') self.log = logging.getLogger() except Exception as e: print("code clone log object creation error.",e) def get_function_name(self,code): """ Extract function name from a line beginning with "def " """ assert code.startswith("def ") return code[len("def "): code.index("(")] def get_until_no_space(self,all_lines, i) -> str: """ Get all lines until a line outside the function definition is found. """ ret = [all_lines[i]] for j in range(i + 1, i + 10000): if j < len(all_lines): if len(all_lines[j]) == 0 or all_lines[j][0] in [" ", "\t", ")"]: ret.append(all_lines[j]) else: break return "\n".join(ret) def chunk_functions(self,function_code, chunk_size): """ To chunk input for gpt models because max token per model is 4090 """ try: # chunk_size = 1900 chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)] except Exception as e: self.log.info('Error in chunking input prompt data.') return chunks def get_functions(self,filepath): """ Get all functions in a Python file. 
""" try: whole_code = open(filepath).read().replace("\r", "\n") all_lines = whole_code.split("\n") for i, l in enumerate(all_lines): if l.startswith("def "): code = self.get_until_no_space(all_lines, i) function_name = self.get_function_name(code) yield {"code": code, "function_name": function_name, "filepath": filepath} except Exception as e: self.log.info("Error in getting function from file. Error message: \n"+str(e)) def get_clone_function_details(self): """ To get available functions from python files """ try: code_root=self.rootdir from glob import glob code_files = [y for x in os.walk(code_root) for y in glob(os.path.join(x[0], '*.py'))] if code_files: all_funcs = [] total_locs = 0 for code_file in code_files: with open(code_file) as f: total_locs += len(f.readlines()) funcs = list(self.get_functions(code_file)) for func in funcs: all_funcs.append(func) return all_funcs,code_root,code_files,total_locs else: self.log.info("no python files available in the dir:"+str(code_root)) return {"pythondiles_error":"No python files are found."} except Exception as e: print("Error in reading the functions from the given directory. Error message: \n",e) self.log.info("Error in reading the functions from the given directory. Error message: \n"+str(e)) def getOpenAICredentials(self): """ To set openai credential using user input """ #Currently only support openai try: package_name = 'openai' lib_name = importlib.util.find_spec(package_name) if lib_name is None: return "openai_pkg_check_failed" else: embedding_model_lib ='openai' # if isinstance(self.openai_baseurl,str) and isinstance(self.openai_key,str) and isinstance(self.openai_api_type,str): os.environ['OPENAI_API_TYPE'] = self.openai_api_type os.environ['OPENAI_API_BASE'] = self.openai_baseurl # os.environ['OPENAI_API_VERSION'] = '2023-05-15' # os.environ['OPENAI_API_VERSION'] = "2022-12-01" os.environ['OPENAI_API_VERSION'] = self.openai_api_version os.environ['OPENAI_API_KEY'] = self.openai_key if (embedding_model_lib.lower()=='openai'): try: openai.api_type=os.getenv('OPENAI_API_TYPE') openai.api_base = os.getenv('OPENAI_API_BASE') openai.api_key = os.getenv('OPENAI_API_KEY') openai.api_version = os.getenv('OPENAI_API_VERSION') except Exception as e: self.log.info("Unable to get openai credentials,please provide proper credentials."+str(e)) return {"error_msg":"openai_environment_error"} except Exception as e: print("Openai credential set and get function error. 
Error message: \n",e) return openai.api_type,openai.api_base,openai.api_key,openai.api_version def get_embedding_local(self,model: str, text: str) -> list[float]: """ To get embedding data for single user given prompt text""" try: response = openai.Embedding.create( input=text, engine=self.generativeai_embedding_engine) except Exception as e: self.log.info("openai embedding creation error."+str(e)) return result['data'][0]['embedding'] def get_embeddings_pyfiles(self,all_funcs): """ To get embedding for python functions """ try: import tiktoken openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() encoding = tiktoken.encoding_for_model("text-embedding-ada-002") df = pd.DataFrame(all_funcs) df["tokens"] = df["code"].apply(lambda c: len(encoding.encode(c))) embedding_cost = df["tokens"].sum() * (0.0004/1000) EMBEDDING_FILEPATH=self.ccdreportpath+'\code_embeddings.csv' self.log.info("embedding storage location: "+str(EMBEDDING_FILEPATH)) vdb_status = self.get_vdb_status('chromadb') ##Currently chromadb not integrated vdb_status = False if not vdb_status: df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine=self.generativeai_embedding_engine)) df['filepath'] = df['filepath'].apply(lambda x: x.replace(self.rootdir, "")) df.to_csv(EMBEDDING_FILEPATH, index=False) else: df = self.chromadb_embedding(df) """ Please uncomment below, currently assumption is each run we create embedd based on python files dir """ import numpy as np df = pd.read_csv(EMBEDDING_FILEPATH) df["code_embedding"] = df["code_embedding"].apply(eval).apply(np.array) except Exception as e: self.log.info("Error in get_embeddings_pyfiles for embedding conversion process. Error Message: "+str(e)) raise Exception("Error in get_embeddings_pyfiles for embedding conversion process.") return df,embedding_cost def search_functions_vectordb(df,db, code_query, n=3, pprint=True, n_lines=7): """ Search function for user query (prompt content), used for vector database embedding query option. """ try: docs = db.similarity_search_with_score(code_query )[:n] docs = [{"similarities":score, "code": d.page_content, **d.metadata} for d,score in docs] res = pd.DataFrame(docs).drop("_additional", axis=1) ##Uncomment for debug # if pprint: # for r in res.iterrows(): # print(r[1].filepath+" : "+r[1].function_name + " score=" + str(round(r[1].similarities, 3))) # print("\n".join(r[1].code.split("\n")[:n_lines])) # print('-'*70) except Exception as e: self.log.info("Error in search_functions_vectordb to get similarity information based on user query. Error Message: "+str(e)) raise Exception("Error in search_functions_csv to get similarity information based on user query.") return res def search_functions_csv(self,df, code_query, n=3, pprint=True, n_lines=7): """ Search function for user query (prompt content), used for csv embedding query option. """ try: embedding = get_embedding(code_query, engine=self.generativeai_embedding_engine) df['similarities'] = df.code_embedding.apply(lambda x: cosine_similarity(x, embedding)) res = df.sort_values('similarities', ascending=False) ## uncomment for debug purpose # if pprint: # for r in res.iterrows(): # print(r[1].filepath+" : "+r[1].function_name + " score=" + str(round(r[1].similarities, 3))) # print("\n".join(r[1].code.split("\n")[:n_lines])) # print('-'*70) except Exception as e: self.log.info("Error in search_functions_functions_csv to get similarity information based on user query. 
Error Message: "+str(e)) raise Exception("Error in search_functions_csv to get similarity information based on user query.") return res def get_prediction(self,prompt_data): """ To get prediction for given user data """ try: all_funcs,code_root,code_files,total_locs=self.get_clone_function_details() if not isinstance(all_funcs,type(None)): df,embedding_cost=self.get_embeddings_pyfiles(all_funcs) res = self.search_functions_csv(df, prompt_data, n=3) return res else: return dict({"error":"Empty_root_directory"}) except Exception as e: self.log.info("Error in get prediction for user prompt information. Error Message: "+str(e)) raise Exception("Error in get prediction for user prompt information. .") def get_vdb_status(self,vdb_name): """ To check chromadb python package installed or not""" try: vdb_name = 'chromadb' vdb_status=False lib_name = importlib.util.find_spec(vdb_name) if lib_name is None: vdb_status=False else: vdb_status=True ## Processing the files and create a embedding and save it using csv. except Exception as e: self.log.info("Error in checking chromadb installed or not. Error Message: "+str(e)) raise Exception("Error in checking chromadb installed or not. .") ## Currently vector db (chromadb) not implemented, so vdb_status is set as False vdb_status = False return vdb_status def create_chroma_db(self,documents, name): """ Craete chromadb instance (persistant) """ #get openai status openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() try: from langchain.embeddings.openai import OpenAIEmbeddings embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1) except: from chromadb.utils import embedding_functions embed_function = embedding_functions.OpenAIEmbeddingFunction( api_key=openai.api_key, api_base=openai.api_base, api_type = openai.api_type, model_name=self.generativeai_embedding_model ) try: # chroma_client = chromadb.Client() persist_directory = self.embedd_storage_path chroma_client = chromadb.Client( Settings( persist_directory=persist_directory, chroma_db_impl="duckdb+parquet", ) ) # Start from scratch chroma_client.reset() chroma_client.persist() try: embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1) except: embed_function = OpenAIEmbeddings() db = Chroma.from_documents(documents, embed_function, persist_directory=persist_directory) db.persist() except Exception as e: self.log.info("Error in chromadb based embeding creation. Error Message: "+str(e)) raise Exception("Error in chromadb based embeding creation.") return db,chroma_client def chromadb_embedding(self,df): """ Base chromadb embedding creation and storage function, it calls above create_chroma_db() to create db. """ try: documents = df.apply(lambda x: Document(page_content= x["code"], metadata= {"function_name": x["function_name"], "filepath": x["filepath"]}), axis=1) #setup the chromadb db,chroma_client = self.create_chroma_db(documents,collection_name) try: chromadb_df=pd.DataFrame(db) except: db_json = db.get(include=['embeddings', 'documents', 'metadatas']) chromadb_df = pd.read_json(db_json) self.log.info("chromadb_df records (top ~5 records): "+str(chromadb_df.head(5))) except Exception as e: self.log.info("chromadb embedding error. 
Error message: "+str(e)) return chromadb_df def num_tokens_from_string(self, string: str) -> int: """ Get number of tokens of text using tiktokens lib.""" encoding = tiktoken.encoding_for_model("text-embedding-ada-002") num_tokens = len(encoding.encode(string)) return num_tokens def validate_code_clone_with_explanation(self,code1, code2, verbose=False): """ Validate clone detection code snippet and get explanation from openai chat model (gpt-3.5-turbo) """ ## Openai using 4 chars as 1 token, same method here followed. Here,we dont need to call tiktoken lib to save cost. if (len(code1)/4 >1900): chunk = self.chunk_functions(code1, 1900) code1 = chunk[0] print("In side , len of code1\n",len(code1)) if (len(code2)/4 >1900): chunk = self.chunk_functions(code2, 1900) code2 = chunk[0] print("In side , len of code2\n",len(code2)) try: SYS_ROLE = "You are a Senior Code Reviewer, who helps in Code review and integration using code clone detection approach." openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() prompt = f"""Given two code snippets, find if they are clones or not with suitable explaination. Four types of clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. Use JSON object format with following keys: IsClone: (True, False) wheather two code snippets are clone or not. CloneType: (Exact clone, Parameterized clone, Never-miss clone, Semantic clone) Choose appropriate clone type or "None". Explanation: A short explanation for the above answer. ### Code Snippets: ## Code 1: {code1} ## Code 2: {code2} ### Answer(Valid JSON object): """ response = openai.ChatCompletion.create(deployment_id=self.generativeai_deploymentId, messages=[{"role": "system", "content": SYS_ROLE}, {"role": "user", "content": prompt},], temperature = 0,max_tokens = 3900,request_timeout=90) text = response['choices'][0]['message']['content'] if verbose: self.log.info("validate_code_clone_with_explanation, text: "+str(text)) except Exception as e: print(" validate_code_clone_with_explanation: \n",e) response = "OpenAI Model Connection" if e.code == "invalid_request" and "token limit" in e.message.lower(): # Implement your logic to reduce the length of messages or split them into smaller parts # Modify messages or take appropriate action self.log.info("Given function is too large and exceeds openai chat model token limit,please review the source file function length. "+str(e)) return response def validate_code_clone_with_explanation_davinci(self,code1, code2, verbose=False): """ Validate clone detection code snippet and get explanation from openai chat model (davinci) """ if (len(code1)/4 >1900): chunk = self.chunk_functions(code1, 1900) code1 = chunk[0] if (len(code2)/4 >1900): chunk = self.chunk_functions(code2, 1900) code2 = chunk[0] try: SYS_ROLE = "You are a Senior Code Reviewer, who helps in Code review and integration. Detecting code clone in the repository." 
openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials() # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() prompt = f"""Given two code snippets, find if they are clones or not with suitable explaination. Four types of clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. Use JSON object format with following keys: IsClone: (True, False) wheather two code snippets are clone or not. CloneType: (Exact clone, Parameterized clone, Never-miss clone, Semantic clone) Choose appropriate clone type or "None". Explanation: A short explanation for the above answer. ### Code Snippets: ## Code 1: {code1} ## Code 2: {code2} ### Answer(Valid JSON object): """ # response = openai.Completion.create(engine='Text-Datvinci-03', prompt=prompt, temperature=0, max_tokens=1166) response = openai.Completion.create(engine=self.generativeai_chat_model, prompt=prompt, temperature=0, max_tokens=3900) text = response.choices[0]["text"] if verbose: self.log.info("validate_code_clone_with_explanation, text (chatmodel response) "+str(text)) except Exception as e: response = "OpenAI Model Connection Error" if e.code == "invalid_request" and "token limit" in e.message.lower(): # Implement your logic to reduce the length of messages or split them into smaller parts # Modify messages or take appropriate action self.log.info("Given function is too large and exceeds openai chat model token limit,please review the source file function length. Error msg: "+str(e)) return response ## For dbscan based clone detction from python files, we use CodeCloneDetection parent class. (Using inheritance) class CodeCloneDetectionFiles(CodeCloneDetection): """For dbscan based clone detction from python files, we use CodeCloneDetection parent class. (Using inheritance) """ def __init__(self,root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId): super().__init__(root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId) def get_embedd_fns(self): """ To get embedd vector, using parent class methods""" try: ## Processing the files and create a embedding and save it using csv. vdb_status = super().get_vdb_status('chromadb') self.log.info("<------- AION Code Clone Detection started ... ------>\n ") if not vdb_status: openai_api_type,openai_api_base,openai_api_key,openai_api_version = super().getOpenAICredentials() # openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials() all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() if (openai.api_key or openai_api_key): if not isinstance(all_funcs,type(None)): embedded_df,embedding_cost=super().get_embeddings_pyfiles(all_funcs) else: return status except Exception as e: # print("Error in getting embedding vector using openai. 
Error message: ",e) self.log.info("Error in getting embedding vector using openai. Error message: "+str(e)) raise Exception("Error in getting embedding vector using openai.") return embedded_df,embedding_cost def dbscan_clone_detection(self,df): """ DBScan based code clone similarity detection (for functions in given dir """ try: vdb_status = super().get_vdb_status('chromadb') if not vdb_status: X = np.array(list(df.code_embedding.values)) else: X = np.array(list(df.embeddings.values)) #X = StandardScaler().fit_transform(X) db = DBSCAN(eps=0.2, min_samples=2).fit(X) labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) df["cluster"] = labels cluster_result = [] for i in range(n_clusters_): cluster_df = df.loc[df['cluster'] == i] # with open("{}/cluster_{}.txt".format(self.ccdreportpath,i), "w") as f: for index, row in cluster_df.iterrows(): cluster_result.append({"cluster_id": i,"filepath": row.filepath,"function_name": row.function_name,"code": row.code }) # f.write(f"Source File: {row.filepath}, Function Name: {row.function_name}") #f.write(f"\n{row.code}\n\n{'-'*80}\n\n") cluster_result_df = pd.DataFrame(cluster_result) codeclonereport_df = os.path.join(self.ccdreportpath,'cluster_result.csv') cluster_result_df.to_csv(codeclonereport_df, index=False) return cluster_result_df except Exception as e: self.log.info("Error in dbscan based similar code clone clustering. Error Message: "+str(e)) raise Exception("Error in dbscan based similar code clone clustering.") def make_pairs(self,data_list:list): try: if len(data_list) <=1: return [] return [(data_list[0], d) for d in data_list[1:]] + self.make_pairs(data_list[1:]) except Exception as e: self.log.info("Error in make pairs function, error message: "+str(e)) raise Exception("Error in clone code mapping.") def code_clone_check_with_retry(self,code1,code2, retry_interval=1): """ Call chat models for code clone detection with retry mechanism. """ try: # res = super().validate_code_clone_with_explanation(code1,code2) ##sj if (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'text-datvinci-03'): res = super().validate_code_clone_with_explanation_davinci(code1,code2) return res elif (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'): res = super().validate_code_clone_with_explanation(code1,code2) return res except (RateLimitError, Timeout) as e: self.log.info("Calling chat model issue in code clone check function, error message: "+str(e)) time.sleep(retry_interval) return self.code_clone_check_with_retry(code1, code2) def res_formater(self,inp): """ Function to format gpt-3.5 or text-davinci-003 response body. """ try: line = inp.replace('{','') line = line.replace('}','') line = line.replace('"','') end=line.split(',') d1={} l2=[] for l in end: l=l.split(',') for i in l: l1=i.split(":") l2.append(l1) import pandas as pd df=pd.DataFrame(l2) df=df.T df.columns = df.iloc[0] df = df[1:] df.columns = df.columns.str.replace('[#,@,&,\']', '') # df.to_csv('test1.csv', index=False) response=df.iloc[0] fl=response.to_list() clone_status=fl[0] clone_type=fl[1] result=fl[2] except Exception as e: self.log.info("chat model response formatter error. 
Error message: "+str(e)) return clone_status,clone_type,result def getcloneresult_modelspecific(self,code_clone_check_tasks,embedding_cost): """ get the clone type and associated information from chat model response data. """ try: max_workers = min(len(code_clone_check_tasks), 100) all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() if (self.generativeai_chat_model.lower() == 'text-datvinci-03'): self.log.info("<--- Text-Datvinci-03 chat model based code clone detection. --->") code_clone_result = [] for task in code_clone_check_tasks: response=self.code_clone_check_with_retry(task[0]["code"], task[1]["code"]) with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: llm_requests = { executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks } with tqdm(total= len(llm_requests)) as progress: for future in concurrent.futures.as_completed(llm_requests): task = llm_requests[future] try: res = future.result() try: my_openai_obj1 = res["choices"][0]["text"] clone_status,clone_type,result = self.res_formater(my_openai_obj1) model_value=res['model'] total_tokens_value=res['usage']['total_tokens'] code_clone_result.append({"task": task, "result":result, "IsClone": clone_status, "CloneType": clone_type, "model":model_value, "total_tokens":total_tokens_value}) except Exception as e: self.log.info("getCloneReport, code_clone_result.append error: "+str(e)) except Exception as exc: self.log.info("getCloneReport error (text davinci chat model): "+str(exc)) progress.update() ## Please uncomment below part if you need to check chat model response body. #codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json') #with open(codeclonecheckresult_json, "w+") as fp: #json.dump(code_clone_result, fp, indent=2) code_clone_result_json=json.dumps(code_clone_result) clone_report=pd.read_json(code_clone_result_json) cr_totaltokens = clone_report['total_tokens'] total_amt = (cr_totaltokens).sum() * (0.002/1000) clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"]) clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"]) # clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"]) clone_report["clone_type"] = clone_report["CloneType"] code_dir = code_root total_files = len(code_files) total_locs = total_locs total_functions = len(all_funcs) total_tokens = clone_report['total_tokens'].sum() total_cost= embedding_cost + clone_report['total_tokens'].sum() * (0.002/1000) total_clones = len(clone_report[clone_report.clone_type != "None"]) code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False) code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')) clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1") ##Final report on code clone detection report_str = f"""Code_directory: {code_dir} Files: {total_files} LOCs: {total_locs} Functions: {total_functions} Total_code_clones_detected: {total_clones} 
Tokens used: {total_tokens} Total cost(embedding + clone check): {total_cost} Four_types_of_clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. Code_clones_count_by_clone_type: {code_clone_count_by_df} Clone_functions: {clone_functions} """ codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') with open(codeclonereport_txt, "w") as f: f.write(report_str) report_dict=dict({"Code_directory":code_dir,"total_files":total_files, "total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones, "total_tokens":total_tokens,"total_cost":total_cost, "Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions":clone_function_dict}) ## report for chat model is gpt 3.5 turbo elif (self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'): try: self.log.info("<--- gpt-3.5-turbo chat model based code clone detection. --->") code_clone_result = [] for task in code_clone_check_tasks: response=self.code_clone_check_with_retry(task[0]["code"], task[1]["code"]) with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: llm_requests = { executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks } with tqdm(total= len(llm_requests)) as progress: for future in concurrent.futures.as_completed(llm_requests): task = llm_requests[future] try: res = future.result() my_openai_obj1 = res["choices"][0]["message"]['content'] clone_status,clone_type,result = self.res_formater(my_openai_obj1) # result = json.loads(res['choices'][0]['message']['content']) total_tokens = res["usage"]["total_tokens"] code_clone_result.append({"task": task, "result":result , "CloneType": clone_type, "total_tokens": total_tokens}) except Exception as exc: self.log.info("gpt 3.5 chat model error: "+str(exc)) progress.update() except Exception as e: print("In gpt3.5,getcloneresult_modelspecific fn exception : \n",e) import traceback print("traceback, In gpt3.5,getcloneresult_modelspecific fn exception \n",traceback.print_exc()) ## Please uncomment below part if you need to check chat model response body. 
#codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json') #with open(codeclonecheckresult_json, "w+") as fp: #json.dump(code_clone_result, fp, indent=2) try: code_clone_result_json=json.dumps(code_clone_result) clone_report = pd.read_json(code_clone_result_json) codeclone_total_amt = clone_report["total_tokens"].sum() * (0.002/1000) clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"]) clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"]) # clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"]) clone_report["clone_type"] = clone_report["CloneType"] code_dir = code_root total_files = len(code_files) total_locs = total_locs total_functions = len(all_funcs) total_tokens = clone_report["total_tokens"].sum() except Exception as e: self.log.info("Error in getting clone report: "+str(e)) total_cost= embedding_cost + clone_report["total_tokens"].sum() * (0.002/1000) total_clones = len(clone_report[clone_report.clone_type != "None"]) code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False) code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')) clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1") report_str = f"""Code_directory: {code_dir} Files: {total_files} LOCs: {total_locs} Functions: {total_functions} Total code clones detected: {total_clones} Tokens used: {total_tokens} Total cost(embedding + clone check): {total_cost} Four types of clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. 5. None: Not a clone, discard this one. 
Code_clones_count_by_clone_type: {code_clone_count_by_df} Clone_functions: {clone_functions} """ codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt') with open(codeclonereport_txt, "w") as f: f.write(report_str) report_dict=dict({"Code_directory":code_dir,"total_files":total_files, "total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones, "total_tokens":total_tokens,"total_cost":total_cost, "Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions":clone_function_dict}) except Exception as e: self.log.info("Error in clone type and information retrival process .Error message: "+str(e)) return code_clone_result,report_str,report_dict def getCloneReport(self): """ To get the clone report from the given python directory """ try: self.log.info("To get clone report, we are calling embedding and chat model.") import time vdb_status = super().get_vdb_status('chromadb') start_time = time.time() # self.log.info("code clone detection start time."+str(start_time)) if not vdb_status: embedded_df,embedding_cost = self.get_embedd_fns() cluster_df = self.dbscan_clone_detection(embedded_df) cluster_df_group = cluster_df.groupby("cluster_id") len_cluster_df_group = len(cluster_df_group) code_clone_check_tasks = [] for name, group in cluster_df_group: res = self.make_pairs(group.to_dict(orient="records")) code_clone_check_tasks += res #For text-embedding-ada-002 and gpt 3.5 chat model code_clone_result,report_str,report_dict = self.getcloneresult_modelspecific(code_clone_check_tasks,embedding_cost) end_time = time.time() total_time_taken = end_time - start_time self.log.info("Total time taken for code clone detction: "+str(total_time_taken)) self.log.info("<------------- Final code clone report: -------------------> \n"+str(report_str)) report_df = pd.DataFrame.from_dict(report_dict, orient="index").reset_index() report_df.columns = ['ccd_properties', 'Values'] report_df=report_df.T codecloneresult_df = os.path.join(self.ccdreportpath,'code_clone_report_df.csv') report_df.to_csv(codecloneresult_df) return report_str,report_dict,report_df,json.dumps(report_str) else: #Below code indended for vector db. 
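# NOTE: get_vdb_status() above always returns False (the chromadb integration is
# disabled), so this vector-db branch of getCloneReport() is currently unreachable.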
all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() df = pd.DataFrame(all_funcs) df['filepath'] = df['filepath'].apply(lambda x: x.replace(code_root, "")) chromadb_df=super().chromadb_embedding(df) df = self.dbscan_clone_detection(chromadb_df) cluster_df_group = cluster_df.groupby("cluster_id") len_cluster_df_group = len(cluster_df_group) code_clone_check_tasks = [] for name, group in cluster_df_group: res = self.make_pairs(group.to_dict(orient="records")) code_clone_check_tasks += res code_clone_result = [] max_workers = min(len(code_clone_check_tasks), 100) with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor: llm_requests = { executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks } with tqdm(total= len(llm_requests)) as progress: for future in concurrent.futures.as_completed(llm_requests): task = llm_requests[future] try: res = future.result() code_clone_result.append({"task": task, "result": json.loads(res['choices'][0]['message']['content']), "total_tokens": res["usage"]["total_tokens"]}) except Exception as exc: print('%r generated an exception: %s' % (task, exc)) progress.update() with open("code_clone_check_result.json", "w+") as fp: json.dump(code_clone_result, fp, indent=2) code_clone_result_json=json.dumps(code_clone_result) clone_report=pd.read_json(code_clone_result_json) total_amt = clone_report["total_tokens"].sum() * (0.002/1000) clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"]) clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"]) clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"]) all_funcs,code_root,code_files,total_locs = super().get_clone_function_details() code_dir = code_root total_files = len(code_files) total_locs = total_locs total_functions = len(all_funcs) total_tokens = clone_report["total_tokens"].sum() # total_cost= embedding_cost + clone_report["total_tokens"].sum() * (0.002/1000) total_clones = len(clone_report[clone_report.clone_type != "None"]) code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql') clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False) code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')) clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1") ##Final report on code clone detection report_str = f"""Code_directory: {code_dir} Files: {total_files} LOCs: {total_locs} Functions: {total_functions} Total code clones detected: {total_clones} Tokens used: {total_tokens} Four types of clone: 1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces. 2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone. 3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones. 4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone. 
Code_clones_count_by_clone_type: {code_clone_count_by_df} Clone_functions: {clone_functions} """ with open("code_clone_report.txt", "w") as f: f.write(report_str) # print(report_str) self.log.info("<------------- Final code clone report: -------------------> \n"+str(report_str)) self.log.info("<------------- clone_functions code clone report: -------------------> \n"+str(clone_functions)) report_dict=dict({"Code_directory":code_dir,"total_files":total_files, "total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones, "total_tokens":total_tokens, "Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions": clone_function_dict}) report_df= pd.DataFrame([report_dict.keys(), report_dict.values()]).T report_df.columns = ["Code_directory", "total_files","total_locs","total_functions","total_clones","total_tokens","Code_clones_count_by_clone_type","clone_functions"] report_df.to_csv("code_clone_report_df.csv") return report_str,report_dict,report_df,json.dumps(report_str) except Exception as e: self.log.info("Error in clone detection function call. Error Message: \n"+str(e)) raise Exception("Error in clone detection function.") #For testing and code instance privacy if __name__=='__main__': ## For testing purpose.Uncomment n use. root_directory = r"C:\AION_Works\Anomaly_Detection\anomalydetectionpackage\code_clone_testing_pyfiles\code_clone_testing_pyfiles_large" embedd_storage_path = r"C:\AION_Works\ccddir" generativeai_credentials={'openai_baseurl':"", 'openai_key':"", 'openai_api_type':"", 'openai_api_version':"", 'generativeai_embedding_engine':"", 'generativeai_embedding_model':"", 'generativeai_chat_model':"", 'generativeai_deploymentId':""} openai_baseurl = generativeai_credentials['openai_baseurl'] openai_key = generativeai_credentials['openai_key'] openai_api_type = generativeai_credentials['openai_api_type'] openai_api_version = generativeai_credentials['openai_api_version'] generativeai_embedding_engine = generativeai_credentials['generativeai_embedding_engine'] generativeai_embedding_model = generativeai_credentials['generativeai_embedding_model'] generativeai_chat_model = generativeai_credentials['generativeai_chat_model'] generativeai_deploymentId = generativeai_credentials['generativeai_deploymentId'] codeclonedetection_obj = CodeCloneDetectionFiles(root_directory,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId) report_str,report_dict,report_json = codeclonedetection_obj.getCloneReport() print("End of code clone detection....\n")
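# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of codeclonedetection.py above). The class above
# clusters function embeddings with DBSCAN and then enumerates candidate clone
# pairs inside each cluster (make_pairs) before sending each pair to the chat
# model. The standalone snippet below reproduces only that clustering and pairing
# step on synthetic two-dimensional "embeddings", so it runs without any OpenAI
# credentials; the vectors and function names are illustrative assumptions.
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN

def make_pairs(items):
    # Same recursive pairing idea as CodeCloneDetectionFiles.make_pairs:
    # every unordered pair within one cluster becomes a clone-check task.
    if len(items) <= 1:
        return []
    return [(items[0], other) for other in items[1:]] + make_pairs(items[1:])

# Toy embeddings: two near-identical functions plus one unrelated outlier.
df = pd.DataFrame({
    "function_name": ["add_v1", "add_v2", "parse_csv"],
    "code_embedding": [np.array([0.90, 0.10]), np.array([0.88, 0.12]), np.array([0.10, 0.95])],
})
X = np.vstack(df["code_embedding"].values)
df["cluster"] = DBSCAN(eps=0.2, min_samples=2).fit(X).labels_  # -1 marks noise

clone_check_tasks = []
for cluster_id, group in df[df["cluster"] != -1].groupby("cluster"):
    clone_check_tasks += make_pairs(group.to_dict(orient="records"))

for a, b in clone_check_tasks:
    print(f"candidate clone pair: {a['function_name']} <-> {b['function_name']}")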
azureStorageDB.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import sqlite3 from pathlib import Path import json import os import rsa import boto3 #usnish import pandas as pd import time import sqlite3 class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() def table_exists(self, name): query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() return len(listOfTables) > 0 def read_data(self, table_name): query = f"SELECT * FROM {table_name}" row = self.cursor.execute(query).fetchall() return list(row) #return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def delete_record(self,table_name,col_name, col_value): try: query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'" self.conn.execute(query) self.conn.commit() return 'success' except Exception as e : print(str(e)) print("Deletion Failed") return 'error' def get_data(self,table_name,col_name,col_value): query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'" row = self.cursor.execute(query).fetchone() if(row == None): return [] return list(row) def write_data(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def close(self): self.conn.close() def add_new_azureStorage(request): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') if request.POST["azurename"] =='' or request.POST["azureaccountkey"] == '' or request.POST["containername"] == '' : return 'error' newdata = {} newdata['azurename'] = [request.POST["azurename"]] newdata['azureaccountkey'] = [request.POST["azureaccountkey"]] newdata['containername'] = [request.POST["containername"]] name = request.POST["azurename"] if sqlite_obj.table_exists("azurebucket"): if(len(sqlite_obj.get_data('azurebucket','azurename',name))>0): return 'error1' sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'azurebucket') except: return 'error' def get_azureStorage(): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') temp_data = sqlite_obj.read_data('azurebucket') data = [] for x in temp_data: data_dict = {} 
data_dict['azurename'] = x[0] data_dict['azureaccountkey'] = x[1] data_dict['containername'] = x[2] data.append(data_dict) except Exception as e: print(e) data = [] return data def read_azureStorage(name,directoryname,DATA_FILE_PATH): try: from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') data = sqlite_obj.get_data('azurebucket','azurename',name) except: data = [] found = False if len(data)!=0: storage_account_name = str(data[0]) storage_account_key = str(data[1]) azure_container_name = data[2] found = True try: if found: root_dir = str(directoryname) from azure.storage.filedatalake import DataLakeServiceClient import io import pandavro as pdx from detect_delimiter import detect try: service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", storage_account_name), credential=storage_account_key) print(azure_container_name) file_system_client = service_client.get_file_system_client(azure_container_name) print(root_dir) file_paths = file_system_client.get_paths(path=root_dir) main_df = pd.DataFrame() for path in file_paths: if not path.is_directory: file_client = file_system_client.get_file_client(path.name) file_ext = os.path.basename(path.name).split('.', 1)[1] if file_ext in ["csv", "tsv"]: with open(csv_local, "wb") as my_file: download = file_client.download_file() download.readinto(my_file) with open(csv_local, 'r') as file: data = file.read() row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t']) processed_df = pd.read_csv(csv_local, sep=row_delimiter) if file_ext == "parquet": download = file_client.download_file() stream = io.BytesIO() download.readinto(stream) processed_df = pd.read_parquet(stream, engine='pyarrow') if file_ext == "avro": with open(avro_local, "wb") as my_file: download = file_client.download_file() download.readinto(my_file) processed_df = pdx.read_avro(avro_local) if not main_df.empty: main_df = main_df.append(processed_df, ignore_index=True) else: main_df = pd.DataFrame(processed_df) except Exception as e: msg = str(e).split(".")[0] print(msg) return 'Error',str(msg), pd.DataFrame() return "Success","",main_df except: return 'Error',"Please check bucket configuration", pd.DataFrame() def remove_azure_bucket(name): from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR,'sqlite') sqlite_obj = sqlite_db(file_path,'config.db') return sqlite_obj.delete_record('azurebucket','azurename',name)
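# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of azureStorageDB.py above). add_new_azureStorage()
# stores each Azure container entry as a row of TEXT columns in an 'azurebucket'
# table through the sqlite_db helper. The snippet below mimics that round trip
# with an in-memory sqlite3 database so it runs without the AION config
# directory; the account name, key and container values are placeholders.
import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
entry = pd.DataFrame({
    "azurename": ["demo_account"],
    "azureaccountkey": ["<account-key-placeholder>"],
    "containername": ["training-data"],
})

# Same shape as sqlite_db.create_table(): every column is stored as TEXT.
cols = ", ".join(f"'{c}' TEXT" for c in entry.columns)
conn.execute(f"CREATE TABLE IF NOT EXISTS azurebucket ({cols});")
conn.executemany(
    "INSERT INTO azurebucket VALUES (?, ?, ?)",
    list(entry.itertuples(index=False, name=None)),
)
conn.commit()

# Equivalent of sqlite_db.get_data('azurebucket', 'azurename', name).
row = conn.execute(
    "SELECT * FROM azurebucket WHERE azurename=?", ("demo_account",)
).fetchone()
print(row)  # ('demo_account', '<account-key-placeholder>', 'training-data')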
labelling_utils.py
import json import os import random import time from avro.datafile import DataFileReader from avro.io import DatumReader from pyarrow.parquet import ParquetFile from snorkel.labeling.model import LabelModel from snorkel.labeling import PandasLFApplier, LFAnalysis import pandas as pd import pandavro as pdx import pyarrow as pa import numpy as np import platform from os.path import expanduser home = expanduser("~") if platform.system() == 'Windows': DATA_FILE_PATH = os.path.join(home,'AppData','Local','Programs','HCLTech','AION','data','storage') else: DATA_FILE_PATH = os.path.join(home,'HCLT','AION','data') def get_join(condition): if condition["join"] == 'and': return "&" elif condition["join"] == 'or': return "|" else: return "" def create_labelling_function(rule_list, label_list): lfs_main_func = 'def lfs_list_create():\n' lfs_main_func += '\tfrom snorkel.labeling import labeling_function\n' lfs_main_func += '\timport numpy as np\n' lfs_main_func += '\timport json\n' lfs_main_func += '\tABSTAIN = -1\n' lfs_main_func += '\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\n' lfs_list = '\tlfs_list=[' for rule in rule_list: lfs_list += 'lf_' + rule["rule_name"] + ',' lfs = '\t@labeling_function()\n' lfs += '\tdef lf_' + rule["rule_name"] + '(data):\n' lfs += '\t\treturn np.where(' for condition in rule["conditions"]: if "string" in condition["sel_datatype"]: if condition["sel_condition"] in ["==", "!="]: cond_statement = '(data["' + condition["sel_column"] + '"]' + condition[ "sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition) else: cond_statement = '(data["' + condition["sel_column"] + '"].' + condition[ "sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition) else: cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + \ str(condition["input_value"]) + ')' + get_join(condition) lfs += cond_statement lfs += ', labels.index("' + rule["label"] + '"), ABSTAIN)\n' lfs_main_func += lfs if lfs_list.endswith(","): lfs_list = lfs_list.rstrip(lfs_list[-1]) lfs_list += ']\n' else: lfs_list += ']\n' lfs_main_func += lfs_list lfs_main_func += '\treturn lfs_list\n' lfs_main_func += 'lfs_list_create()' f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w') f.write(lfs_main_func) f.close() return lfs_main_func def label_dataset(rule_list, file_ext, label_list, not_satisfy_label): file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." 
+ file_ext) if file_ext in ["csv", "tsv"]: df = pd.read_csv(file_path) elif file_ext == "json": df = pd.json_normalize(pd.read_json(file_path).to_dict("records")) elif file_ext == "avro": reader = DataFileReader(open(file_path, "rb"), DatumReader()) schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) elif file_ext == "parquet": df = pd.read_parquet(file_path, engine="pyarrow") labelling_functions = create_labelling_function(rule_list, label_list) exec(labelling_functions) lfs = eval('lfs_list_create()') applier = PandasLFApplier(lfs) l_data = applier.apply(df) label_model = LabelModel(cardinality=len(label_list) + 1, verbose=True) label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123) df["label"] = label_model.predict(L=l_data, tie_break_policy="abstain") df.loc[df["label"] == -1, "label"] = not_satisfy_label for item in label_list: df.loc[df["label"] == label_list.index(item), "label"] = item if file_ext in ["csv", "tsv"]: df.to_csv(os.path.join(DATA_FILE_PATH, "result_file." + file_ext), index=False) elif file_ext == "parquet": df.to_parquet(os.path.join(DATA_FILE_PATH, "result_file." + file_ext), engine="pyarrow", index=False) elif file_ext == "avro": pdx.to_avro(os.path.join(DATA_FILE_PATH, "result_file." + file_ext), df) else: raise ValueError("Invalid file format") num_records = len(df.index) size_take = 100 if num_records <= size_take: size_take = num_records display_df = df.sample(n=size_take) return display_df.to_html(classes='table table-striped text-left', justify='left', index=False) def create_sample_function(rule, label_list, not_satisfy_label): lfs_main_func = 'def lf_rule_apply(data):\n' lfs_main_func += '\timport numpy as np\n' lfs_main_func += '\tABSTAIN = -1\n' lfs_main_func += '\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\n' lfs = '\treturn np.where(' for condition in rule["conditions"]: if "string" in condition["sel_datatype"]: if condition["sel_condition"] in ["==", "!="]: cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + '("' + str( condition["input_value"]) + '"))' + get_join(condition) else: cond_statement = '(data["' + condition["sel_column"] + '"].str.' + condition[ "sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition) print(cond_statement) else: cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + \ str(condition["input_value"]) + ')' + get_join(condition) lfs += cond_statement lfs += ', "' + rule["label"] + '", "' + not_satisfy_label + '")\n' lfs_main_func += lfs return lfs_main_func def get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label): file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." 
+ file_ext) size_take = 100 if file_ext in ["csv", "tsv"]: num_records = sum(1 for line in open(file_path)) - 1 if num_records > size_take: skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) else: skip = 0 df = pd.read_csv(file_path, skiprows=skip) elif file_path.endswith(".json"): df = pd.read_json(file_path) df = pd.json_normalize(df.to_dict("records")) elif file_path.endswith(".avro"): reader = DataFileReader(open(file_path, "rb"), DatumReader()) schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) elif file_path.endswith(".parquet"): pf = ParquetFile(file_path) take_rows = next(pf.iter_batches(batch_size=size_take)) df = pa.Table.from_batches([take_rows]).to_pandas() # file_content = pd.read_parquet(file_path, engine="pyarrow") else: raise ValueError("Invalid file format") rule_applier_func = create_sample_function(rule_json, label_list, not_satisfy_label) exec(rule_applier_func) df[rule_json["rule_name"]] = eval('lf_rule_apply')(df) return df.to_html(classes='table table-striped text-left', justify='left', index=False) def create_sample_function_ver2(rule_json, label_list, not_satisfy_label): lfs_main_func = 'def lf_rule_apply(data):\n' lfs_main_func += '\timport numpy as np\n' lfs_main_func += '\tABSTAIN = -1\n' lfs_main_func += '\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\n' counter = 0 for condition in rule_json["conditions"]: lfs_return = condition["sel_label"] if counter > 0: lfs_return_condition = '\telif' else: lfs_return_condition = '\tif' for label_condition in condition["label_condition"]: if label_condition["sel_datatype"] == "string": if label_condition["sel_condition"] == "contains": lfs_return_condition += '((' + str(label_condition["input_value"]) + ') in data["' + \ label_condition["sel_column"] + '"])' + get_join(label_condition) elif label_condition["sel_condition"] in ["==", "!="]: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[ "sel_condition"] + '("' + str( label_condition["input_value"]) + '"))' + get_join(label_condition) else: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"].' + label_condition[ "sel_condition"] + '("' + str(label_condition["input_value"]) + '"))' + get_join( label_condition) else: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[ "sel_condition"] + str(label_condition["input_value"]) + ')' + get_join(label_condition) if get_join(label_condition) == "": lfs_return_condition += ":\n" lfs_return_condition += '\t\treturn "' + lfs_return + '"\n' lfs_main_func += lfs_return_condition counter += 1 lfs_return_condition = '\n\telse:\n' lfs_return_condition += '\t\treturn "' + not_satisfy_label + '"' lfs_main_func += lfs_return_condition return lfs_main_func def get_sample_result_of_individual_rule_ver2(rule_json, file_ext, label_list, not_satisfy_label): file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." 
+ file_ext) size_take = 100 if file_ext in ["csv", "tsv"]: num_records = sum(1 for line in open(file_path)) - 1 if num_records > size_take: skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take)) else: skip = 0 df = pd.read_csv(file_path, skiprows=skip) elif file_path.endswith(".json"): df = pd.read_json(file_path) df = pd.json_normalize(df.to_dict("records")) elif file_path.endswith(".avro"): reader = DataFileReader(open(file_path, "rb"), DatumReader()) schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) elif file_path.endswith(".parquet"): pf = ParquetFile(file_path) take_rows = next(pf.iter_batches(batch_size=size_take)) df = pa.Table.from_batches([take_rows]).to_pandas() # file_content = pd.read_parquet(file_path, engine="pyarrow") else: raise ValueError("Invalid file format") rule_applier_func = create_sample_function_ver2(rule_json, label_list, not_satisfy_label) exec(rule_applier_func) df[rule_json["rule_name"]] = df.apply(eval('lf_rule_apply'), axis=1) return df.to_html(classes='table table-striped text-left', justify='left', index=False) def create_labelling_function_ver2(rule_list, label_list): lfs_main_func = 'def lfs_list_create():\n' lfs_main_func += '\tfrom snorkel.labeling import labeling_function\n' lfs_main_func += '\timport numpy as np\n' lfs_main_func += '\timport json\n' lfs_main_func += '\tABSTAIN = -1\n' lfs_main_func += '\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\n' lfs_list = '\tlfs_list=[' for rule in rule_list: lfs_list += 'lf_' + rule["rule_name"] + ',' lfs = '\t@labeling_function()\n' lfs += '\tdef lf_' + rule["rule_name"] + '(data):\n' counter = 0 for condition in rule["conditions"]: lfs_return = 'labels.index("' + condition["sel_label"] + '")' if counter > 0: lfs_return_condition = '\t\telif' else: lfs_return_condition = '\t\tif' for label_condition in condition["label_condition"]: if label_condition["sel_datatype"] == "string": if label_condition["sel_condition"] == "contains": lfs_return_condition += '((' + str(label_condition["input_value"]) + ') in data["' + \ label_condition["sel_column"] + '"])' + get_join(label_condition) elif label_condition["sel_condition"] in ["==", "!="]: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[ "sel_condition"] + '("' + str( label_condition["input_value"]) + '"))' + get_join(label_condition) else: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"].' 
+ label_condition[ "sel_condition"] + '("' + str(label_condition["input_value"]) + '"))' + get_join( label_condition) else: lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[ "sel_condition"] + str(label_condition["input_value"]) + ')' + get_join(label_condition) if get_join(label_condition) == "": lfs_return_condition += ":\n" lfs_return_condition += '\t\t\treturn ' + lfs_return + '\n' lfs += lfs_return_condition counter += 1 lfs_return_condition = '\n\t\telse:\n' lfs_return_condition += '\t\t\treturn ABSTAIN\n' lfs += lfs_return_condition lfs_main_func += lfs if lfs_list.endswith(","): lfs_list = lfs_list.rstrip(lfs_list[-1]) lfs_list += ']\n' else: lfs_list += ']\n' lfs_main_func += lfs_list lfs_main_func += '\treturn lfs_list\n' lfs_main_func += 'lfs_list_create()' # f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w') # f.write(lfs_main_func) # f.close() return lfs_main_func def get_rule_name_list(rule_list): rule_name_list = [] for rule in rule_list: rule_name_list.append(rule["rule_name"]) return rule_name_list def label_dataset_ver2(request,rule_list, file_ext, label_list, not_satisfy_label, label_weightage, include_proba): file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." + file_ext) if file_ext in ["csv", "tsv"]: df = pd.read_csv(file_path) elif file_ext == "json": df = pd.json_normalize(pd.read_json(file_path).to_dict("records")) elif file_ext == "avro": reader = DataFileReader(open(file_path, "rb"), DatumReader()) schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) elif file_ext == "parquet": df = pd.read_parquet(file_path, engine="pyarrow") labelling_functions = create_labelling_function_ver2(rule_list, label_list) exec(labelling_functions) lfs = eval('lfs_list_create()') applier = PandasLFApplier(lfs) l_data = applier.apply(df) label_model = LabelModel(cardinality=len(label_list), verbose=True) label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123, class_balance=label_weightage) df["label"] = label_model.predict(L=l_data, tie_break_policy="abstain") if include_proba: prediction_of_prob = label_model.predict_proba(L=l_data) for label in label_list: df[label + "_prob"] = np.around(prediction_of_prob[:, label_list.index(label)], 2) * 100 df.loc[df["label"] == -1, "label"] = not_satisfy_label filetimestamp = str(int(time.time())) datasetName = "AION_labelled_"+filetimestamp + '.' 
+ file_ext request.session['AION_labelled_Dataset'] = datasetName for item in label_list: df.loc[df["label"] == label_list.index(item), "label"] = item if file_ext in ["csv", "tsv"]: df.to_csv(os.path.join(DATA_FILE_PATH, datasetName), index=False) elif file_ext == "parquet": df.to_parquet(os.path.join(DATA_FILE_PATH, datasetName), engine="pyarrow", index=False) elif file_ext == "avro": pdx.to_avro(os.path.join(DATA_FILE_PATH, datasetName), df) else: raise ValueError("Invalid file format") #### saving file to database from appbe.dataPath import DATA_DIR from appbe.sqliteUtility import sqlite_db file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') newdata = {} newdata['datapath'] = [os.path.join(DATA_FILE_PATH, datasetName)] newdata['datasetname'] = [datasetName] sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest') num_records = len(df.index) size_take = 100 if num_records <= size_take: size_take = num_records display_df = df.sample(n=size_take) weightage = np.around(label_model.get_weights(), 2) rule_name_list = get_rule_name_list(rule_list) analysis_df = LFAnalysis(l_data, lfs).lf_summary() analysis_df["Rule"] = analysis_df.index analysis_df["Rule"] = analysis_df["Rule"].str.replace("lf_", "") analysis_df = analysis_df[["Rule", "Polarity", "Coverage", "Overlaps", "Conflicts"]] weightage_dict = dict(zip(rule_name_list, weightage)) analysis_json = analysis_df.to_dict(orient="records") for item in analysis_json: item["Weightage"] = weightage_dict[item["Rule"]] analysis_df = pd.json_normalize(analysis_json) # rules_weightage = [] # for key in weightage_dict: # rules_weightage.append({ # "label": key, # "y": weightage_dict[key], # "legendText": key # }) response = { # "rule_name_list": rule_name_list, # "weightage_list": list(weightage), "analysis_df": analysis_df.to_html(classes='table table-striped text-left', justify='left', index=False), "result_html": display_df.to_html(classes='table table-striped text-left', justify='left', index=False) } return response def get_label_and_weightage(test_file_ext, marked_label_column,file_delim_test, custom_test_delim ): file_path = os.path.join(DATA_FILE_PATH, "test_data_file." + test_file_ext) if test_file_ext in ["csv", "tsv"]: df = pd.read_csv(file_path) elif test_file_ext == "json": df = pd.json_normalize(pd.read_json(file_path).to_dict("records")) elif test_file_ext == "avro": reader = DataFileReader(open(file_path, "rb"), DatumReader()) schema = json.loads(reader.meta.get('avro.schema').decode('utf-8')) df = pdx.read_avro(file_path, schema=schema, na_dtypes=True) elif test_file_ext == "parquet": df = pd.read_parquet(file_path, engine="pyarrow") json_df = pd.DataFrame(df[marked_label_column].value_counts(normalize=True) * 100) json_dict = json.loads(json_df.to_json()) label_with_weightage = [] for k in json_dict[marked_label_column]: label_with_weightage.append( {"label_name": k, "label_weightage": np.around(json_dict[marked_label_column][k], 2)}) return label_with_weightage
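# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of labelling_utils.py above). label_dataset_ver2()
# builds Snorkel labeling functions as source strings from user-defined rules and
# exec()s them. The snippet below shows the same weak-supervision flow with two
# hand-written labeling functions on a tiny DataFrame: apply the LFs, fit a
# LabelModel, then map abstains to a fallback label. The column name, rule
# thresholds and label names are made-up assumptions.
import pandas as pd
from snorkel.labeling import labeling_function, PandasLFApplier
from snorkel.labeling.model import LabelModel

ABSTAIN, SPAM, HAM = -1, 0, 1

@labeling_function()
def lf_contains_offer(row):
    return SPAM if "offer" in row["text"].lower() else ABSTAIN

@labeling_function()
def lf_short_message(row):
    return HAM if len(row["text"]) < 20 else ABSTAIN

df = pd.DataFrame({"text": [
    "Limited time offer, click now to claim your prize today",
    "See you at lunch",
    "Special offer just for you, reply immediately to this mail",
    "Thanks, got it",
]})

applier = PandasLFApplier([lf_contains_offer, lf_short_message])
L = applier.apply(df)

label_model = LabelModel(cardinality=2, verbose=False)
label_model.fit(L, n_epochs=100, seed=123)

mapping = {ABSTAIN: "not_satisfied", SPAM: "spam", HAM: "ham"}
df["label"] = [mapping[int(p)] for p in label_model.predict(L=L, tie_break_policy="abstain")]
print(df)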
s3buckets.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import os import rsa import boto3 #usnish import pandas as pd import time def add_new_bucket(request): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] if request.POST["aionreferencename"] =='' or request.POST["s3bucketname"] == '' or request.POST["awsaccesskey"] == '' : return 'error' pkeydata='''-----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/ lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB -----END RSA PUBLIC KEY-----''' pubkey = rsa.PublicKey.load_pkcs1(pkeydata) awssecretaccesskey = rsa.encrypt(request.POST["awssecretaccesskey"].encode(), pubkey) print(awssecretaccesskey) newdata = {} newdata['Name'] = request.POST["aionreferencename"] newdata['AWSAccessKeyID'] = request.POST["awsaccesskey"] newdata['AWSSecretAccessKey'] = str(awssecretaccesskey) newdata['S3BucketName'] = request.POST["s3bucketname"] data.append(newdata) with open(file_path, 'w') as f: json.dump(data, f) f.close() def get_s3_bucket(): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] return data def read_s3_bucket(name,filename,DATA_FILE_PATH): privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw 0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ 4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13 r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb 
JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL -----END RSA PRIVATE KEY-----''' try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] awssecretaccesskey = '' found = False for x in data: if x['Name'] == name: awssecretaccesskey = x['AWSSecretAccessKey'] aws_access_key_id = x['AWSAccessKeyID'] bucketName = x['S3BucketName'] found = True break if found: privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') awssecretaccesskey = eval(awssecretaccesskey) awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey) awssecretaccesskey = awssecretaccesskey.decode('utf-8') #awssecretaccesskey = 'SGcyJavYEQPwTbOg1ikqThT+Op/ZNsk7UkRCpt9g'#rsa.decrypt(awssecretaccesskey, privkey) client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(awssecretaccesskey)) #print(bucketName,filename) try: response = client_s3.get_object(Bucket=bucketName, Key=filename) df = pd.read_csv(response['Body']) except Exception as e: print(e)#usnish return 'Error', pd.DataFrame() #return 'Error', pd.DataFrame() return 'Success',df return 'Error', pd.DataFrame()
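# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the secret-key handling in
# add_new_bucket()/read_s3_bucket() is an RSA round-trip -- encrypt with the public
# key, persist str(ciphertext), recover the bytes with eval(), then decrypt with the
# private key. A freshly generated keypair is used here instead of the embedded
# PKCS#1 keys, and the secret value is a placeholder.
# --------------------------------------------------------------------------
import rsa

pubkey, privkey = rsa.newkeys(2048)                 # demo keypair only; generation takes a moment
secret = "example-aws-secret-access-key"

ciphertext = rsa.encrypt(secret.encode(), pubkey)   # what add_new_bucket stores, via str(...)
stored = str(ciphertext)
recovered = eval(stored)                            # what read_s3_bucket does to get the bytes back
plaintext = rsa.decrypt(recovered, privkey).decode("utf-8")
assert plaintext == secret
# Note: base64-encoding the ciphertext would avoid eval() on the stored repr.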
alchemy.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pyodbc as pyodbc import pandas as pd import json import sqlalchemy as db import pandas as pd import urllib def get_connection(request): dbType = request.session['dbType'] connection_string = "" if dbType.lower()=="sqlite": filepath = request.session['filepath'] #table = request.session["tablenamesql"] connection_string = "sqlite:///"+str(filepath) elif dbType.lower() in ["postgresql","mysql","mssql"]: db_name = request.session['dbname'] password = request.session['password'] user = request.session['username'] port = request.session['port'] host = request.session['host'] password=urllib.parse.quote_plus(password) if dbType.lower()=="postgresql": connection_string = "postgresql+psycopg2://" + user + ":" + password + "@" + host + ":" + port + "/" + db_name if dbType.lower()=="mysql": connection_string = "mysql+pyodbc://" + user + ":" + password + "@" + host + ":" + port + "/" + db_name if dbType.lower()=="mssql": driver=request.session['driver'] params = urllib.parse.quote_plus( 'Driver=%s;' % driver + 'Server=tcp:%s,' % host + '%s;' % port + 'Database=%s;' % db_name + 'Uid=%s;' % user + 'Pwd={%s};' % password + 'Encrypt=yes;' + 'TrustServerCertificate=no;' + 'Connection Timeout=30;') connection_string = 'mssql+pyodbc:///?odbc_connect=' + params return connection_string def list_tables(request): connection_string = get_connection(request) engine = db.create_engine(connection_string) connection = engine.connect() metadata = db.MetaData() metadata.reflect(engine) dt_list = [] try: dt_list= list(metadata.tables.keys()) print(dt_list) return dt_list except: print("Something went wrong") return dt_list def list_tables_fields(request,table_list): connection_string = get_connection(request) engine = db.create_engine(connection_string) connection = engine.connect() metadata = db.MetaData() metadata.reflect(engine) table_field_obj = {} table_field_obj['data'] = [] try: # filepath = request.session['filepath'] #table = request.session["tablenamesql"] table_list = json.loads(table_list) for table in table_list: tf_obj = {} tf_obj['TableName'] = str(table).strip() tf_obj['Fields']= [] table = db.Table(table, metadata, autoload=True, autoload_with=engine) col = table.columns.keys() tempdata = [] for x in col: my_list = {"column_name": x,"is_select":"false"} tempdata.append(my_list) tf_obj['Fields'] = tempdata table_field_obj['data'].append(tf_obj) return json.dumps(table_field_obj) except Exception as e: print("Something went wrong "+str(e)) return table_field_obj def get_data(connection_string,table): engine = db.create_engine(connection_string) connection = engine.connect() metadata = db.MetaData() metadata.reflect(engine) table = db.Table(table,metadata, autoload=True, autoload_with=engine) query = db.select([table]) ResultProxy = connection.execute(query) ResultSet = ResultProxy.fetchall() col = table.columns.keys() return pd.DataFrame(ResultSet, columns=col) def getDataFromSingleTable(request): dbType = request.session['dbType'] if dbType.lower() == "sqlite": table = 
request.session["tablenamesql"] else: table = request.session["tablename"] connection_string = get_connection(request) df = get_data(connection_string,table) return df def validatequery(request,table_details,join_details,where_details): resultdata = [] try: table_details = json.loads(table_details) join_details = json.loads(join_details) where_details = json.loads(where_details) connection_string = get_connection(request) engine = db.create_engine(connection_string) connection = engine.connect() metadata = db.MetaData() metadata.reflect(engine) sel_col = [] for item in table_details: table = item["TableName"] table = db.Table(table, metadata, autoload=True, autoload_with=engine) for ele in item["Fields"]: if str(ele["is_select"]).lower() == 'true': sel_col.append(table.columns[ele["column_name"]]) join_condition = [] where_clause = "" for item in join_details: table1 = item["Table1Name"] table1 = db.Table(table1, metadata, autoload=True, autoload_with=engine) left_join = table1.columns[item["Table1Field"]] table2 = item["Table2Name"] table2 = db.Table(table2, metadata, autoload=True, autoload_with=engine) right_join = table2.columns[item["Table2Field"]] join_condition = "{left_join} {Condition}= {right_join}".format(left_join=left_join, Condition=item["Condition"],right_join= right_join) '''dbType = request.session['dbType'] if dbType.lower()=="sqlite": for item in where_details: where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) if dbType.lower()=="postgresql": for item in where_details: where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) ''' if len(join_details)!=0: try: for item in where_details: where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) query =db.select(sel_col).\ select_from(table1.join(table2,db.text(join_condition))). \ where(db.and_(db.text(where_clause))) ResultProxy = connection.execute(query) ResultSet = ResultProxy.fetchall() except: for item in where_details: where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) query =db.select(sel_col).\ select_from(table1.join(table2,db.text(join_condition))). \ where(db.and_(db.text(where_clause))) ResultProxy = connection.execute(query) ResultSet = ResultProxy.fetchall() else: table = table_details[0]["TableName"] table = db.Table(table, metadata, autoload=True, autoload_with=engine) try: for item in where_details: where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) query = db.select(sel_col). \ select_from(table). \ where(db.and_(db.text(where_clause))) ResultProxy = connection.execute(query) ResultSet = ResultProxy.fetchall() except: for item in where_details: where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"]) query = db.select(sel_col). \ select_from(table). 
\ where(db.and_(db.text(where_clause))) ResultProxy = connection.execute(query) ResultSet = ResultProxy.fetchall() if len(ResultSet) > 0: data = pd.DataFrame(ResultSet) data.columns = ResultSet[0].keys() print(data) return data,"query executed successfully" else: return pd.DataFrame(),"No rows returned" # conn = get_connection(server_url,username_actian,password_actian,database_actian) # sql_text = query # cur = conn.cursor() # resultdata = simple_select(cur, query) # cur.close() #df = pd.DataFrame(resultdata) #print(df) except Exception as e: print(e) return pd.DataFrame(), str(e)
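# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the sqlite branch of
# get_connection() just prefixes the file path with "sqlite:///", and get_data()
# returns the selected table as a DataFrame. The equivalent round trip, using a
# throwaway database path, looks like this. (The reflection style used in the module,
# Table(..., autoload=True, autoload_with=engine) and db.select([table]), follows the
# SQLAlchemy 1.x API.)
# --------------------------------------------------------------------------
import pandas as pd
import sqlalchemy as db

connection_string = "sqlite:///" + "/tmp/aion_alchemy_demo.db"    # hypothetical path
engine = db.create_engine(connection_string)

pd.DataFrame({"id": [1, 2], "name": ["a", "b"]}).to_sql(
    "demo", engine, index=False, if_exists="replace")             # seed a table
print(pd.read_sql_table("demo", engine))                          # same shape as get_data(connection_string, "demo")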
trainresult.py
import json import os import pandas as pd import urllib, base64 def check_deepCheckPlots(deployedLocation): deepCheck = 'False' boostOverfit = 'False' boostOverfitCond = 'False' mi='False' miCond='False' smc = 'False' smsCond = 'False' boostOverfitFile= os.path.join(deployedLocation,'log','boosting_overfit.html') boostOverfitCondFile= os.path.join(deployedLocation,'log','boosting_overfit_condition.html') smcFile= os.path.join(deployedLocation,'log','smc.html') smcCondFile= os.path.join(deployedLocation,'log','smc_condition.html') miFile= os.path.join(deployedLocation,'log','mi.html') miConFile= os.path.join(deployedLocation,'log','mi_con.html') file_exists = os.path.exists(boostOverfitFile) if file_exists: deepCheck = 'True' boostOverfit = 'True' file_exists = os.path.exists(boostOverfitCondFile) if file_exists: deepCheck = 'True' boostOverfitCond = 'True' file_exists = os.path.exists(miFile) if file_exists: deepCheck = 'True' mi = 'True' file_exists = os.path.exists(miConFile) if file_exists: deepCheck = 'True' miCond = 'True' file_exists = os.path.exists(smcFile) if file_exists: deepCheck = 'True' smc = 'True' file_exists = os.path.exists(smcCondFile) if file_exists: deepCheck = 'True' smsCond = 'True' output = {'deepCheck':deepCheck,'boostOverfit':boostOverfit,'boostOverfitCond':boostOverfitCond,'mi':mi,'miCond':miCond,'smc':smc,'smsCond':smsCond} return output def FeaturesUsedForTraining(output_json): resultJsonObj = json.loads(output_json) result = {} result['Status'] = resultJsonObj['status'] result['ModelType'] = resultJsonObj['data']['ModelType'] result['ScoreType'] = resultJsonObj['data']['ScoreType'] result['FeaturesUsed'] = resultJsonObj['data']['featuresused'] result['BestModel'] = resultJsonObj['data']['BestModel'] return result def ParseResults(output_json): msg1 = 'Results...' 
resultJsonObj = json.loads(output_json) result = {} survical_images = [] result['Status'] = resultJsonObj['status'] result['ModelType'] = resultJsonObj['data']['ModelType'] if 'vmDetails' in resultJsonObj['data']: result['DeployLocation'] = resultJsonObj['data']['vmDetails'] else: result['DeployLocation'] = resultJsonObj['data']['deployLocation'] result['BestModel'] = resultJsonObj['data']['BestModel'] if str(resultJsonObj['data']['BestScore']) == "NA": result['BestScore'] = 'NA' else: result['BestScore'] = round(float(resultJsonObj['data']['BestScore']), 2) result['ScoreType'] = resultJsonObj['data']['ScoreType'] result['FeaturesUsed'] = resultJsonObj['data']['featuresused'] ##### Training Confusion Matrix result['problem_type'] = result['ModelType'] if result['ModelType'].lower() == 'timeseriesanomalydetection': result['problem_type'] = 'TimeSeriesAnomalydetection' if result['ModelType'] == 'classification' or result['ModelType'].lower() == 'distributed classification' or (result['ModelType'] == 'anomalydetection' and (result['BestScore']) != 0) or result['ModelType'] == 'ImageClassification': bestmodel = resultJsonObj['data']['BestModel'] if bestmodel.lower() == 'nas': modelSummary= os.path.join(result['DeployLocation'],'summary.txt') f = open(modelSummary, 'r') file_content = f.read() f.close() #print(file_content) result['modelSummary'] = file_content #task 11997 if result['ModelType'].lower() == 'classification': result['problem_type'] = 'Classification' elif result['ModelType'].lower() == 'anomalydetection': result['problem_type'] = 'AnomalyDetection' elif result['ModelType'].lower() == 'imageclassification': result['problem_type'] = 'ImageClassification' elif result['ModelType'].lower() == 'distributed classification': result['problem_type'] = 'Distributed Classification' try: result['deepCheck'] = check_deepCheckPlots(result['DeployLocation']) except Exception as e: print(e) if 'ConfusionMatrix' in resultJsonObj['data']['trainmatrix']: TrainConfusionMatrix = resultJsonObj['data']['trainmatrix']['ConfusionMatrix'] numLabels = len(TrainConfusionMatrix) TrainConfusionMatrixList = [] for act_key, value in TrainConfusionMatrix.items(): temp = {} temp['Label'] = act_key for pred_key, pred_value in value.items(): temp[pred_key] = pred_value TrainConfusionMatrixList.append(temp) result['TrainConfusionMatrix'] = TrainConfusionMatrixList TrainClassificationReport = resultJsonObj['data']['trainmatrix']['ClassificationReport'] numRows = len(TrainClassificationReport) TrainClassificationReportList = [] metrics_keys_list = [] for key, value in TrainClassificationReport.items(): temp = {} temp['Label'] = key if isinstance( value, dict): for metricsKey, metricsValue in value.items(): temp[metricsKey] = round(metricsValue, 4) if metricsKey not in metrics_keys_list: metrics_keys_list.append( metricsKey) else: if metrics_keys_list: for key in metrics_keys_list: temp[key] = round(value, 4) TrainClassificationReportList.append(temp) result['TrainClassificationReport'] = TrainClassificationReportList result['Train_ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['trainmatrix']['ROC_AUC_SCORE']), 4) else: result['TrainClassificationReport'] = '' result['Train_ROC_AUC_SCORE']='' ##### Testing Confusion Matix if 'ConfusionMatrix' in resultJsonObj['data']['matrix']: ConfusionMatrix = resultJsonObj['data']['matrix']['ConfusionMatrix'] numLabels = len(ConfusionMatrix) ConfusionMatrixList = [] for act_key, value in ConfusionMatrix.items(): temp = {} temp['Label'] = act_key for pred_key, pred_value in 
value.items(): temp[pred_key] = pred_value ConfusionMatrixList.append(temp) result['ConfusionMatrix'] = ConfusionMatrixList ClassificationReport = resultJsonObj['data']['matrix']['ClassificationReport'] numRows = len(ClassificationReport) ClassificationReportList = [] metrics_keys_list = [] for key, value in ClassificationReport.items(): temp = {} temp['Label'] = key if isinstance( value, dict): for metricsKey, metricsValue in value.items(): temp[metricsKey] = round(metricsValue, 4) if metricsKey not in metrics_keys_list: metrics_keys_list.append( metricsKey) else: if metrics_keys_list: for key in metrics_keys_list: temp[key] = round(value, 4) ClassificationReportList.append(temp) result['ClassificationReport'] = ClassificationReportList result['ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['matrix']['ROC_AUC_SCORE']), 4) elif result['ModelType'] == 'similarityIdentification': result['problem_type'] = 'similarityIdentification' elif result['ModelType'] == 'contextualSearch': result['problem_type'] = 'contextualSearch' elif result['ModelType'] == 'MultiLabelPrediction': result['problem_type'] = 'MultiLabelPrediction' matrix = resultJsonObj['data']['matrix'] training_matrix = [] for x in matrix: fmatrix = {} fmatrix['feature'] = x performance = {} for y in matrix[x]: performance[y] = matrix[x][y] fmatrix['performance'] = performance training_matrix.append(fmatrix) testmatrix = resultJsonObj['data']['testmatrix'] testing_matrix = [] for x in testmatrix: fmatrix = {} fmatrix['feature'] = x performance = {} for y in testmatrix[x]: performance[y] = testmatrix[x][y] fmatrix['performance'] = performance testing_matrix.append(fmatrix) result['testing_matrix'] = testing_matrix result['training_matrix'] = training_matrix elif result['ModelType'] == 'regression' or result['ModelType'].lower() == 'distributed regression': try: result['deepCheck'] = check_deepCheckPlots(result['DeployLocation']) except Exception as e: print(e) try: result['problem_type'] = 'Regression' testing_matrix = {} if 'MAE' in resultJsonObj['data']['matrix']: testing_matrix['MAE'] = float(resultJsonObj['data']['matrix'].get('MAE','0')) testing_matrix['R2Score'] = float(resultJsonObj['data']['matrix'].get('R2Score','0')) testing_matrix['MSE'] = float(resultJsonObj['data']['matrix'].get('MSE','0')) testing_matrix['MAPE'] = float(resultJsonObj['data']['matrix'].get('MAPE','0')) testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix'].get('RMSE','0')) testing_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['matrix'].get('Normalised RMSE(%)','0')) result['testing_matrix'] = testing_matrix training_matrix = {} training_matrix['MAE'] = float(resultJsonObj['data']['trainmatrix'].get('MAE','0')) training_matrix['R2Score'] = float(resultJsonObj['data']['trainmatrix'].get('R2Score','0')) training_matrix['MSE'] = float(resultJsonObj['data']['trainmatrix'].get('MSE','0')) training_matrix['MAPE'] = float(resultJsonObj['data']['trainmatrix'].get('MAPE','0')) training_matrix['RMSE'] = float(resultJsonObj['data']['trainmatrix'].get('RMSE','0')) training_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['trainmatrix'].get('Normalised RMSE(%)','0')) result['training_matrix'] = training_matrix except Exception as e: print(e) elif result['ModelType'] == 'Text Similarity': result['problem_type'] = 'textsimilarity' testing_matrix = {} testing_matrix['Accuracy'] = float(resultJsonObj['data']['matrix']['Accuracy']) testing_matrix['ROC_AUC'] = float(resultJsonObj['data']['matrix']['ROC AUC']) 
result['testing_matrix'] = testing_matrix training_matrix = {} training_matrix['Accuracy'] = float(resultJsonObj['data']['trainmatrix']['Accuracy']) training_matrix['ROC_AUC'] = float(resultJsonObj['data']['trainmatrix']['ROC AUC']) result['training_matrix'] = training_matrix elif result['ModelType'] == 'RecommenderSystem': #taskid 11190 result['problem_type'] = 'Recommender' testing_matrix = {} testing_matrix['RMSE'] = 'NA' result['testing_matrix'] = testing_matrix training_matrix = {} training_matrix['RMSE'] = 'NA' result['training_matrix'] = training_matrix elif result['ModelType'] == 'SurvivalAnalysis': result['problem_type'] = 'SurvivalAnalysis' survivalProbabilityjson = resultJsonObj['data']['survivalProbability'] performanceimages = resultJsonObj['data']['imageLocation'] start = '[' end = ']' performanceimages = performanceimages[performanceimages.find(start) + len(start):performanceimages.rfind(end)] performanceimages = performanceimages.split(',') for imagefile in performanceimages: imagefile = imagefile.replace("'", "") string = base64.b64encode(open(imagefile, "rb").read()) image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) survical_images.append(image_64) result['survivalProbability'] = survivalProbabilityjson elif result['ModelType'] == 'StateTransition': result['problem_type'] = 'StateTransition' stateprobabilityfile = os.path.join(result['DeployLocation'],'stateTransitionProbability.csv') clusterfile = os.path.join(result['DeployLocation'],'stateClustering.csv') if(os.path.isfile(stateprobabilityfile)): df_prob = pd.read_csv(stateprobabilityfile) df_prob = df_prob[['State','NextState','Probability']] result['probability'] = df_prob if(os.path.isfile(clusterfile)): df_clus = pd.read_csv(clusterfile) df_clus = df_clus[['clusterid','clusterlist']] result['cluster'] = df_clus elif result['ModelType'].lower() == 'timeseriesforecasting': #task 11997 result['problem_type'] = 'TimeSeriesForecasting' if result['BestModel'] == 'FBPROPHET': imagefile = os.path.join(result['DeployLocation'],'log','img','prophet_fig.png') string = base64.b64encode(open(imagefile, "rb").read()) image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) survical_images.append(image_64) testing_matrix = {} testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) result['testing_matrix'] = testing_matrix forecastjson = resultJsonObj['data']['forecasts'] result['forecast'] = forecastjson if result['BestModel'] == 'VAR': ''' FeaturesMatrix = resultJsonObj['data']['matrix']['FeaturesMatrix'] mae = '' mse = '' mape = '' rmse = '' for x in FeaturesMatrix: if mae != '': mae += ',' if mse != '': mse += ',' if R2 != '': R2 += ',' if rmse != '': rmse += ',' featurename = x['Features'] mae = mae + featurename + '=' + x['MAE'] mse = mse + featurename + '=' + x['MSE'] R2 = R2 + featurename + '=' + x['R2'] rmse = rmse + featurename + '=' + x['RMSE'] testing_matrix = {} testing_matrix['MAE'] = mae testing_matrix['MSE'] = mse testing_matrix['R2'] = R2 testing_matrix['RMSE'] = rmse result['testing_matrix'] = testing_matrix forecastjson = resultJsonObj['data']['forecasts'] result['forecast'] = forecastjson ''' testing_matrix = {} testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) 
testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) result['testing_matrix'] = testing_matrix forecastjson = resultJsonObj['data']['forecasts'] result['forecast'] = forecastjson elif result['BestModel'] == 'LSTM' or result['BestModel'] == 'MLP': testing_matrix = {} testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) result['testing_matrix'] = testing_matrix forecastjson = resultJsonObj['data']['forecasts'] result['forecast'] = forecastjson else: testing_matrix = {} testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE']) testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE']) testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2']) testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE']) result['testing_matrix'] = testing_matrix forecastjson = resultJsonObj['data']['forecasts'] result['forecast'] = forecastjson elif result['ModelType'] == 'topicmodelling': result['problem_type'] = 'TopicModelling' topics = resultJsonObj['topics'] df_topic = [] dataDict = {} for x in topics: dataDict = {} words = topics[x] print(words) word = '' for key in words: print(key) if word != '': word = word+', ' word = word+key+'('+str(round(words[key],2))+')' dataDict["ID"] = x dataDict["Words"] = word df_topic.append(dataDict) result['topics'] = df_topic elif result['ModelType'].lower() == 'association rule': result['problem_type'] = 'AssociationRules' deploy_location = result['DeployLocation'] freq_item_file = os.path.join(result['DeployLocation'],'frequentItems.csv') if(os.path.isfile(freq_item_file)): rules_file = os.path.join(result['DeployLocation'],'associationRules.csv') if(os.path.isfile(rules_file)): df_rules = pd.read_csv(rules_file) df_rules = df_rules[['antecedents','consequents','support','confidence','lift']] #df_rules['antecedents'] = df_rules['antecedents'] result['rules'] = df_rules else: result['error'] = 'There are no association found in frequent items above that threshold (minThreshold)' else: result['error'] = 'There are no frequent items above that threshold (minSupport), try by reducing the minSupport value' elif result['ModelType'] == 'clustering': result['problem_type'] = 'Clustering' testing_matrix = {} if 'SilHouette_Avg' in resultJsonObj['data']['matrix']: testing_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2) else: testing_matrix['SilHouette_Avg'] = 'NA' if 'DaviesBouldinScore' in resultJsonObj['data']['matrix']: testing_matrix['DaviesBouldinScore'] = round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2) else: testing_matrix['DaviesBouldinScore'] = 'NA' if 'CalinskiHarabazScore' in resultJsonObj['data']['matrix']: testing_matrix['CalinskiHarabazScore'] = round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2) else: testing_matrix['CalinskiHarabazScore'] = 'NA' centroidpath = os.path.join(result['DeployLocation'],'centers.csv') if(os.path.isfile(centroidpath)): df_center = pd.read_csv(centroidpath) df_center = df_center.rename(columns={"Unnamed: 0": "Cluster"}) result['centerpoints'] = round(df_center,2) result['testing_matrix'] = testing_matrix training_matrix = {} if 'SilHouette_Avg' in resultJsonObj['data']['matrix']: training_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2) training_matrix['DaviesBouldinScore'] = 
round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2) training_matrix['CalinskiHarabazScore'] = round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2) else: training_matrix['SilHouette_Avg'] = 'NA' training_matrix['DaviesBouldinScore'] = 'NA' training_matrix['CalinskiHarabazScore'] = 'NA' result['training_matrix'] = training_matrix #print(result) evaluatedModelsList = resultJsonObj['data']['EvaluatedModels'] #print(evaluatedModelsList) for index in range(len(evaluatedModelsList)): if evaluatedModelsList[index]['Score'] == 'NA': evaluatedModelsList[index]['Score'] = 'NA' else: evaluatedModelsList[index]['Score'] = round(float(evaluatedModelsList[index]['Score']), 4) if result['ModelType'] == 'classification': evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score'],reverse=True) else: evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score']) result['EvaluatedModels'] = evaluatedModelsList result['LogFile'] = resultJsonObj['data']['LogFile'] return result, survical_images
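# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): FeaturesUsedForTraining() only
# reads status, ModelType, ScoreType, featuresused and BestModel, so a minimal payload
# like the one below is enough to exercise it; ParseResults() expects the much larger
# training-output JSON (matrix, trainmatrix, EvaluatedModels, deployLocation, ...).
# All field values here are placeholders.
# --------------------------------------------------------------------------
import json

sample_output = json.dumps({
    "status": "SUCCESS",
    "data": {
        "ModelType": "classification",
        "ScoreType": "Accuracy",
        "featuresused": ["age", "income"],
        "BestModel": "Logistic Regression"
    }
})
print(FeaturesUsedForTraining(sample_output))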
compute.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import json def getInstanceonGCP(image,instances): try: from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('LLMTuning'): data = sqlite_obj.read_data('LLMTuning','image="'+image['id']+'"') for values in data: instance = {} instance['type'] = 'instance' instance['id'] = values[2] instance['workLoad'] = image['workLoad'] instance['machineImageProjectID'] = image['machineImageProjectID'] instance['ssh'] = image['ssh'] instance['machineConfiguration'] = image['machineConfiguration'] instance['instanceType'] = image['instanceType'] instances.append(instance) except Exception as e: print(e) return instances def getInstanceonAWS(amiid,instances): try: from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('LLMTuning'): data = sqlite_obj.read_data('LLMTuning','image="'+amiid['id']+'"') for values in data: instance = {} instance['type'] = 'instance' instance['id'] = values[2] instance['workLoad'] = amiid['workLoad'] instance['regionName'] = amiid['regionName'] instance['ssh'] = amiid['ssh'] instance['machineConfiguration'] = amiid['machineConfiguration'] instance['instanceType'] = amiid['instanceType'] instances.append(instance) except Exception as e: print(e) return instances def updatelocalsetings(request): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('computeInfrastructure'): updated_data = 'selectedInfrastructure="Local"' sqlite_obj.update_data(updated_data,'','computeInfrastructure') def updateToComputeSettings(infratructure): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('computeInfrastructure'): updated_data = 'selectedInfrastructure="'+infratructure+'"' sqlite_obj.update_data(updated_data,'','computeInfrastructure') def updateGCPConfig(request): try: credentialsJson = request.POST.get('credentialsJson') projectID = request.POST.get('gcpProjectid') machineType = request.POST.get('gcpmachineType') selectedID = request.POST.get('gcpInstance') gcpZone = request.POST.get('gcpZone') workload = request.POST.get('gcpworkload') noOfInstance = request.POST.get('GCPnoofinstance') #print(credentialsJson,projectID,machineType,selectedID,gcpZone,workload,noOfInstance) if credentialsJson != '' and projectID != '': from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('gcpCredentials'): 
updated_data = 'credentialsJson="'+credentialsJson+'",projectID="'+projectID+'",machineType="'+machineType+'",selectedID="'+selectedID+'",regionName="'+gcpZone+'",noOfInstance="'+str(noOfInstance)+'",workload="'+workload+'"' sqlite_obj.update_data(updated_data,'','gcpCredentials') else: newdata = {} newdata.update({'id':['1'],'credentialsJson': [credentialsJson],'projectID': [projectID],'machineType':[machineType],'selectedID':[selectedID],'regionName':[gcpZone],'noOfInstance':[noOfInstance],'workload':[workload]}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcpCredentials') return('success') else: return('error') except Exception as e: print(e) return('error') def updateComputeConfig(request): try: AWSAccessKeyID = request.POST.get('AWSAccessKeyID') AWSSecretAccessKey = request.POST.get('AWSSecretAccessKey') workload = request.POST.get('workload') machineType = request.POST.get('machineType') selectedID = request.POST.get('amiInstance') regionName = request.POST.get('regionName') noOfInstance = request.POST.get('NoOfInstance') securitygroupid = request.POST.get('AWSSecuritygroupID') if AWSAccessKeyID != '' and AWSSecretAccessKey != '': from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') if sqlite_obj.table_exists('awsCredentials'): column_names = sqlite_obj.column_names('awsCredentials') if 'securitygroupid' not in column_names: query = 'Alter Table awsCredentials ADD securitygroupid TEXT' sqlite_obj.execute_query(query) updated_data = 'AWSAccessKeyID="'+AWSAccessKeyID+'",AWSSecretAccessKey="'+AWSSecretAccessKey+'",machineType="'+machineType+'",selectedID="'+selectedID+'",regionName="'+regionName+'",noOfInstance="'+noOfInstance+'",workload="'+workload+'",securitygroupid="'+securitygroupid+'"' sqlite_obj.update_data(updated_data,'','awsCredentials') else: newdata = {} newdata.update({'id':['1'],'AWSAccessKeyID': [AWSAccessKeyID],'AWSSecretAccessKey': [AWSSecretAccessKey],'machineType':[machineType],'selectedID':[selectedID],'regionName':[regionName],'noOfInstance':[noOfInstance],'workload':[workload],'securitygroupid':[securitygroupid]}) sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'awsCredentials') return('success') else: return('error') except Exception as e: print(e) return('error') def selectedInfratructure(): from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') selcInfra = 'Local' if sqlite_obj.table_exists('computeInfrastructure'): data = sqlite_obj.read_data('computeInfrastructure') for values in data: selcInfra = values[1] return selcInfra def readComputeConfig(): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','compute_conf.json')) f = open(file_path, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) from appbe.sqliteUtility import sqlite_db from appbe.dataPath import DATA_DIR import pandas as pd file_path = os.path.join(DATA_DIR, 'sqlite') sqlite_obj = sqlite_db(file_path, 'config.db') selcInfra = 'Local' if sqlite_obj.table_exists('computeInfrastructure'): data = sqlite_obj.read_data('computeInfrastructure') for values in data: selcInfra = values[1] else: data = {} data.update({'id':['1'],'selectedInfrastructure': ['Local']}) sqlite_obj.write_data(pd.DataFrame.from_dict(data),'computeInfrastructure') 
configSettingsJson['computeInfrastructure'] = selcInfra for ami in configSettingsJson['AWS_EC2']['amis']: configSettingsJson['AWS_EC2']['instances'] = getInstanceonAWS(ami,configSettingsJson['AWS_EC2']['instances']) for image in configSettingsJson['GCP']['machineImage']: configSettingsJson['GCP']['instances'] = getInstanceonGCP(image,configSettingsJson['GCP']['instances']) AWSAccessKeyID = '' AWSSecretAccessKey = '' securitygroupid = '' machineType = 'AMI' selectedID = '' regionName = '' noofInfra = 1 workLoad = 'LLM' if sqlite_obj.table_exists('awsCredentials'): column_names = sqlite_obj.column_names('awsCredentials') #print(column_names) if 'workload' not in column_names: query = 'Alter Table awsCredentials ADD workload TEXT' sqlite_obj.execute_query(query) if 'securitygroupid' not in column_names: query = 'Alter Table awsCredentials ADD securitygroupid TEXT' sqlite_obj.execute_query(query) data = sqlite_obj.read_data('awsCredentials') for values in data: AWSAccessKeyID = values[1] AWSSecretAccessKey = values[2] machineType = values[3] selectedID = values[4] regionName = values[5] noofInfra = values[6] workLoad = values[7] securitygroupid = values[8] selectedAWS = {} selectedAWS['accessKey'] = AWSAccessKeyID selectedAWS['secretAccessKey'] = AWSSecretAccessKey selectedAWS['machineType']=machineType selectedAWS['selectedID'] = selectedID selectedAWS['regionName'] = regionName selectedAWS['noOfInstance']=noofInfra selectedAWS['workLoad'] = workLoad selectedAWS['securitygroupid'] = securitygroupid configSettingsJson['awsCredentials'] = selectedAWS gcpCredentials='' projectID = '' selectedID = '' machineType = '' regionName = '' noOfInstance = 1 workLoad = 'LLM' if sqlite_obj.table_exists('gcpCredentials'): column_names = sqlite_obj.column_names('gcpCredentials') if 'workload' not in column_names: query = 'Alter Table gcpCredentials ADD workload TEXT' sqlite_obj.execute_query(query) data = sqlite_obj.read_data('gcpCredentials') for values in data: gcpCredentials = values[1] projectID = values[2] machineType = values[3] selectedID = values[4] regionName = values[5] noOfInstance = values[6] workLoad = values[7] selectedGCP = {} selectedGCP['gcpCredentials'] = gcpCredentials selectedGCP['selectedID'] = selectedID selectedGCP['projectID'] = projectID selectedGCP['machineType'] = machineType selectedGCP['regionName'] = regionName selectedGCP['noOfInstance'] = noOfInstance selectedAWS['workLoad'] = workLoad configSettingsJson['gcpCredentials'] = selectedGCP #print(configSettingsJson) return(configSettingsJson) except Exception as e: print(e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
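# --------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): readComputeConfig() expects
# config/compute_conf.json to provide AWS_EC2.amis / AWS_EC2.instances and
# GCP.machineImage / GCP.instances; the per-entry keys below are exactly the ones
# getInstanceonAWS()/getInstanceonGCP() read. All values are placeholders, not real
# AMIs, projects or machine types.
# --------------------------------------------------------------------------
compute_conf_sketch = {
    "AWS_EC2": {
        "amis": [
            {"id": "ami-0123456789abcdef0", "workLoad": "LLM", "regionName": "us-east-1",
             "ssh": {}, "machineConfiguration": "", "instanceType": "AMI"}
        ],
        "instances": []
    },
    "GCP": {
        "machineImage": [
            {"id": "example-machine-image", "workLoad": "LLM",
             "machineImageProjectID": "example-project",
             "ssh": {}, "machineConfiguration": "", "instanceType": "MachineImage"}
        ],
        "instances": []
    }
}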
validatecsv.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import csv import logging import pandas as pd class csv_validator: def __init__(self): self.log = logging.getLogger('eion') def __text_header(self, filename, threshold=0.75): df = pd.read_csv(filename, header=None,nrows=1000) numeric_columns = df.dtypes[df.dtypes != object] if not len(numeric_columns): first_row_len = df.iloc[0].str.len() index = 0 for c in df: if (df[c].map(len).mean() * threshold <= first_row_len[index]): return False index += 1 return True return False def validate_header(self, filename,delimiter,textqualifier,threshold=0.75): with open(filename, 'rt',encoding='utf-8') as csvfile: has_header = csv.Sniffer().has_header(csvfile.read(8192)) csvfile.seek(0) if not has_header: has_header = self.__text_header(filename, threshold) reader = csv.reader(csvfile, delimiter=delimiter,quotechar=textqualifier) good_csv = True col_len = len(next(reader)) bad_lines = [] offset = 2 # +1 for first read and +1 for python index start at 0 for index, row in enumerate(reader): if len(row) != col_len: good_csv = False if(index == 1 and has_header): offset += 1 bad_lines.append(index + offset) return has_header, good_csv, bad_lines if __name__ == '__main__': import sys val = csv_validator() print(val.validate_header(sys.argv[1], ',', '"')) # delimiter and text qualifier are required; comma and double-quote assumed here
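# --------------------------------------------------------------------------
# Illustrative call (not part of the module above): validate a small comma-delimited,
# double-quoted file containing one ragged row. Assumes csv_validator from this module
# is in scope; the file path and contents are placeholders.
# --------------------------------------------------------------------------
import os, tempfile

sample = "name,age\nalice,30\nbob,25,EXTRA\n"
sample_path = os.path.join(tempfile.gettempdir(), "aion_header_check.csv")
with open(sample_path, "w", encoding="utf-8") as f:
    f.write(sample)

val = csv_validator()
has_header, good_csv, bad_lines = val.validate_header(sample_path, ",", '"')
print(has_header, good_csv, bad_lines)   # (header detected?, all rows same width?, offsets of ragged rows)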
eda.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import math import sys,os import pandas as pd from sklearn.cluster import KMeans from sklearn.decomposition import PCA import numpy as np import scipy.stats as st from sklearn.preprocessing import StandardScaler from dython.nominal import associations class ux_eda (): def __init__(self, dataPath=pd.DataFrame(),delimiter=',',textqualifier='"',optimize=None,): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.dataFrame = pd.DataFrame() if isinstance(dataPath, pd.DataFrame): self.dataFrame = dataPath if optimize == 1: self.dataFrame = self.dataFrame.sample(n=1000, random_state=1) else: if optimize == 1: self.dataFrame = pd.read_csv(dataPath,nrows=1000,encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') else: self.dataFrame = pd.read_csv(dataPath, encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace') self.dataFrame.rename(columns=lambda x: x.strip(), inplace=True) self.features = self.dataFrame.columns.tolist() self.indexFeature = [] self.dateFeature = [] self.categoricalFeature = [] self.constantFeature = [] self.textFeature = [] self.numericFeature = [] self.numericAndCatFeature = [] for feature, featureType in zip(self.features, self.dataFrame.dtypes): if self.__check_seq_feature(self.dataFrame[feature]): self.indexFeature.append(feature) elif self.__match_date_format(self.dataFrame[feature]): self.dateFeature.append(feature) elif self.__check_constant_features(self.dataFrame[feature]): self.constantFeature.append(feature) elif self.__check_category_features(self.dataFrame[feature]): self.categoricalFeature.append(feature) elif featureType == 'object': ''' numOfRows = self.dataFrame.shape[0] distinctCount = len(self.dataFrame[feature].unique()) tempDff = self.dataFrame[feature] self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x)) tempDf = self.dataFrame[feature] tempDf = tempDf.dropna() numberOfNonNullVals = tempDf.count() numericRatio = 0.8 if(numberOfNonNullVals > int(numOfRows * numericRatio)): self.numericFeature.append(feature) else: self.dataFrame[feature] = tempDff ''' self.textFeature.append(feature) elif featureType in aionNumericDtypes: self.numericFeature.append(feature) # self.dataFrame[self.categoricalFeature] = self.dataFrame[self.categoricalFeature].apply(lambda x: x.cat.codes) self.numericAndCatFeature = self.numericFeature + self.categoricalFeature # EDA Performance change # ---------------------------- def subsampleData(self, subsampleData): self.dataFrame = self.dataFrame.sample(n=subsampleData, random_state=1) def get_features_datatype(self,v,num_list,cat_list,text_list): """ To get exact datatype of the feature in Data Overview.""" if v in cat_list: return 'Categorical' elif v in num_list: return 'Numerical' elif v in text_list: return 'Text' def getCorrelationMatrix(self): try: if len(self.dataFrame.columns) > 25: df3 = 
df[self.dataFrame.columns[0:24]] else: df3 = self.dataFrame.copy() cor_mat= associations(self.dataFrame,compute_only=True) cor_mat=cor_mat['corr'] cor_mat = cor_mat.astype(float).round(2) cor_mat.replace(np.nan, 0, inplace=True) cor_mat.fillna('None',inplace=True) return cor_mat except Exception as e: print(e) correlationgraph = pd.DataFrame() return (correlationgraph) def dataDistribution(self): df_eda_actual = self.dataFrame.copy() des1 = df_eda_actual.describe(include='all').T des1['missing count %'] = df_eda_actual.isnull().mean() * 100 des1['zero count %'] = df_eda_actual.isin([0]).mean() * 100 dataColumns = list(self.dataFrame.columns.values) des1.insert(0, 'Features', dataColumns) actual_df_numerical_features = df_eda_actual.select_dtypes(exclude='object') actual_df_categorical_features = df_eda_actual.select_dtypes(include='object') #For text features textFeature_df = df_eda_actual.filter(self.textFeature) actual_df_categorical_features = actual_df_categorical_features.drop(self.textFeature, axis=1) for i in des1['Features']: num_cols = actual_df_numerical_features.columns.to_list() cat_cols = actual_df_categorical_features.columns.to_list() text_cols = self.textFeature des1['Features Type'] = des1['Features'].apply(lambda x: self.get_features_datatype(x, num_cols,cat_cols,text_cols)) curr_columns = des1.columns.to_list() curr_columns.remove('Features Type') insert_i = curr_columns.index('Features')+1 curr_columns.insert(insert_i,'Features Type') des1 = des1[curr_columns] return des1 # ---------------------------- def subsetFeatures(self, edaFeatures): print(self.dataFrame.columns) self.dataFrame = self.dataFrame[edaFeatures] self.features = edaFeatures self.indexFeature = [] self.dateFeature = [] self.categoricalFeature = [] self.constantFeature = [] self.textFeature = [] self.numericFeature = [] self.numericAndCatFeature = [] print('abc') aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] for feature, featureType in zip(self.features, self.dataFrame.dtypes): if self.__check_seq_feature(self.dataFrame[feature]): self.indexFeature.append(feature) elif self.__match_date_format(self.dataFrame[feature]): self.dateFeature.append(feature) elif self.__check_constant_features(self.dataFrame[feature]): self.constantFeature.append(feature) elif self.__check_category_features(self.dataFrame[feature]): self.categoricalFeature.append(feature) elif featureType == 'object': ''' numOfRows = self.dataFrame.shape[0] distinctCount = len(self.dataFrame[feature].unique()) tempDff = self.dataFrame[feature] self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x)) tempDf = self.dataFrame[feature] tempDf = tempDf.dropna() numberOfNonNullVals = tempDf.count() numericRatio = 0.8 if(numberOfNonNullVals > int(numOfRows * numericRatio)): self.numericFeature.append(feature) else: self.dataFrame[feature] = tempDff ''' self.textFeature.append(feature) elif featureType in aionNumericDtypes: self.numericFeature.append(feature) print('def') self.numericAndCatFeature = self.numericFeature + self.categoricalFeature # ---------------------------- def testNum(self,value): try: x=eval(value) return x except: return np.nan def getFeatures(self): leastRatioFeature = self.__LeastfeatureRatio() return (self.features, self.dateFeature, self.indexFeature, self.constantFeature, self.textFeature, leastRatioFeature,self.numericAndCatFeature,self.numericFeature,self.categoricalFeature) def getNumericFeatureCount(self): return(len(self.numericAndCatFeature)) def 
calculateNumberofCluster(self): df = self.dataFrame[self.numericFeature] return self.__NumberofCluster(df) def getTopTextFeatures(self,topn): df_text = pd.DataFrame() if (len(self.textFeature) > 1): df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) features = ['combined'] else: df_text[['combined']] = self.dataFrame[self.textFeature] features = ['combined'] df_text[features[0]] = df_text[features[0]].fillna("NA") textCorpus = df_text[features[0]] from text import eda texteda_obj = eda.ExploreTextData() df = texteda_obj.MostCommonWords(textCorpus,topn) return df def __NumberofCluster(self, featureData): Sum_of_squared_distances = [] K = range(1, 15) for k in K: km = KMeans(n_clusters=k) km = km.fit(featureData) Sum_of_squared_distances.append(km.inertia_) x1, y1 = 1, Sum_of_squared_distances[0] x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1] distances = [] for inertia in range(len(Sum_of_squared_distances)): x0 = inertia + 2 y0 = Sum_of_squared_distances[inertia] numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2) distances.append(numerator / denominator) n_clusters = distances.index(max(distances)) + 2 return (n_clusters) #13841 : TrustedAI: hopkins stat def getHopkinsVal(self,df): try: from appbe.hopkinsStat import hopkins from sklearn.preprocessing import StandardScaler,OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer numeric_transformer = Pipeline( steps=[("imputer", SimpleImputer(missing_values=np.nan,strategy="mean")), ("standard_scaler", StandardScaler())] ) categorical_transformer = Pipeline( steps=[ ("imputer", SimpleImputer(missing_values=np.nan,strategy="most_frequent")), ("encoder", OneHotEncoder(handle_unknown="ignore")) ] ) preprocessor = ColumnTransformer( transformers=[ ("num", numeric_transformer, self.numericFeature), ("cat", categorical_transformer, self.categoricalFeature) ] ) pipe = Pipeline([('scaler',preprocessor)]) scaled_df = pipe.fit_transform(df) if type(scaled_df) != np.ndarray: scaled_df = scaled_df.toarray() score = round(hopkins(scaled_df,scaled_df.shape[0]),2) return str(score) except Exception as e: print(e) return '' def getClusterDetails(self): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] df_clus = pd.get_dummies(self.dataFrame[self.numericAndCatFeature], prefix_sep='####') for i in df_clus.columns: dataType = df_clus[i].dtypes if dataType not in aionNumericDtypes: df_clus[i] = df_clus[i].fillna(df_clus[i].mode()[0]) else: df_clus[i] = df_clus[i].fillna(df_clus[i].mean()) n = self.__NumberofCluster(df_clus) n = n - 1 kmeans = KMeans(n_clusters=n, init='k-means++', max_iter=10, n_init=10, random_state=0) # Fit and predict y_means = kmeans.fit_predict(df_clus) centroids = kmeans.cluster_centers_.squeeze() labels = kmeans.labels_ features = df_clus.columns cluster_details = [] for j in range(len(features)): cluster = {} feature = features[j] perflag = 0 if '####' in feature: x = features[j].split('####') feature = x[0] + ' ' + x[1] + '(%)' perflag = 1 else: feature = feature + '(AVG)' cluster['label'] = feature total_sum = 0 if perflag == 1: for i in range(n): centroid = centroids[i] value = round(centroid[j], 2) total_sum = total_sum + value for i in range(n): centroid = centroids[i] value = round(centroid[j], 2) if perflag == 1: value = (value / total_sum) * 100 value = 
round(value, 2) cluster['Cluster ' + str(i + 1)] = value cluster_details.append(cluster) hopkins_val = self.getHopkinsVal(self.dataFrame,) return cluster_details,hopkins_val def getHighlyCorrelatedFeatures(self,noOfTop): df_corr = abs(self.dataFrame[self.numericAndCatFeature].corr()).stack().reset_index() df_corr.columns = ['FEATURE_1', 'FEATURE_2', 'CORRELATION'] mask_dups = (df_corr[['FEATURE_1', 'FEATURE_2']].apply(frozenset, axis=1).duplicated()) | ( df_corr['FEATURE_1'] == df_corr['FEATURE_2']) df_corr = df_corr[~mask_dups] df_corr = df_corr.sort_values(by='CORRELATION', ascending=False) df_top = df_corr.head(n=noOfTop) return(df_top) # ---------------------- 12686:Data Distribution related Changes S T A R T ---------------------- def word_token_for_feature(self, selectedFeature, dataframe): comment_words = "" try: df_text = pd.DataFrame() df_text[[selectedFeature]] = dataframe features = [selectedFeature] df_text[features[0]] = df_text[features[0]].fillna("NA") textCorpus = df_text[features[0]] from text import TextProcessing tp = TextProcessing.TextProcessing() preprocessed_text = tp.transform(textCorpus) df_text[selectedFeature] = preprocessed_text df_text_list = df_text.values.tolist() for val in df_text_list: val = str(val) tokens = val.split() for i in range(len(tokens)): tokens[i] = tokens[i].lower() comment_words += " ".join(tokens) + " " except: comment_words = "" return comment_words # -------------------------------------------- E N D -------------------------------------------- def word_token(self): df_text = pd.DataFrame() if (len(self.textFeature) > 1): df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) features = ['combined'] else: df_text[['combined']] = self.dataFrame[self.textFeature] features = ['combined'] df_text[features[0]] = df_text[features[0]].fillna("NA") textCorpus = df_text[features[0]] from text import TextProcessing tp = TextProcessing.TextProcessing() preprocessed_text = tp.transform(textCorpus) df_text['combined'] = preprocessed_text df_text_list = df_text.values.tolist() comment_words = "" for val in df_text_list: val = str(val) tokens = val.split() for i in range(len(tokens)): tokens[i] = tokens[i].lower() comment_words += " ".join(tokens) + " " if comment_words == "": comment_words = 'Not found any token' return comment_words def getdata(self): return self.dataFrame def getPCATop10Features(self): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] df = self.dataFrame[self.numericAndCatFeature] for feature in self.numericAndCatFeature: if feature in self.categoricalFeature: df[feature] = pd.Categorical(df[feature]) df[feature] = df[feature].cat.codes df[feature] = df[feature].fillna(df[feature].mode()[0]) else: df[feature] = df[feature].fillna(df[feature].mean()) pca = PCA(n_components=2).fit(StandardScaler().fit_transform(df)) mapping = pd.DataFrame(pca.components_, columns=self.numericAndCatFeature) mapping = mapping.diff(axis=0).abs() mapping = mapping.iloc[1] mapping = mapping.sort_values(ascending=False).head(10) return mapping def getTopRows(self, rows=5): return self.dataFrame.head(rows) def __check_seq_feature(self, data): if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']: total_record = data.count() count = (data - data.shift() == 1).sum() if ((total_record - count) == 1): return True return False def __match_date_format(self, data): try: ## Using regex lib, we are check if any col contains datetime format like yyyy-mm-dd 
or yyyy/mm/dd format. if it finds return true. import re u_data = data.to_string() date_find = (re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][0-9]{1,2}[\_|\-|\/|\|][0-9]{1,4}", u_data) or re.findall(r'\d{,2}\-[A-Za-z]{,9}\-\d{,4}', u_data) or re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][0-9]{1,2}[\_|\-|\/|\|][0-9]{1,4}.\d" , u_data) or re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][A-Za-z]{,9}[\_|\-|\/|\|][0-9]{1,4}", u_data)) if (date_find): try: data = pd.to_datetime(data, utc=True) return True except Exception as e: ##If not a datetime col, just pass to return false statement. pass except Exception as e: data = data.astype(str) beforecheckcount = data.count() #####YYYY-MM-DD HH:MM:SS#### check1 = data[data.str.match( r'(^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check1.count() if (beforecheckcount == aftercheckcount): return True #####MM/DD/YYYY HH:MM#### check2 = data[data.str.match( r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check2.count() if (beforecheckcount == aftercheckcount): return True #####DD-MM-YYYY HH:MM#### check2 = data[data.str.match( r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True] aftercheckcount = check2.count() if (beforecheckcount == aftercheckcount): return True #####YYYY/MM/DD#### check2 = data[data.str.match(r'(^\d\d\d\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True] aftercheckcount = check2.count() if (beforecheckcount == aftercheckcount): return True #####MM/DD/YYYY#### check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d)$)') == True] aftercheckcount = check2.count() if (beforecheckcount == aftercheckcount): return True #####YYYY-MM-DD HH:MM:SS.fff#### check11 = data[data.str.match( r'(^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])\.(\d{3})$)') == True] aftercheckcount = check11.count() if (beforecheckcount == aftercheckcount): return True return False def __check_category_features(self, modelFeatures): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] dataType = modelFeatures.dtypes numOfRows = len(modelFeatures) if dataType not in aionNumericDtypes: if dataType != 'bool': nUnique = len(modelFeatures.unique().tolist()) if nUnique <= 30: return True return False def __check_constant_features(self, modelFeatures): return len(modelFeatures.unique().tolist()) == 1 def __featureRatio(self, modelFeatures): if len(modelFeatures): return len(modelFeatures.unique().tolist()) / len(modelFeatures) return 0 def __LeastfeatureRatio(self): ratio = 1 feat = "" for feature in (self.numericAndCatFeature + self.textFeature): r = self.__featureRatio(self.dataFrame[feature]) if r < ratio: ratio = r feat = feature return feat def getDistribution(self): aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] df = self.dataFrame[self.numericAndCatFeature] dist={} for feature in self.numericAndCatFeature: if feature in self.categoricalFeature: df[feature] = pd.Categorical(df[feature]) df[feature] = df[feature].cat.codes df[feature] = df[feature].fillna(df[feature].mode()[0]) else: df[feature] = df[feature].fillna(df[feature].mean()) distributionname,sse = self.DistributionFinder(df[feature]) if distributionname == '': dist[feature] = 'Unknown' else: dist[feature] = distributionname 
return dist def DistributionFinder(self,data): try: distributionName = "" sse = 0.0 KStestStatic = 0.0 dataType = "" if (data.dtype == "float64"): dataType = "Continuous" elif (data.dtype == "int"): dataType = "Discrete" elif (data.dtype == "int64"): dataType = "Discrete" if (dataType == "Discrete"): distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] index, counts = np.unique(data.astype(int), return_counts=True) if (len(index) >= 2): best_sse = np.inf y1 = [] total = sum(counts) mean = float(sum(index * counts)) / total variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) dispersion = mean / float(variance) theta = 1 / float(dispersion) r = mean * (float(theta) / 1 - theta) datamin = data.min() datamax = data.max() for j in counts: y1.append(float(j) / total) pmf1 = st.bernoulli.pmf(index, mean) pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) pmf4 = st.nbinom.pmf(index, mean, r) pmf5 = st.poisson.pmf(index, mean) sse1 = np.sum(np.power(y1 - pmf1, 2.0)) sse2 = np.sum(np.power(y1 - pmf2, 2.0)) sse3 = np.sum(np.power(y1 - pmf3, 2.0)) sse4 = np.sum(np.power(y1 - pmf4, 2.0)) sse5 = np.sum(np.power(y1 - pmf5, 2.0)) sselist = [sse1, sse2, sse3, sse4, sse5] best_distribution = 'NA' for i in range(0, len(sselist)): if best_sse > sselist[i] > 0: best_distribution = distributions[i].name best_sse = sselist[i] elif (len(index) == 1): best_distribution = "Constant Data-No Distribution" best_sse = 0.0 distributionName = best_distribution sse = best_sse elif (dataType == "Continuous"): distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta] best_distribution = st.norm.name best_sse = np.inf datamin = data.min() datamax = data.max() nrange = datamax - datamin y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: params = distribution.fit(data.astype(float)) arg = params[:-2] loc = params[-2] scale = params[-1] pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if (best_sse > sse > 0): best_distribution = distribution.name best_sse = sse distributionName = best_distribution sse = best_sse except: response = str(sys.exc_info()[0]) message = 'Job has Failed' + response exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) return distributionName, sse
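The discrete branch of DistributionFinder above picks the distribution whose PMF has the lowest sum-of-squared error against the observed frequencies. A minimal standalone sketch of that idea follows; the reduced candidate set and the Poisson test sample are illustrative assumptions, not values taken from the class.

# Minimal sketch: choose the discrete distribution whose PMF best matches the
# empirical frequencies (lowest SSE). Candidate set and sample are illustrative.
import numpy as np
import scipy.stats as st

def best_discrete_fit(data):
    index, counts = np.unique(data.astype(int), return_counts=True)
    emp_pmf = counts / counts.sum()                      # observed frequencies
    mean = float(np.sum(index * counts)) / counts.sum()  # moment estimate
    candidates = {
        'poisson': st.poisson.pmf(index, mean),
        'geom': st.geom.pmf(index, 1.0 / (1.0 + mean)),
    }
    sse = {name: np.sum((emp_pmf - pmf) ** 2) for name, pmf in candidates.items()}
    best = min(sse, key=sse.get)
    return best, sse[best]

if __name__ == '__main__':
    sample = np.random.default_rng(0).poisson(lam=3.0, size=1000)
    print(best_discrete_fit(sample))   # expected to favour 'poisson'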
stationarity_seasonality_check.py
import pandas as pd import numpy as np from statsmodels.tsa.stattools import adfuller from statsmodels.tsa.stattools import kpss from statsmodels.tsa.seasonal import seasonal_decompose import logging import os import warnings warnings.filterwarnings('ignore') ## Main class to find out seassonality and stationary in timeseries data. class StationarySeasonalityTest: def __init__(self,df,featurename,datetimefeature): self.df=df self.targetFeature=featurename self.datetimefeature=datetimefeature ## to get the timeseries data stationary information def stationary_model(self,df,target_feature,stationary_check_method): stationary_status=None if (stationary_check_method.lower()=='adfuller'): stats_model=adfuller(df[target_feature]) statistic, p_value, n_lags, num_bservations,critical_values,info_criterion_best=stats_model[0],stats_model[1],stats_model[2],stats_model[3],stats_model[4],stats_model[5] if (p_value>0.05): stationary_status=str("Non-Stationary") elif(p_value<0.05): stationary_status=str("Stationary") ##kpss is opposite to ADF in considering null hypothesis. In KPSS, if null hypothesis,then it is stationary as oppose to ADF. elif (stationary_check_method.lower()=='kpss'): from statsmodels.tsa.stattools import kpss stats_model = kpss(df[target_feature]) statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] ##In kpss, the stationary condition is opposite to Adafuller. if (p_value>0.05): stationary_status=str("Stationary") else: stationary_status=str("Non-Stationary") return stats_model,n_lags,p_value,stationary_status ## Get stationary details def stationary_check(self,target_feature,time_col,method): df=self.df df[time_col]=pd.to_datetime(df[time_col]) df=df.set_index(time_col) try: stationary_check_method=method except: stationary_check_method='adfuller' if (len(target_feature) == 1): try: if isinstance(target_feature,list): target_feature=''.join(target_feature) elif isinstance(target_feature,int): target_feature=str(target_feature) elif isinstance(target_feature,str): pass except Exception as e: pass stationary_result={} stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_feature,stationary_check_method) # stationary_result[target_feature]=stationary_status stationary_result[target_feature]=stationary_status elif(len(target_feature) > 1): stationary_result={} for col in df.columns: stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method) stationary_result[col]=stationary_status else: pass stationary_val=None for v in stationary_result.values(): stationary_val=v stationary_combined_res=dict() c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v] if (len(c_dict)>=1): stationary_combined_res['dataframe_stationarity']='Non-Stationary' else: stationary_combined_res['dataframe_stationarity']='Stationary' return stats_model,n_lags,p_value,stationary_val,stationary_combined_res #Get seasonality by using seasonal_decompose lib. 
def seasonality_model(self,target_feature,df): seasonality_status=None try: try: stats_model = kpss(df[target_feature]) statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] except: n_lags=1 pass try: df_target=self.df[target_feature] decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=n_lags) except Exception as e: ##If additive model (type of seasonal component) failed, use multiplicative decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1) trend = decompose_result_mult.trend observed=decompose_result_mult.observed seasonal = decompose_result_mult.seasonal residual = decompose_result_mult.resid try: if isinstance(df_target, pd.Series): auto_correlation = df_target.autocorr(lag=n_lags) elif isinstance(df_target, pd.DataFrame): df_target = df_target.squeeze() auto_correlation = df_target.autocorr(lag=n_lags) except: pass if (seasonal.sum()==0): seasonality_status="Non-Seasonal" else: seasonality_status="Seasonal" # #Please use the below plot for GUI show (seasonality components) # decompose_result_mult.plot().savefig('seasonality_plot.png') df['observed'] = decompose_result_mult.observed df['residual'] = decompose_result_mult.resid df['seasonal'] = decompose_result_mult.seasonal df['trend'] = decompose_result_mult.trend except Exception as e: print("Seasonality function exception: \t",e) return df,decompose_result_mult,seasonality_status ##Main function to check seasonlity in data def seasonal_check(self,target_feature,time_col,seasonal_model): df=self.df try: df[time_col]=pd.to_datetime(df[time_col]) except Exception as e: pass df=df.set_index(time_col) if (len(target_feature)==1): try: if isinstance(target_feature,list): target_feature=''.join(target_feature) elif isinstance(target_feature,int): target_feature=str(target_feature) elif isinstance(target_feature,str): pass except Exception as e: ## Because of EDA, all log messages removed. (self.log.info ) pass ## Seasonal component for individual feature based. seasonality_result=dict() df,decompose_result_mult,seasonality_status = self.seasonality_model(target_feature,df) # seasonality_result[target_feature]=seasonality_status seasonality_result['Feature: '+str(target_feature)]=seasonality_status elif(len(target_feature) > 1): seasonality_result=dict() for col in df.columns: df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df) seasonality_result[col]=seasonality_status else: pass # ## Seasonal component for whole dataset seasonality_val=None for v in seasonality_result.values(): seasonality_val=v seasonality_combined_res=dict() c_dict=[k for k,v in seasonality_result.items() if 'non-seasonality' in v] if (len(c_dict)>=1): seasonality_combined_res['dataframe_seasonality']='No Seasonal elements' else: seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.' 
        return df,decompose_result_mult,seasonality_val,seasonality_combined_res

    #Main user-defined caller for the stationarity and seasonality (SS) checks
    def analysis(self,seasonality_status,stationarity_status):
        seasonal_model="additive"
        time_col=self.datetimefeature
        stationary_method='adfuller'
        if (isinstance(self.targetFeature,list)):
            target=self.targetFeature
        elif (isinstance(self.targetFeature,str)):
            target=list(self.targetFeature.split(','))
        if (stationarity_status.lower()=="true"):
            stats_model,n_lags,p_value,stationary_result,stationary_combined_res=self.stationary_check(target,time_col,stationary_method)
            return stationary_result
        if (seasonality_status.lower()=="true"):
            df,decompose_result_mult,seasonality_result,seasonality_combined_res=self.seasonal_check(target,time_col,seasonal_model)
            return seasonality_result

#Main block for standalone test purposes
if __name__=='__main__':
    print("Inside seasonality-stationary test main function...")
    print("Below code is used for standalone test purposes.")
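A compact standalone illustration of the two checks this class wraps: an ADF test for stationarity and seasonal_decompose for a seasonal component. The synthetic monthly series, period=12 and the 0.05 significance threshold are illustrative assumptions, not the class defaults.

# Minimal sketch: ADF stationarity test plus additive seasonal decomposition
# on a synthetic monthly series (period and threshold are assumptions).
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose

idx = pd.date_range('2020-01-01', periods=72, freq='MS')
values = 10 + 2 * np.sin(2 * np.pi * np.arange(72) / 12) \
         + np.random.default_rng(1).normal(0, 0.3, 72)
series = pd.Series(values, index=idx)

p_value = adfuller(series)[1]
print('Stationary' if p_value < 0.05 else 'Non-Stationary', 'p-value:', round(p_value, 4))

decomp = seasonal_decompose(series, model='additive', period=12, extrapolate_trend='freq')
print('Seasonal' if decomp.seasonal.abs().sum() > 0 else 'Non-Seasonal')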
azureStorage.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json import os import rsa import boto3 #usnish import pandas as pd import time def add_new_azureStorage(request): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) with open(file_path, 'r') as f: data = json.load(f) f.close() if data == '': data = [] except: data = [] if request.POST["azurename"] =='' or request.POST["azureaccountkey"] == '' or request.POST["containername"] == '' : return 'error' newdata = {} newdata['azurename'] = request.POST["azurename"] newdata['azureaccountkey'] = request.POST["azureaccountkey"] newdata['containername'] = request.POST["containername"] data.append(newdata) with open(file_path, 'w') as f: json.dump(data, f) f.close() return 'success' def get_azureStorage(): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] return data def read_azureStorage(name,directoryname,DATA_FILE_PATH): try: file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf')) with open(file_path, 'r') as f: data = json.load(f) except: data = [] found = False for x in data: if x['azurename'] == name: storage_account_name = str(x['azurename']) storage_account_key = str(x['azureaccountkey']) azure_container_name = x['containername'] found = True break try: if found: root_dir = str(directoryname) from azure.storage.filedatalake import DataLakeServiceClient import io import pandavro as pdx from detect_delimiter import detect try: service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", storage_account_name), credential=storage_account_key) print(azure_container_name) file_system_client = service_client.get_file_system_client(azure_container_name) print(root_dir) file_paths = file_system_client.get_paths(path=root_dir) main_df = pd.DataFrame() for path in file_paths: if not path.is_directory: file_client = file_system_client.get_file_client(path.name) file_ext = os.path.basename(path.name).split('.', 1)[1] if file_ext in ["csv", "tsv"]: with open(csv_local, "wb") as my_file: download = file_client.download_file() download.readinto(my_file) with open(csv_local, 'r') as file: data = file.read() row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t']) processed_df = pd.read_csv(csv_local, sep=row_delimiter) if file_ext == "parquet": download = file_client.download_file() stream = io.BytesIO() download.readinto(stream) processed_df = pd.read_parquet(stream, engine='pyarrow') if file_ext == "avro": with open(avro_local, "wb") as my_file: download = file_client.download_file() download.readinto(my_file) processed_df = pdx.read_avro(avro_local) if not main_df.empty: main_df = main_df.append(processed_df, ignore_index=True) else: main_df = pd.DataFrame(processed_df) except Exception as e: print(e) return 'Success',main_df except Exception as e: print(e) return 
'Error', pd.DataFrame()
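read_azureStorage above walks every path in an ADLS Gen2 container and concatenates the files it can parse. A hedged sketch of the core download step for a single CSV follows; the account name, key, container and file path are placeholders, not values from the configuration file read above.

# Minimal sketch (placeholder credentials and paths): download one CSV from
# ADLS Gen2 into a DataFrame using the same azure-storage-file-datalake calls.
import io
import pandas as pd
from azure.storage.filedatalake import DataLakeServiceClient

account_name = '<storage-account>'   # placeholder
account_key = '<account-key>'        # placeholder
service_client = DataLakeServiceClient(
    account_url='https://{}.dfs.core.windows.net'.format(account_name),
    credential=account_key)
fs_client = service_client.get_file_system_client('<container>')   # placeholder
file_client = fs_client.get_file_client('input/sample.csv')        # placeholder path
stream = io.BytesIO()
file_client.download_file().readinto(stream)
stream.seek(0)
df = pd.read_csv(stream)
print(df.head())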
utils.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from typing import Tuple, Union, List import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from flwr.common.logger import log from logging import INFO TRUE_FALSE_MAPPING = {'True':'False','true':'false',True:False,'y':'n','Y':'N','Yes':'No','yes':'no','YES':'NO'} XY = Tuple[np.ndarray, np.ndarray] Dataset = Tuple[XY, XY] LogRegParams = Union[XY, Tuple[np.ndarray]] XYList = List[XY] modelUsed=None modelname=None def setmodelName(modelselected): try: modelname=str(modelselected) print("setmodelName ,given modelname: \n",modelname) if (modelname.lower() == 'logisticregression'): modelUsed=LogisticRegression() return True elif (modelname.lower() == "naivebayes"): modelUsed = GaussianNB() return True elif (modelname.lower() == "sgdclassifier"): #from sklearn.linear_model import SGDClassifier modelUsed=SGDClassifier() return True elif (modelname.lower() == "knn"): modelUsed = KNeighborsClassifier() return True elif (modelname.lower() == "decisiontreeclassifier"): modelUsed = DecisionTreeClassifier() return True else: return False except Exception as e: log(INFO, "set fl model name fn issue: ",e) def get_model_parameters(model:modelUsed) -> LogRegParams: """Returns the paramters of a sklearn LogisticRegression model.""" model_name=model.__class__.__name__ if model.fit_intercept: params = (model.coef_, model.intercept_) else: params = (model.coef_,) return params def set_model_params( model:modelUsed, params: LogRegParams ) -> modelUsed: """Sets the parameters of a sklean LogisticRegression model.""" model.coef_ = params[0] model_name=model.__class__.__name__ try: if model.fit_intercept: model.intercept_ = params[1] except Exception as e: log(INFO, "set_model_params fn issue: ",e) pass return model def set_initial_params(model,no_classes,no_features): """Sets initial parameters as zeros Required since model params are uninitialized until model.fit is called. But server asks for initial parameters from clients at launch. Refer to sklearn.linear_model.LogisticRegression documentation for more information. 
""" n_classes = no_classes n_features = no_features model.classes_ = np.array([i for i in range(n_classes)]) model.coef_ = np.zeros((n_classes, n_features)) model_name=model.__class__.__name__ try: if model.fit_intercept: model.intercept_ = np.zeros((n_classes,)) except Exception as e: log(INFO, "set_initial_params fn issue: ",e) pass def shuffle(X: np.ndarray, y: np.ndarray) -> XY: """Shuffle X and y.""" rng = np.random.default_rng() idx = rng.permutation(len(X)) return X[idx], y[idx] def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: """Split X and y into a number of partitions.""" return list( zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions)) ) def get_true_option(d, default_value=None): if isinstance(d, dict): for k,v in d.items(): if v in TRUE_FALSE_MAPPING.keys(): return k return default_value def get_true_options( d): options = [] if isinstance(d, dict): for k,v in d.items(): if v in TRUE_FALSE_MAPPING.keys(): options.append(k) return options def set_true_option(d, key=None, value='True'): if key in d.keys(): if value in TRUE_FALSE_MAPPING.keys(): for k in d.keys(): d[ k] = TRUE_FALSE_MAPPING[ value] d[key] = value return d
distribution.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np import os import sys import scipy.stats as st def DistributionFinder(data): try: distributionName = "" sse = 0.0 KStestStatic = 0.0 dataType = "" if (data.dtype == "float64"): dataType = "Continuous" elif (data.dtype == "int"): dataType = "Discrete" elif (data.dtype == "int64"): dataType = "Discrete" if (dataType == "Discrete"): distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] index, counts = np.unique(data.astype(int), return_counts=True) if (len(index) >= 2): best_sse = np.inf y1 = [] total = sum(counts) mean = float(sum(index * counts)) / total variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) dispersion = mean / float(variance) theta = 1 / float(dispersion) r = mean * (float(theta) / 1 - theta) for j in counts: y1.append(float(j) / total) pmf1 = st.bernoulli.pmf(index, mean) pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) pmf4 = st.nbinom.pmf(index, mean, r) pmf5 = st.poisson.pmf(index, mean) sse1 = np.sum(np.power(y1 - pmf1, 2.0)) sse2 = np.sum(np.power(y1 - pmf2, 2.0)) sse3 = np.sum(np.power(y1 - pmf3, 2.0)) sse4 = np.sum(np.power(y1 - pmf4, 2.0)) sse5 = np.sum(np.power(y1 - pmf5, 2.0)) sselist = [sse1, sse2, sse3, sse4, sse5] best_distribution = 'NA' for i in range(0, len(sselist)): if best_sse > sselist[i] > 0: best_distribution = distributions[i].name best_sse = sselist[i] elif (len(index) == 1): best_distribution = "Constant Data-No Distribution" best_sse = 0.0 distributionName = best_distribution sse = best_sse elif (dataType == "Continuous"): distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta] best_distribution = st.norm.name best_sse = np.inf datamin = data.min() datamax = data.max() nrange = datamax - datamin y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: params = distribution.fit(data.astype(float)) arg = params[:-2] loc = params[-2] scale = params[-1] pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if (best_sse > sse > 0): best_distribution = distribution.name best_sse = sse distributionName = best_distribution sse = best_sse except: response = str(sys.exc_info()[0]) message = 'Job has Failed' + response exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) print(message) return distributionName, sse
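The continuous branch above fits each candidate scipy distribution, evaluates its PDF on the histogram bin centres and keeps the lowest SSE. A standalone sketch with a deliberately reduced candidate list (the full list above has ten entries) and a synthetic normal sample:

# Minimal sketch of the continuous branch: fit candidates with distribution.fit(),
# score the PDF against the histogram density, keep the lowest SSE.
import numpy as np
import scipy.stats as st

def best_continuous_fit(data, candidates=(st.norm, st.expon, st.gamma)):
    y, x = np.histogram(data, bins='auto', density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0              # bin centres
    best_name, best_sse = 'NA', np.inf
    for dist in candidates:
        params = dist.fit(data)
        pdf = dist.pdf(x, *params[:-2], loc=params[-2], scale=params[-1])
        sse = np.sum((y - pdf) ** 2)
        if 0 < sse < best_sse:
            best_name, best_sse = dist.name, sse
    return best_name, best_sse

if __name__ == '__main__':
    sample = np.random.default_rng(2).normal(loc=5.0, scale=1.5, size=2000)
    print(best_continuous_fit(sample))   # expected to favour 'norm'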
pushrecords.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import socket import os import rsa from os.path import expanduser from pathlib import Path import requests import platform from appbe.dataPath import DATA_DIR import socket import getmac import subprocess import sys import json from datetime import datetime import binascii computername = socket.getfqdn() global_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+ NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB -----END RSA PUBLIC KEY----- ''' quarter_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1 08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9 njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01 gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB -----END RSA PUBLIC KEY----- ''' halfYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp 7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB -----END RSA PUBLIC KEY----- ''' oneYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/ fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0 KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB -----END RSA PUBLIC KEY----- ''' full_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0 elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN 4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+ t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB -----END RSA PUBLIC KEY----- ''' def validate_key_Pair(privatepath,publickey): with open(privatepath, 'rb') as privatefile: keydata = privatefile.read() privatefile.close() try: privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM') data = 'Validate Global License' signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1') pubkey = rsa.PublicKey.load_pkcs1(publickey) except: return False try: rsa.verify(data.encode('utf-8'), signature, pubkey) return True except Exception as e: return False def 
updateDRecord(licensepath): domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(domain_license_path, 'wb') as f: f.write(licensekey) f.close() if(validate_key_Pair(domain_license_path,global_key)): return True,'Valid Domain License' else: return False,'Invalid Domain License' else: return False,'File Not Exists' def generateLicenseKey(userKey): record = {'UserKey':userKey} record = json.dumps(record) status = 'Error' url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license' try: response = requests.post(url, data=record,headers={"x-api-key":"3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM","Content-Type":"application/json",}) if response.status_code == 200: outputStr=response.content outputStr = outputStr.decode('utf-8','ignore') outputStr = outputStr.strip() license_dict = json.loads(str(outputStr)) if license_dict['status'] == 'success': status = 'Success' licenseKey = license_dict['msg'] else: status = 'Error' licenseKey = '' else: status = 'Error' licenseKey = '' except Exception as inst: print(inst) status = 'Error' licenseKey = '' msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''} return msg def updateRecord(licensepath): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(license_path, 'wb') as f: f.write(licensekey) f.close() status,msg = check_domain_license() if status: status,msg = getdaysfromstartdate() if status: status,msg = check_days_license(int(msg)) return status,msg else: return False,'File Not Exists' def check_domain_license(): if 'CORP.HCL.IN' in computername: return True,'HCL Domain' else: return True,'HCL Domain' def diff_month(d1, d2): return (d1.year - d2.year) * 12 + d1.month - d2.month def getdaysfromstartdate(): currentDirectory = os.path.dirname(os.path.abspath(__file__)) startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt') if(os.path.isfile(startdatePath)): with open(startdatePath, "rb") as fl: encrypted_message = fl.read() fl.close() privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+ GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr 38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH 2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4 Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+ gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1 uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw 3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps 
hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4 UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu +edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ= -----END RSA PRIVATE KEY----- ''' privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') decrypted_message = rsa.decrypt(encrypted_message, privkey) decrypted_message = decrypted_message.decode() import datetime start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d') current_date = datetime.datetime.today().strftime('%Y-%m-%d') current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d') Months = diff_month(current_date,start_time) return True,Months else: return False,'Start Date Not Exists' def check_days_license(months): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(license_path)): if(validate_key_Pair(license_path,full_key)): return True,'Valid License' elif(validate_key_Pair(license_path,oneYear_key)): if months <= 12: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,halfYear_key)): if months <= 6: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,quarter_key)): if months <= 3: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' else: return False,'Invalid License' else: return False,'License Not exists.Please contact ERS Research for renewal.' 
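Every license check above ultimately calls validate_key_Pair, which signs a fixed message with the stored private key and verifies the signature against one of the bundled public keys. A self-contained sketch of that sign/verify round trip using a throwaway key pair generated in-process (the real flow loads persisted .lic keys instead):

# Minimal sketch: the sign/verify handshake behind validate_key_Pair, with a
# throwaway key pair (illustrative only; production keys come from .lic files).
import rsa

pubkey, privkey = rsa.newkeys(2048)
message = 'Validate Global License'.encode('utf-8')
signature = rsa.sign(message, privkey, 'SHA-1')
try:
    rsa.verify(message, signature, pubkey)
    print('Key pair is valid')
except rsa.VerificationError:
    print('Key pair is invalid')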
def checklicense(): import binascii license_path = os.path.join(DATA_DIR,'License','license.lic') if(os.path.isfile(license_path)): try: with open(license_path, 'r') as privatefile: license_key = privatefile.read() privatefile.close() encrypted_message = binascii.unhexlify(license_key.encode()) privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3 anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83 rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7 NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07 AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k 6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2 3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+ 9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2 -----END RSA PRIVATE KEY----- ''' privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') decrypted_message = rsa.decrypt(encrypted_message, privkey) msg = decrypted_message.decode().split('####') product = msg[0] computernameLicense = msg[1] computername = socket.getfqdn() licenseValid = False if product.lower() == 'aion': if computernameLicense == computername: uuidlicense = msg[3] uuid = guid() if uuidlicense == uuid: current_date = datetime.now() license_expiry_date = msg[5] license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S') if current_date > license_expiry_date: return False,'License Expire' else: return True,'' return False,'License Error' except Exception as e: print(e) return False,'License Error' else: return False,'Generate License' def generate_record_key(product,version): computername = socket.getfqdn() macaddress = getmac.get_mac_address() license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S') try: user = os.getlogin() except: user = 'NA' uuid = guid() msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date pkeydata='''-----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1 kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB -----END RSA PUBLIC KEY----- ''' pubkey = 
rsa.PublicKey.load_pkcs1(pkeydata) encrypted_message = rsa.encrypt(msg.encode(), pubkey) encrypted_message = binascii.hexlify(encrypted_message).decode() return(encrypted_message) def run(cmd): try: return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding="utf-8").stdout.strip() except Exception as e: print(e) return None def guid(): if sys.platform == 'darwin': return run( "ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\" '/IOPlatformUUID/{print $(NF-1)}'", ) if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys': return run('wmic csproduct get uuid').split('\n')[2].strip() if sys.platform.startswith('linux'): return run('cat /var/lib/dbus/machine-id') or \ run('cat /etc/machine-id') if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'): return run('cat /etc/hostid') or \ run('kenv -q smbios.system.uuid') def updateLicense(licensekey): license_folder = os.path.join(DATA_DIR,'License') license_folder = Path(license_folder) license_folder.mkdir(parents=True, exist_ok=True) license_file = license_folder/'license.lic' with open(license_file, "w") as fl: fl.write(licensekey) fl.close() def enterRecord(version): validLicense,msg = checklicense() if not validLicense: key = generate_record_key('AION',version) msg = {'status':msg,'key':key,'licenseKey':'','link':''} return validLicense,msg
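generate_record_key and checklicense above are the two halves of one round trip: encrypt a '###'-delimited machine fingerprint with a public key, hex-encode it for transport, then unhexlify, decrypt and split it back. A minimal sketch with a throwaway key pair and a shortened illustrative payload:

# Minimal sketch: encrypt-then-hexlify (generate_record_key side) and
# unhexlify-then-decrypt (checklicense side), using a throwaway key pair.
import binascii
import rsa

pubkey, privkey = rsa.newkeys(2048)
payload = 'AION###1.0###my-host###00:11:22:33:44:55'   # illustrative fields only
license_key = binascii.hexlify(rsa.encrypt(payload.encode(), pubkey)).decode()

recovered = rsa.decrypt(binascii.unhexlify(license_key.encode()), privkey).decode()
product, version, host, mac = recovered.split('###')
print(product, version, host, mac)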
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
uq_interface.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #from sklearn.externals import joblib import joblib # import pyreadstat # import sys # import math import time import pandas as pd import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from sklearn.svm import SVC from sklearn.linear_model import LinearRegression import argparse import json import os import pathlib from tensorflow.keras.models import load_model # from tensorflow.keras import backend as K import tensorflow as tf # from sklearn.decomposition import LatentDirichletAllocation from pathlib import Path #from aionUQ import aionUQ from uq_main import aionUQ import os from datetime import datetime from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument('savFile') parser.add_argument('csvFile') parser.add_argument('features') parser.add_argument('target') args = parser.parse_args() from appbe.dataPath import DEPLOY_LOCATION if ',' in args.features: args.features = [x.strip() for x in args.features.split(',')] else: args.features = args.features.split(",") models = args.savFile if Path(models).is_file(): # if Path(args.savFile.is_file()): model = joblib.load(args.savFile) # print(model.__class__.__name__) # print('class:',model.__class__) # print(type(model).__name__) # try: # print('Classess=',model.classes_) # except: # print("Classess=N/A") # print('params:',model.get_params()) # try: # print('fea_imp =',model.feature_importances_) # except: # print("fea_imp =N/A") ProblemName = model.__class__.__name__ Params = model.get_params() # print("ProblemName: \n",ProblemName) # print("Params: \n",Params) # print('ProblemName:',model.__doc__) # print(type(ProblemName)) if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighboursClassifier','DecisionTreeClassifier','GradientBoostingClassifier']: Problemtype = 'Classification' else : Problemtype = 'Regression' if Problemtype == 'Classification': df = pd.read_csv(args.csvFile) object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object'] df = df.drop(object_cols, axis=1) df = df.dropna(axis=1) df = df.reset_index(drop=True) modelfeatures = args.features # dfp = df[modelfeatures] tar = args.target # target = df[tar] y=df[tar] X = df.drop(tar, axis=1) #for dummy test,train values pass X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar) #accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,"uqtest") 
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification() # print("UQ Classification: \n",output_jsonobject) print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per) print("End of UQ Classification.\n") else: df = pd.read_csv(args.csvFile) modelfeatures = args.features # print("modelfeatures: \n",modelfeatures) # print("type modelfeatures: \n",type(modelfeatures)) dfp = df[modelfeatures] tar = args.target target = df[tar] #Not used, just dummy X,y split y=df[tar] X = df.drop(tar, axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression() print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject) print("End of UQ reg\n") elif Path(models).is_dir(): os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' os.environ['TF_CPP_MIN_LOG_LEVEL']='2' model = load_model(models) ProblemName = model.__class__.__name__ Problemtype = 'Classification' # print('class:',model.__class__) # print('class1',model.__class__.__name__) # print(model.summary()) # print('ProblemName1:',model.get_config()) def Params(model: tf.keras.Model): Params = [] model.Params(print_fn=lambda x: Params.append(x)) return '\n'.join(Params) df = pd.read_csv(args.csvFile) modelfeatures = args.features dfp = df[modelfeatures] tar = args.target target = df[tar] df3 = dfp.astype(np.float32) predic = model.predict(df3) if predic.shape[-1] > 1: predic = np.argmax(predic, axis=-1) else: predic = (predic > 0.5).astype("int32") matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() classificationreport = round(classificationreport,2) classificationreport = classificationreport.to_json(orient='index') output = {} output["Precision"] = "%.3f" % precision_score(target, predic,average='weighted') output["Recall"] = "%.3f" % recall_score(target, predic,average='weighted') output["Accuracy"] = "%.3f" % accuracy_score(target, predic) output["ProblemName"] = ProblemName output["Params"] = Params output["Problemtype"] = Problemtype output["Confusionmatrix"] = matrixconfusion output["classificationreport"] = classificationreport print(json.dumps(output))
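The classification branch above packages weighted precision/recall/accuracy together with a JSON-encoded confusion matrix and classification report. A self-contained sketch of that packaging on a toy dataset; the classifier and data are stand-ins for the deployed .sav model and CSV parsed from the command line above.

# Minimal sketch: assemble the same metrics payload on a toy classifier
# (stand-in for the deployed model and input CSV).
import json
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, precision_score, recall_score)
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model = LogisticRegression(max_iter=1000).fit(X_train, y_train)
pred = model.predict(X_test)

output = {
    'Precision': '%.3f' % precision_score(y_test, pred, average='weighted'),
    'Recall': '%.3f' % recall_score(y_test, pred, average='weighted'),
    'Accuracy': '%.3f' % accuracy_score(y_test, pred),
    'Confusionmatrix': pd.DataFrame(confusion_matrix(y_test, pred)).to_json(orient='index'),
    'classificationreport': pd.DataFrame(
        classification_report(y_test, pred, output_dict=True)).transpose().round(2).to_json(orient='index'),
}
print(json.dumps(output))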
aionUQ.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from matplotlib import pyplot import sys import os import json import matplotlib.pyplot as plt from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression from uq360.algorithms.ucc_recalibration import UCCRecalibration from sklearn import datasets from sklearn.model_selection import train_test_split import pandas as pd from uq360.metrics.regression_metrics import compute_regression_metrics import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import roc_curve # from math import sqrt from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error # from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature #Added libs from MLTest import sys import time from sklearn.metrics import confusion_matrix from pathlib import Path import logging # import json class aionUQ: # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation): # #printprint("Inside aionUQ \n") try: #print("Inside aionUQ init\n ") self.data=df self.dfFeatures=dfp self.uqconfig_base=Params self.uqconfig_meta=Params self.targetFeature=targetfeature self.target=target self.selectedfeature=modelfeatures self.y=self.target self.X=self.dfFeatures self.log = logging.getLogger('eion') self.basemodel=model self.model_name=ProblemName self.Deployment = os.path.join(deployLocation,'log','UQ') os.makedirs(self.Deployment,exist_ok=True) self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph') os.makedirs(self.uqgraphlocation,exist_ok=True) except Exception as e: self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus): from sklearn.model_selection import train_test_split # To get each class values and uncertainty if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest # y_val = y_train.append(y_test) else: # y_val = self.y df=self.data y=df[self.targetFeature] X = df.drop(self.targetFeature, axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): 
uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # from sklearn.tree import DecisionTreeRegressor # from sklearn.linear_model import LinearRegression,Lasso,Ridge # from sklearn import linear_model # from sklearn.ensemble import RandomForestRegressor if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) y_hat_total_mean=np.mean(y_hat) y_hat_lb_total_mean=np.mean(y_hat_lb) y_hat_ub_total_mean=np.mean(y_hat_ub) mpiw_20_per=(y_hat_total_mean*20/100) mpiw_lower_range = y_hat_total_mean - mpiw_20_per mpiw_upper_range = y_hat_total_mean + mpiw_20_per from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) self.log.info('Model total picp_percentage : '+str(picp_percentage)) return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): try: global x_feature,y_feature if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): x_feature=''.join(map(str, self.selectedfeature)) else: x_feature= str(self.selectedfeature) # self.selectedfeature=str(self.selectedfeature) X_test=np.squeeze(X_test) y_feature=str(self.targetFeature) pred_dict = {x_feature: X_test, 'y': y_test, 'y_mean': y_mean, 'y_upper': y_upper, 'y_lower': y_lower } pred_df = pd.DataFrame(data=pred_dict) pred_df_sorted = pred_df.sort_values(by=x_feature) plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') plt.legend() plt.xlabel(x_feature) plt.ylabel(y_feature) plt.title('UQ Confidence Interval Plot.') # plt.savefig('uq_test_plt.png') if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'): os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png') plt.savefig(str(self.Deployment)+'/uq_test_plt.png') plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png') plt.clf() plt.cla() plt.close() pltreg=plot_picp_by_feature(X_test, y_test, y_lower, y_upper, xlabel=x_feature) #pltreg.savefig('x.png') pltr=pltreg.figure if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'): os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png') pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') 
pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png') plt.clf() plt.cla() plt.close() except Exception as e: # #print("display exception: \n",e) self.log.info('<!------------- UQ model Display Error ---------------> '+str(e)) def classUncertainty(self,pred,score): try: outuq = {} classes = np.unique(pred) for c in classes: ids = pred == c class_score = score[ids] predc = 'Class_'+str(c) outuq[predc]=np.mean(class_score) x = np.mean(class_score) #Uncertaininty in percentage x=x*100 self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x))) return outuq except Exception as e: # #print("display exception: \n",e) self.log.info('<!------------- UQ classUncertainty Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus): try: # print("Inside uqMain_BBMClassification\n") # print("lenth of x_train {}, x_test {}, y_train {}, y_test {}".format(x_train, x_test, y_train, y_test)) aionstatus = str(aionstatus) if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test else: X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from xgboost import XGBClassifier from lightgbm import LGBMClassifier from sklearn.neighbors import KNeighborsClassifier base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ #print(model_name) try: #geting used features model_used_features=self.basemodel.feature_names_in_ self.log.info("Base model used training features are (UQ Testing): \n"+str(model_used_features)) except: pass model_params=self.basemodel.get_params() uq_scoring_param='accuracy' basemodel=None if (model_name == "GradientBoostingClassifier"): basemodel=GradientBoostingClassifier elif (model_name == "SGDClassifier"): basemodel=SGDClassifier elif (model_name == "GaussianNB"): basemodel=GaussianNB elif (model_name == "DecisionTreeClassifier"): basemodel=DecisionTreeClassifier elif(model_name == "RandomForestClassifier"): basemodel=RandomForestClassifier elif (model_name == "SVC"): basemodel=SVC elif(model_name == "KNeighborsClassifier"): basemodel=KNeighborsClassifier elif(model_name.lower() == "logisticregression"): basemodel=LogisticRegression elif(model_name == "XGBClassifier"): basemodel=XGBClassifier elif(model_name == "LGBMClassifier"): basemodel=LGBMClassifier else: basemodel=LogisticRegression calibrated_mdl=None if (model_name == "SVC"): from sklearn.calibration import CalibratedClassifierCV basemodel=SVC(**model_params) calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) 
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] elif (model_name == "SGDClassifier"): from sklearn.calibration import CalibratedClassifierCV basemodel=SGDClassifier(**model_params) calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] else: from sklearn.calibration import CalibratedClassifierCV base_mdl = basemodel(**model_params) calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3) basemodelfit = calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) predprob_base=calibrated_mdl.predict_proba(X_test)[:, :] cal_model_params=calibrated_mdl.get_params() acc_score_base=accuracy_score(y_test, basepredict) base_estimator_calibrate = cal_model_params['base_estimator'] uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) try: X_train=X_train[model_used_features] X_test=X_test[model_used_features] except: pass uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) # uqmodel_fit = uq_model.fit(X_train, y_train) y_t_pred, y_t_score = uq_model.predict(X_test) acc_score=accuracy_score(y_test, y_t_pred) test_accuracy_perc=round(100*acc_score) if(aionstatus == "aionuq"): test_accuracy_perc=round(test_accuracy_perc,2) #uq_aurrrc not used for any aion gui configuration, so it initialized as 0. if we use area_under_risk_rejection_rate_curve(), it shows plot in cmd prompt,so code execution interuupted.so we make it 0. uq_aurrrc=0 pass else: bbm_c_plot = plot_risk_vs_rejection_rate( y_true=y_test, y_prob=predprob_base, selection_scores=y_t_score, y_pred=y_t_pred, plot_label=['UQ_risk_vs_rejection'], risk_func=accuracy_score, num_bins = 10 ) # This done by kiran, need to uncomment for GUI integration. # bbm_c_plot_sub = bbm_c_plot[4] bbm_c_plot_sub = bbm_c_plot if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'): os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') # bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') re_plot=plot_reliability_diagram(y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, plot_label=['UQModel reliability_diagram'], num_bins=10 ) # This done by kiran, need to uncomment for GUI integration. 
# re_plot_sub = re_plot[4] re_plot_sub = re_plot if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'): os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png') # re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png') uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, selection_scores=y_t_score, attributes=None, risk_func=accuracy_score,subgroup_ids=None, return_counts=False, num_bins=10) uq_aurrrc=uq_aurrrc test_accuracy_perc=round(test_accuracy_perc) #metric_all=compute_classification_metrics(y_test, y_prob, option='all') metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') #expected_calibration_error uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False) # uq_aurrrc=uq_aurrrc confidence_score=acc_score_base-uq_ece ece_confidence_score=round(confidence_score,2) # Model uncertainty using ECE score # model_uncertainty_ece = 1-ece_confidence_score #Uncertainty Using model inherent predict probability mean_predprob_total=np.mean(y_t_score) model_confidence=mean_predprob_total model_uncertainty = 1-mean_predprob_total model_confidence = round(model_confidence,2) # To get each class values and uncertainty if (aionstatus.lower() == 'aionuq'): y_val = np.append(y_train,y_test) else: y_val = self.y self.log.info('------------------> Model Confidence Score '+str(model_confidence)) outuq = self.classUncertainty(y_t_pred,y_t_score) # Another way to get conf score model_uncertainty_per=round((model_uncertainty*100),2) model_confidence_per=round((model_confidence*100),2) acc_score_per = round((acc_score*100),2) uq_ece_per=round((uq_ece*100),2) output={} recommendation = "" if (uq_ece > 0.5): # RED text recommendation = 'Model has high ece (expected calibration error) score compare to threshold (0.5),not good to be deploy. need to be add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' else: # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.') if (uq_ece <= 0.1 and model_confidence >= 0.9): # Green Text recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' else: # Orange recommendation = 'Model has good ECE score (between 0.1-0.5), but less confidence score compare to threshold (90%). If user wants,model can be improve by adding more input data across all feature ranges and could be evaluate with different algorithms/ensembling. 
' #Adding each class uncertainty value classoutput = {} for k,v in outuq.items(): classoutput[k]=(str(round((v*100),2))) output['classes'] = classoutput output['ModelConfidenceScore']=(str(model_confidence_per)) output['ExpectedCalibrationError']=str(uq_ece_per) output['ModelUncertainty']=str(model_uncertainty_per) output['Recommendation']=recommendation # output['user_msg']='Please check the plot for more understanding of model uncertainty' #output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4) output['Accuracy']=str(acc_score_per) output['Problem']= 'Classification' #self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %')) # #print("Prediction mean for the given model:",np.mean(y_hat),"\n") #self.log.info(recommendation) #self.log.info("Model_confidence_score: " +str(confidence_score)) #self.log.info("Model_uncertainty: " +str(round(model_uncertainty,2))) #self.log.info('Please check the plot for more understanding of model uncertainty.\n.') uq_jsonobject = json.dumps(output) with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f: json.dump(output, f) return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per except Exception as inst: self.log.info('\n < ---------- UQ Model Execution Failed Start--------->') self.log.info('\n<------Model Execution failed!!!.' + str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') def aion_confidence_plot(self,df): df=df df = df.sort_values(by=self.selectedfeature) best_values=df.Best_values.to_list() best_upper=df.Best__upper.to_list() best_lower=df.Best__lower.to_list() Total_Upper_PI=df.Total_Upper_PI.to_list() Total_Low_PI=df.Total_Low_PI.to_list() Obseved = df.Observed.to_list() plt.plot(df[x_feature], df['Observed'], 'o', label='Observed') plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey') plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey') plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) plt.legend() plt.xlabel(self.selectedfeature) plt.ylabel(self.targetFeature) plt.title('UQ Best & Good Area Plot') if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'): os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png') plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png') plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus): aionstatus = str(aionstatus) # if (aionstatus.lower() == 'aionuq'): # X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) # else: # X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) # modelName = "" self.log.info('<!------------- Inside BlackBox MetaModel Regression process. 
---------------> ') try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() # #print("model_params['criterion']: \n",model_params['criterion']) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # modelname='sklearn.linear_model'+'.'+model_name # X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest #Geeting trained model name and to use the model in BlackboxMetamodelRegression from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LinearRegression,Lasso,Ridge from sklearn.ensemble import RandomForestRegressor if (model_name == "DecisionTreeRegressor"): basemodel=DecisionTreeRegressor elif (model_name == "LinearRegression"): basemodel=LinearRegression elif (model_name == "Lasso"): basemodel=Lasso elif (model_name == "Ridge"): basemodel=Ridge elif(model_name == "RandomForestRegressor"): basemodel=RandomForestRegressor else: basemodel=LinearRegression if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) else: X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus) if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) # #print("X_train.shape: \n",X_train.shape) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) self.log.info('<!------------- observed_picp: ---------------> '+str(observed_alphas_picp)) self.log.info('<!------------- observed_widths_mpiw: ---------------> '+str(observed_widths_mpiw)) # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” #metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss. 
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) metric_used='' for k,v in metric_all.items(): metric_used=str(round(v,2)) self.log.info('<!------------- Metric used for regression UQ: ---------------> '+str(metric_all)) # Determine the confidence level and recommentation to the tester # test_data=y_test observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) #Calculate total uncertainty for all features # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) # df1=self.data total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) recommendation="" output={} if (observed_alphas_picp >= 0.95 and total_picp >= 0.75): # Add GREEN text self.log.info('Model has good confidence for the selected feature, ready to deploy.\n.') recommendation = "Model has good confidence score, ready to deploy." elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)): # Orange recommendation = "Model has average confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling." self.log.info('Model has average confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') else: # RED text recommendation = "Model has less confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling." 
self.log.info('Model has less confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') #Build uq json info dict output['ModelConfidenceScore']=(str(total_picp_percentage)+'%') output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%') output['SelectedFeatureConfidence']=(str(picp_percentage)+'%') output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%') output['PredictionIntervalCoverageProbability']=observed_alphas_picp output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw) output['DesirableMPIWRange: ']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) output['Recommendation']=str(recommendation) output['Metric']=uq_scoring_param output['Score']=metric_used output['Problemtype']= 'Regression' self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %')) self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %')) #self.log.info('Please check the plot for more understanding of model uncertainty.\n.') #self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub) uq_jsonobject = json.dumps(output) with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f: json.dump(output, f) #To get best and medium UQ range of values from total predict interval y_hat_m=y_hat.tolist() y_hat_lb=y_hat_lb.tolist() upper_bound=y_hat_ub.tolist() y_hat_ub=y_hat_ub.tolist() for x in y_hat_lb: y_hat_ub.append(x) total_pi=y_hat_ub medium_UQ_range = y_hat_ub best_UQ_range= y_hat.tolist() ymean_upper=[] ymean_lower=[] y_hat_m=y_hat.tolist() for i in y_hat_m: y_hat_m_range= (i*20/100) x=i+y_hat_m_range y=i-y_hat_m_range ymean_upper.append(x) ymean_lower.append(y) min_best_uq_dist=round(min(best_UQ_range)) max_best_uq_dist=round(max(best_UQ_range)) # initializing ranges list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) list_best = y_hat_m X_test = np.squeeze(X_test) ''' uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, 'Best__upper':ymean_upper, 'Best__lower':ymean_lower, 'Total_Low_PI': y_hat_lb, 'Total_Upper_PI': upper_bound, } print(uq_dict) uq_pred_df = pd.DataFrame(data=uq_dict) uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False) csv_path=str(self.Deployment)+"/uq_pred_df.csv" df=pd.read_csv(csv_path) self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.') #Callconfidence olot fn only for UQTest interface if (aionstatus.lower() == 'aionuq'): #No need to showcase confidence plot for aion main pass else: self.aion_confidence_plot(df) ''' return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject) except Exception as inst: exc = {"status":"FAIL","message":str(inst).strip('"')} out_exc = json.dumps(exc) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
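The block above wires a trained sklearn regressor into UQ360's BlackboxMetamodelRegression and scores the resulting intervals with picp/mpiw. A minimal standalone sketch of that same flow is given below; the synthetic sine data, the DecisionTreeRegressor choice and the {'max_depth': 4} config are illustrative assumptions, not values taken from the pipeline above.

# Minimal sketch of the regression UQ flow used by uqMain_BBMRegression:
# fit base + meta model, predict interval bounds, score coverage and width.
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.metrics import picp, mpiw

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(500, 1))
y = np.sin(X[:, 0]) + rng.normal(scale=0.2, size=500)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

params = {'max_depth': 4}                         # illustrative base/meta config
uq_model = BlackboxMetamodelRegression(base_model=DecisionTreeRegressor,
                                       meta_model=DecisionTreeRegressor,
                                       base_config=params, meta_config=params)
uq_model.fit(X_train, y_train)                    # fits both base and meta model
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)   # mean, lower, upper bound

print('PICP:', round(picp(y_test, y_hat_lb, y_hat_ub), 2))  # interval coverage
print('MPIW:', round(mpiw(y_hat_lb, y_hat_ub), 2))          # mean interval width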
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
uq_main.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from matplotlib import pyplot import sys import os import matplotlib.pyplot as plt from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression from sklearn import datasets from sklearn.model_selection import train_test_split import pandas as pd from uq360.metrics.regression_metrics import compute_regression_metrics import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import roc_curve from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature import sys import time from sklearn.metrics import confusion_matrix from pathlib import Path import logging import logging.config from os.path import expanduser import platform from sklearn.utils import shuffle class aionUQ: # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature): try: self.data=df self.dfFeatures=dfp self.uqconfig_base=Params self.uqconfig_meta=Params self.targetFeature=targetfeature self.log = logging.getLogger('aionUQ') self.target=target self.selectedfeature=modelfeatures self.y=self.target self.X=self.dfFeatures from appbe.dataPath import DEPLOY_LOCATION self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time())))) os.makedirs(self.Deployment,exist_ok=True) self.basemodel=model self.model_name=ProblemName # self.X, self.y = shuffle(self.X, self.y) X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0) self.xtrain = X_train self.xtest = X_test self.ytrain = y_train self.ytest = y_test # self.deployLocation=deployLocation except Exception as e: # self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) # self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def totalUncertainty(self,df,basemodel,model_params): try: # from sklearn.model_selection import train_test_split # df=self.data # y=df[self.targetFeature] # X = df.drop(self.targetFeature, axis=1) if (isinstance(self.selectedfeature,list)): selectedfeature=[self.selectedfeature[0]] selectedfeature=' '.join(map(str,selectedfeature)) if (isinstance(self.targetFeature,list)): targetFeature=[self.targetFeature[0]] targetFeature=' '.join(map(str,targetFeature)) X = self.data[selectedfeature] y = self.data[targetFeature] X = X.values.reshape((-1,1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) key = 'criterion' #if key in model_params: try: #if 
model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # from sklearn.tree import DecisionTreeRegressor # from sklearn.linear_model import LinearRegression,Lasso,Ridge # from sklearn import linear_model # from sklearn.ensemble import RandomForestRegressor if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) y_hat_total_mean=np.mean(y_hat) y_hat_lb_total_mean=np.mean(y_hat_lb) y_hat_ub_total_mean=np.mean(y_hat_ub) mpiw_20_per=(y_hat_total_mean*20/100) mpiw_lower_range = y_hat_total_mean - mpiw_20_per mpiw_upper_range = y_hat_total_mean + mpiw_20_per from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) # self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) # self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) # self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) # self.log.info('Model total picp_percentage : '+str(picp_percentage)) except Exception as e: print("totalUncertainty fn error: \n",e) return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): try: global x_feature,y_feature if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): x_feature=','.join(map(str, self.selectedfeature)) else: x_feature= str(self.selectedfeature) # self.selectedfeature=str(self.selectedfeature) X_test=np.squeeze(X_test) y_feature=str(self.targetFeature) pred_dict = {x_feature: X_test, 'y': y_test, 'y_mean': y_mean, 'y_upper': y_upper, 'y_lower': y_lower } pred_df = pd.DataFrame(data=pred_dict) x_feature1 = x_feature.split(',') pred_df_sorted = pred_df.sort_values(by=x_feature1) plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') plt.legend() plt.xlabel(x_feature1[0]) plt.ylabel(y_feature) plt.title('UQ Confidence Interval Plot.') # plt.savefig('uq_test_plt.png') ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'): os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') ''' plt.savefig(str(self.Deployment)+'/uq_test_plt.png') #plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png') plt.clf() plt.cla() plt.close() pltreg=plot_picp_by_feature(X_test, y_test, y_lower, y_upper, xlabel=x_feature) 
#pltreg.savefig('x.png') pltr=pltreg.figure ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'): os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') ''' pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') picpPlot = os.path.join(self.Deployment,'picp_per_feature.png') #pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') plt.clf() plt.cla() plt.close() except Exception as e: print("display exception: \n",e) # self.log.info('<!------------- UQ model Display Error ---------------> '+str(e)) return confidencePlot,picpPlot def classUncertainty(self,predprob_base): # from collections import Counter predc="Class_" classes = np.unique(self.y) total = len(self.y) list_predprob=[] counter = Counter(self.y) #for loop for test class purpose for k,v in counter.items(): n_samples = len(self.y[self.y==k]) per = ((v/total) * 100) prob_c=predprob_base[:,int(k)] list_predprob.append(prob_c) # #print("Class_{} : {}/{} percentage={}% \n".format(k,n_samples,total,per )) outuq={} for k in classes: predc += str(k) mean_predprob_class=np.mean(list_predprob[int(k)]) uncertainty=1-mean_predprob_class predc+='_Uncertainty' outuq[predc]=uncertainty predc="Class_" return outuq def uqMain_BBMClassification(self): # self.log.info('<!------------- Inside BlackBox MetaModel Classification process. ---------------> ') # import matplotlib.pyplot as plt try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification except: ##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification. from uq360.algorithms.blackbox_metamodel import MetamodelClassification # from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics # from sklearn import datasets # from sklearn.model_selection import train_test_split # from sklearn.metrics import accuracy_score from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier # from sklearn.linear_model import LogisticRegression # import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() try: #geting used features model_used_features=self.basemodel.feature_names_in_ except: pass X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest uq_scoring_param='accuracy' basemodel=None if (model_name == "GradientBoostingClassifier"): basemodel=GradientBoostingClassifier elif (model_name == "SGDClassifier"): basemodel=SGDClassifier elif (model_name == "GaussianNB"): basemodel=GaussianNB elif (model_name == "DecisionTreeClassifier"): basemodel=DecisionTreeClassifier elif(model_name == "RandomForestClassifier"): basemodel=RandomForestClassifier elif (model_name == "SVC"): basemodel=SVC elif(model_name == "KNeighborsClassifier"): basemodel=KNeighborsClassifier elif(model_name == "LogisticRegression"): basemodel=LogisticRegression 
else: basemodel=LogisticRegression try: try: ##Removed meta_config because leave meta model config as default ml model params uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) except: uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) except: ##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification try: ##Removed meta_config because leave meta model config as default ml model params uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) except: uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model try: X_train=X_train[model_used_features] X_test=X_test[model_used_features] except: pass uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) # uqmodel_fit = uq_model.fit(X_train, y_train) #Test data pred, score y_t_pred, y_t_score = uq_model.predict(X_test) #predict probability # uq_pred_prob=uq_model.predict_proba(X_test) # predprob_base=basemodel.predict_proba(X_test)[:, :] #if (model_name == "SVC" or model_name == "SGDClassifier"): # if model_name in ['SVC','SGDClassifier']: if (model_name == "SVC"): from sklearn.calibration import CalibratedClassifierCV basemodel=SVC(**model_params) calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_svc.fit(X_train, y_train) basepredict = basemodel.predict(X_test) predprob_base = calibrated_svc.predict_proba(X_test)[:, :] elif (model_name == "SGDClassifier"): from sklearn.calibration import CalibratedClassifierCV basemodel=SGDClassifier(**model_params) calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_svc.fit(X_train, y_train) basepredict = basemodel.predict(X_test) predprob_base = calibrated_svc.predict_proba(X_test)[:, :] else: base_mdl = basemodel(**model_params) basemodelfit = base_mdl.fit(X_train, y_train) basepredict = base_mdl.predict(X_test) predprob_base=base_mdl.predict_proba(X_test)[:, :] acc_score=accuracy_score(y_test, y_t_pred) test_accuracy_perc=round(100*acc_score) ''' bbm_c_plot = plot_risk_vs_rejection_rate( y_true=y_test, y_prob=predprob_base, selection_scores=y_t_score, y_pred=y_t_pred, plot_label=['UQ_risk_vs_rejection'], risk_func=accuracy_score, num_bins = 10 ) # This done by kiran, need to uncomment for GUI integration. try: bbm_c_plot_sub = bbm_c_plot[4] bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png') riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png') except Exception as e: print(e) pass riskPlot = '' ''' riskPlot = '' ''' try: re_plot=plot_reliability_diagram(y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, plot_label=['UQModel reliability_diagram'], num_bins=10) # This done by kiran, need to uncomment for GUI integration. 
re_plot_sub = re_plot[4] # re_plot_sub = re_plot re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png') reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png') except Exception as e: print(e) pass reliability_plot = '' ''' reliability_plot = '' uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, selection_scores=y_t_score, attributes=None, risk_func=accuracy_score,subgroup_ids=None, return_counts=False, num_bins=10) uq_aurrrc=uq_aurrrc test_accuracy_perc=round(test_accuracy_perc) #metric_all=compute_classification_metrics(y_test, y_prob, option='all') metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') #expected_calibration_error uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False) uq_aurrrc=uq_aurrrc confidence_score=acc_score-uq_ece ece_confidence_score=round(confidence_score,2) # Model uncertainty using ECE score # model_uncertainty_ece = 1-ece_confidence_score # #print("model_uncertainty1: \n",model_uncertainty_ece) #Uncertainty Using model inherent predict probability mean_predprob_total=np.mean(predprob_base) model_uncertainty = 1-mean_predprob_total model_confidence=mean_predprob_total model_confidence = round(model_confidence,2) # To get each class values and uncertainty outuq = self.classUncertainty(predprob_base) # Another way to get conf score model_uncertainty_per=round((model_uncertainty*100),2) # model_confidence_per=round((model_confidence*100),2) model_confidence_per=round((ece_confidence_score*100),2) acc_score_per = round((acc_score*100),2) uq_ece_per=round((uq_ece*100),2) output={} recommendation = "" if (uq_ece > 0.5): # RED text recommendation = 'Model has high ece (expected calibration error) score compare to threshold (50%),not good to deploy. Add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' msg = 'Bad' else: # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.') if (uq_ece <= 0.1 and model_confidence >= 0.9): # Green Text recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' msg = 'Best' else: # Orange recommendation = 'Model has average confidence score (ideal is >90% confidence) and good ECE score (ideal is <10% error).Model can be improved by adding more training data across all feature ranges and re-training the model.' 
msg = 'Good' #Adding each class uncertainty value output['Problem']= 'Classification' output['recommend']= 'recommend' output['msg']= msg output['UQ_Area_Under_Risk_Rejection_Rate_Curve']=round(uq_aurrrc,4) output['Model_Total_Confidence']=(str(model_confidence_per)+str('%')) output['Expected_Calibration_Error']=(str(uq_ece_per)+str('%')) output['Model_Total_Uncertainty']=(str(model_uncertainty_per)+str('%')) # output['Risk Plot'] = str(riskPlot) # output['Reliability Plot'] = str(reliability_plot) for k,v in outuq.items(): output[k]=(str(round((v*100),2))+str(' %')) output['Recommendation']=recommendation # output['user_msg']='Please check the plot for more understanding of model uncertainty' output['Metric_Accuracy_Score']=(str(acc_score_per)+str(' %')) outputs = json.dumps(output) with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f: json.dump(output, f) return test_accuracy_perc,uq_ece,outputs def aion_confidence_plot(self,df): try: global x_feature df=df df = df.sort_values(by=self.selectedfeature) best_values=df.Best_values.to_list() best_upper=df.Best__upper.to_list() best_lower=df.Best__lower.to_list() Total_Upper_PI=df.Total_Upper_PI.to_list() Total_Low_PI=df.Total_Low_PI.to_list() Obseved = df.Observed.to_list() x_feature1 = x_feature.split(',') plt.plot(df[x_feature1[0]], df['Observed'], 'o', label='Observed') plt.plot(df[x_feature1[0]], df['Best__upper'],'r--', lw=2, color='grey') plt.plot(df[x_feature1[0]], df['Best__lower'],'r--', lw=2, color='grey') plt.plot(df[x_feature1[0]], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') plt.fill_between(df[x_feature1[0]], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) plt.fill_between(df[x_feature1[0]],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) plt.legend() plt.xlabel(x_feature1[0]) plt.ylabel(self.targetFeature) plt.title('UQ Best & Good Area Plot') ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png'): os.remove(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') ''' plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') uq_confidence_plt = os.path.join(str(self.Deployment),'uq_confidence_plt.png') except Exception as inst: print('-----------dsdas->',inst) uq_confidence_plt = '' return uq_confidence_plt def uqMain_BBMRegression(self): # modelName = "" # self.log.info('<!------------- Inside BlockBox MetaModel Regression process. 
---------------> ') try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() # #print("model_params['criterion']: \n",model_params['criterion']) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # modelname='sklearn.linear_model'+'.'+model_name # self.xtrain = self.xtrain.values.reshape((-1,1)) # self.xtest = self.xtest.values.reshape((-1,1)) if (isinstance(self.selectedfeature,list)): selectedfeature=[self.selectedfeature[0]] selectedfeature=' '.join(map(str,selectedfeature)) if (isinstance(self.targetFeature,list)): targetFeature=[self.targetFeature[0]] targetFeature=' '.join(map(str,targetFeature)) X = self.data[selectedfeature] y = self.data[targetFeature] X = X.values.reshape((-1,1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) #Geeting trained model name and to use the model in BlackboxMetamodelRegression from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LinearRegression,Lasso,Ridge from sklearn.ensemble import RandomForestRegressor if (model_name == "DecisionTreeRegressor"): basemodel=DecisionTreeRegressor elif (model_name == "LinearRegression"): basemodel=LinearRegression elif (model_name == "Lasso"): basemodel=Lasso elif (model_name == "Ridge"): basemodel=Ridge elif(model_name == "RandomForestRegressor"): basemodel=RandomForestRegressor else: basemodel=LinearRegression if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: if (uq_scoring_param.lower() == 'picp'): uq_scoring_param='prediction interval coverage probability score (picp)' else: uq_scoring_param=uq_scoring_param else: uq_scoring_param='prediction interval coverage probability score (picp)' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) metric_used='' for k,v in metric_all.items(): metric_used=str(round(v,2)) # Determine the confidence level and recommentation to the tester # test_data=y_test observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) #Calculate total uncertainty for all features # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) # df1=self.data total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params) 
recommendation="" observed_widths_mpiw = round((observed_widths_mpiw/1000000)*100) if observed_widths_mpiw > 100: observed_widths_mpiw = 100 output={} if (observed_alphas_picp >= 0.90 and total_picp >= 0.75): # GREEN text recommendation = "Model has good confidence and MPIW score, ready to deploy." msg='Good' elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.90) and (total_picp >= 0.50)): # Orange recommendation = " Model has average confidence compare to threshold (ideal is both model confidence and MPIW should be >90%) .Model can be improved by adding more training data across all feature ranges and re-training the model." msg = 'Average' else: # RED text recommendation = "Model has less confidence compare to threshold (ideal is both model confidence and MPIW should be >90%), need to be add more input data across all feature ranges and retrain base model, also try with different regression algorithms/ensembling." msg = 'Bad' #Build uq json info dict output['Model_total_confidence']=(str(total_picp_percentage)+'%') output['Model_total_Uncertainty']=(str(total_Uncertainty_percentage)+'%') output['Selected_feature_confidence']=(str(picp_percentage)+'%') output['Selected_feature_Uncertainty']=(str(Uncertainty_percentage)+'%') output['Prediction_Interval_Coverage_Probability']=observed_alphas_picp output['Mean_Prediction_Interval_Width']=str(observed_widths_mpiw)+'%' output['Desirable_MPIW_range']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) output['Recommendation']=str(recommendation) output['Metric_used']=uq_scoring_param output['Metric_value']=metric_used output['Problem']= 'Regression' output['recommend']= 'recommend' output['msg'] = msg with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f: json.dump(output, f) #To get best and medium UQ range of values from total predict interval y_hat_m=y_hat.tolist() y_hat_lb=y_hat_lb.tolist() upper_bound=y_hat_ub.tolist() y_hat_ub=y_hat_ub.tolist() for x in y_hat_lb: y_hat_ub.append(x) total_pi=y_hat_ub medium_UQ_range = y_hat_ub best_UQ_range= y_hat.tolist() ymean_upper=[] ymean_lower=[] y_hat_m=y_hat.tolist() for i in y_hat_m: y_hat_m_range= (i*20/100) x=i+y_hat_m_range y=i-y_hat_m_range ymean_upper.append(x) ymean_lower.append(y) min_best_uq_dist=round(min(best_UQ_range)) max_best_uq_dist=round(max(best_UQ_range)) # initializing ranges list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) list_best = y_hat_m ''' print(X_test) print(X_test) X_test = np.squeeze(X_test) print(x_feature) ''' uq_dict = pd.DataFrame(X_test) #print(uq_dict) uq_dict['Observed'] = y_test uq_dict['Best_values'] = y_hat_m uq_dict['Best__upper'] = ymean_upper uq_dict['Best__lower'] = ymean_lower uq_dict['Total_Low_PI'] = y_hat_lb uq_dict['Total_Upper_PI'] = upper_bound ''' uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, 'Best__upper':ymean_upper, 'Best__lower':ymean_lower, 'Total_Low_PI': y_hat_lb, 'Total_Upper_PI': upper_bound, }''' uq_pred_df = pd.DataFrame(data=uq_dict) uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False) csv_path=str(self.Deployment)+"/uq_pred_df.csv" df=pd.read_csv(csv_path) # self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.') # confidenceplot = self.aion_confidence_plot(df) # output['Confidence Plot']= confidenceplot uq_jsonobject = json.dumps(output) print("UQ regression problem training 
completed...\n") return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject except Exception as inst: print('-------',inst) exc = {"status":"FAIL","message":str(inst).strip('"')} out_exc = json.dumps(exc)
regression_metrics.py
import numpy as np from scipy.stats import norm from sklearn.metrics import mean_squared_error, r2_score from ..utils.misc import fitted_ucc_w_nullref def picp(y_true, y_lower, y_upper): """ Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the grounds truth lies within predicted interval. Measures the prediction interval calibration for regression. Args: y_true: Ground truth y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: the fraction of samples for which the grounds truth lies within predicted interval. """ satisfies_upper_bound = y_true <= y_upper satisfies_lower_bound = y_true >= y_lower return np.mean(satisfies_upper_bound * satisfies_lower_bound) def mpiw(y_lower, y_upper): """ Mean Prediction Interval Width (MPIW). Computes the average width of the the prediction intervals. Measures the sharpness of intervals. Args: y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: the average width the prediction interval across samples. """ return np.mean(np.abs(y_lower - y_upper)) def auucc_gain(y_true, y_mean, y_lower, y_upper): """ Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference with constant band. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: AUUCC gain """ u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper) auucc = u.get_AUUCC() assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain" assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain" auucc_gain = (auucc[1]-auucc[0])/auucc[0] return auucc_gain def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper): """ Computes Gaussian negative_log_likelihood assuming symmetric band around the mean. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: nll """ y_std = (y_upper - y_lower) / 4.0 nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze())) return nll def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None): """ Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound option: string or list of string contained the name of the metrics to be computed. nll_fn: function that evaluates NLL, if None, then computes Gaussian NLL using y_mean and y_lower. Returns: dict: dictionary containing the computed metrics. 
""" assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape) assert y_true.shape == y_lower.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_lower.shape) assert y_true.shape == y_upper.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_upper.shape) results = {} if not isinstance(option, list): if option == "all": option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] else: option_list = [option] if "rmse" in option_list: results["rmse"] = mean_squared_error(y_true, y_mean, squared=False) if "nll" in option_list: if nll_fn is None: nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper) results["nll"] = nll else: results["nll"] = np.mean(nll_fn(y_true)) if "auucc_gain" in option_list: gain = auucc_gain(y_true, y_mean, y_lower, y_upper) results["auucc_gain"] = gain if "picp" in option_list: results["picp"] = picp(y_true, y_lower, y_upper) if "mpiw" in option_list: results["mpiw"] = mpiw(y_lower, y_upper) if "r2" in option_list: results["r2"] = r2_score(y_true, y_mean) return results def _check_not_tuple_of_2_elements(obj, obj_name='obj'): """Check object is not tuple or does not have 2 elements.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name) def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7, ax=None, figsize=None, dpi=None, title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs): """ Plot the uncertainty distribution for a single distribution. Args: dist: scipy.stats._continuous_distns. A scipy distribution object. show_quantile_dots: boolean. Whether to show quantil dots on top of the density plot. qd_sample: int. Number of dots for the quantile dot plot. qd_bins: int. Number of bins for the quantile dot plot. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. title : string or None, optional (default=Prediction Distribution) Axes title. If None, title is disabled. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. xlabel : string or None, optional (default=Prediction) X-axis title label. If None, title is disabled. ylabel : string or None, optional (default=Density) Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with prediction distribution. 
""" import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100) ax.plot(x, dist.pdf(x), **kwargs) if show_quantile_dots: from matplotlib.patches import Circle from matplotlib.collections import PatchCollection import matplotlib.ticker as ticker data = dist.rvs(size=10000) p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample) x_ = np.percentile(data, p_less_than_x * 100) # Inverce CDF (ppf) # Create bins hist = np.histogram(x_, bins=qd_bins) bins, edges = hist radius = (edges[1] - edges[0]) / 2 ax2 = ax.twinx() patches = [] max_y = 0 for i in range(qd_bins): x_bin = (edges[i + 1] + edges[i]) / 2 y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])] max_y = max(y_bins) if max(y_bins) > max_y else max_y for _, y_bin in enumerate(y_bins): circle = Circle((x_bin, y_bin), radius) patches.append(circle) p = PatchCollection(patches, alpha=0.4) ax2.add_collection(p) # Axis tweek y_scale = (max_y + radius) / max(dist.pdf(x)) ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x_ / y_scale)) ax2.yaxis.set_major_formatter(ticks_y) ax2.set_yticklabels([]) if xlims is not None: ax2.set_xlim(left=xlims[0], right=xlims[1]) else: ax2.set_xlim([min(x_) - radius, max(x) + radius]) ax2.set_ylim([0, max_y + radius]) ax2.set_aspect(1) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) return ax def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10, ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear", title=None, xlabel=None, ylabel=None): """ Plot how prediction uncertainty varies across the entire range of a feature. Args: x_test: One dimensional ndarray. Feature column of the test dataset. y_test: One dimensional ndarray. Ground truth label of the test dataset. y_test_pred_lower_total: One dimensional ndarray. Lower bound of the total uncertainty range. y_test_pred_upper_total: One dimensional ndarray. Upper bound of the total uncertainty range. num_bins: int. Number of bins used to discritize x_test into equal-sample-sized bins. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``. xscale: Passed to ``ax.set_xscale()``. title : string or None, optional Axes title. If None, title is disabled. xlabel : string or None, optional X-axis title label. If None, title is disabled. ylabel : string or None, optional Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature. 
""" from scipy.stats.mstats import mquantiles import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x_uniques_sorted = np.sort(np.unique(x_test)) num_unique = len(x_uniques_sorted) sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test) if len(x_uniques_sorted) > 10: # bin the values q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:]) q_sample_bin_ids = np.digitize(x_test, q_bins) picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin], y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)]) unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins) picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)] picp_replicated = np.array([item for sublist in picp_replicated for item in sublist]) else: picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin], y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)]) picp_replicated = picps ax.plot(x_uniques_sorted, picp_replicated, label='PICP') ax.axhline(0.95, linestyle='--', label='95%') ax.set_ylabel('PICP') ax.legend(loc='best') if title is None: title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format( picp(y_test, y_test_pred_lower_total, y_test_pred_upper_total), mpiw(y_test_pred_lower_total, y_test_pred_upper_total)) if xlims is not None: ax.set_xlim(left=xlims[0], right=xlims[1]) if ylims is not None: ax.set_ylim(bottom=ylims[0], top=ylims[1]) ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if xscale is not None: ax.set_xscale(xscale) return ax def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total, y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None, ax=None, figsize=None, dpi=None, xlims=None, xscale="linear", title=None, xlabel=None, ylabel=None): """ Plot how prediction uncertainty varies across the entire range of a feature. Args: x_test: one dimensional ndarray. Feature column of the test dataset. y_test_pred_mean: One dimensional ndarray. Model prediction for the test dataset. y_test_pred_lower_total: One dimensional ndarray. Lower bound of the total uncertainty range. y_test_pred_upper_total: One dimensional ndarray. Upper bound of the total uncertainty range. y_test_pred_lower_epistemic: One dimensional ndarray. Lower bound of the epistemic uncertainty range. y_test_pred_upper_epistemic: One dimensional ndarray. Upper bound of the epistemic uncertainty range. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. xscale: Passed to ``ax.set_xscale()``. title : string or None, optional Axes title. If None, title is disabled. xlabel : string or None, optional X-axis title label. If None, title is disabled. ylabel : string or None, optional Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature. 
""" import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x_uniques_sorted = np.sort(np.unique(x_test)) y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2 agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted]) agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted]) ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction') ax.fill_between(x_uniques_sorted, agg_y_mean - 2.0 * agg_y_std, agg_y_mean + 2.0 * agg_y_std, alpha=0.3, label='total uncertainty') if y_test_pred_lower_epistemic is not None: y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2 agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted]) ax.fill_between(x_uniques_sorted, agg_y_mean - 2.0 * agg_y_std_epistemic, agg_y_mean + 2.0 * agg_y_std_epistemic, alpha=0.3, label='model uncertainty') ax.legend(loc='best') if xlims is not None: ax.set_xlim(left=xlims[0], right=xlims[1]) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if xscale is not None: ax.set_xscale(xscale) return ax
classification_metrics.py
import numpy as np import pandas as pd from scipy.stats import entropy from sklearn.metrics import roc_auc_score, log_loss, accuracy_score def entropy_based_uncertainty_decomposition(y_prob_samples): """ Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components. References: .. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference on Machine Learning (pp. 1184-1193). PMLR. Args: y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities corresponding to samples from the model posterior. Returns: tuple: - total_uncertainty: entropy of the predictive distribution. - aleatoric_uncertainty: aleatoric component of the total_uncertainty. - epistemic_uncertainty: epistemic component of the total_uncertainty. """ y_preds_samples_stacked = np.stack(y_prob_samples) preds_mean = np.mean(y_preds_samples_stacked, 0) total_uncertainty = entropy(preds_mean, axis=1) aleatoric_uncertainty = np.mean( np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1), axis=1) epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty def multiclass_brier_score(y_true, y_prob): """Brier score for multi-class. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. Returns: float: Brier score. """ assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" y_target = np.zeros_like(y_prob) y_target[:, y_true] = 1.0 return np.mean(np.sum((y_target - y_prob) ** 2, axis=1)) def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score, attributes=None, num_bins=10, subgroup_ids=None, return_counts=False): """ Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where coverage instead of rejection rate is used. References: .. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty." In International Conference on Machine Learning, pp. 1963-1971. 2019. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like of shape (n_samples,) predicted labels. selection_scores: scores corresponding to certainty in the predicted labels. risk_func: risk function under consideration. attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. num_bins: number of bins. subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids. return_counts: set to True to return counts also. Returns: float or tuple: - aurrrc (float): area under risk rejection rate curve. - rejection_rates (list): rejection rates for each bin (returned only if return_counts is True). - selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True). - risks (list): risk in each bin (returned only if return_counts is True). 
""" if selection_scores is None: assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)] if y_pred is None: assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" y_pred = np.argmax(y_prob, axis=1) order = np.argsort(selection_scores)[::-1] rejection_rates = [] selection_thresholds = [] risks = [] for bin_id in range(num_bins): samples_in_bin = len(y_true) // num_bins selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]] selection_thresholds.append(selection_threshold) ids = selection_scores >= selection_threshold if sum(ids) > 0: if attributes is None: if isinstance(y_true, pd.Series): y_true_numpy = y_true.values else: y_true_numpy = y_true if subgroup_ids is None: risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids]) else: if sum(subgroup_ids & ids) > 0: risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids]) else: risk_value = 0.0 else: risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes) else: risk_value = 0.0 risks.append(risk_value) rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true)) aurrrc = np.nanmean(risks) if not return_counts: return aurrrc else: return aurrrc, rejection_rates, selection_thresholds, risks def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False): """ Computes the reliability curve and the expected calibration error [1]_ . References: .. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference on Machine Learning, PMLR 70:1321-1330, 2017. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like of shape (n_samples,) predicted labels. num_bins: number of bins. return_counts: set to True to return counts also. Returns: float or tuple: - ece (float): expected calibration error. - confidences_in_bins: average confidence in each bin (returned only if return_counts is True). - accuracies_in_bins: accuracy in each bin (returned only if return_counts is True). - frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True). 
""" assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" num_samples, num_classes = y_prob.shape top_scores = np.max(y_prob, axis=1) if y_pred is None: y_pred = np.argmax(y_prob, axis=1) if num_classes == 2: bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0)) else: bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0)) non_boundary_bin_edges = bins_edges[1:-1] bin_centers = (bins_edges[1:] + bins_edges[:-1])/2 sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges) num_samples_in_bins = np.zeros(num_bins) accuracies_in_bins = np.zeros(num_bins) confidences_in_bins = np.zeros(num_bins) for bin in range(num_bins): num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin]) if num_samples_in_bins[bin] > 0: accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin] confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin] ece = np.sum( num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples ) frac_samples_in_bins = num_samples_in_bins / num_samples if not return_counts: return ece else: return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers def compute_classification_metrics(y_true, y_prob, option='all'): """ Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes the [aurrrc, ece, auroc, nll, brier, accuracy] metrics. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. option: string or list of string contained the name of the metrics to be computed. Returns: dict: a dictionary containing the computed metrics. """ results = {} if not isinstance(option, list): if option == "all": option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"] else: option_list = [option] if "aurrrc" in option_list: results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob) if "ece" in option_list: results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob) if "auroc" in option_list: results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob) if "nll" in option_list: results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) if "brier" in option_list: results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob) if "accuracy" in option_list: results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) return results def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10): """ Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves can be plot by passing data as lists. Args: y_true: array-like or or a list of array-like of shape (n_samples,) ground truth labels. y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like or or a list of array-like of shape (n_samples,) predicted labels. plot_label: (optional) list of names identifying each curve. num_bins: number of bins. Returns: tuple: - ece_list: ece: list containing expected calibration error for each curve. - accuracies_in_bins_list: list containing binned average accuracies for each curve. - frac_samples_in_bins_list: list containing binned sample frequencies for each curve. 
            - confidences_in_bins_list: list containing binned average confidence for each curve.
    """
    import matplotlib.pyplot as plt

    if not isinstance(y_true, list):
        y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred]
    if len(plot_label) != len(y_true):
        raise ValueError('y_true and plot_label should be of the same length.')

    ece_list = []
    accuracies_in_bins_list = []
    frac_samples_in_bins_list = []
    confidences_in_bins_list = []

    for idx in range(len(plot_label)):
        ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(
            y_true[idx], y_prob[idx], y_pred[idx], num_bins=num_bins, return_counts=True)
        ece_list.append(ece)
        accuracies_in_bins_list.append(accuracies_in_bins)
        frac_samples_in_bins_list.append(frac_samples_in_bins)
        confidences_in_bins_list.append(confidences_in_bins)

    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    for idx in range(len(plot_label)):
        plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx])
    plt.title("Confidence Histogram")
    plt.xlabel("Confidence")
    plt.ylabel("Fraction of Samples")
    plt.grid()
    plt.ylim([0.0, 1.0])
    plt.legend()

    plt.subplot(1, 2, 2)
    for idx in range(len(plot_label)):
        plt.plot(bins, accuracies_in_bins_list[idx], 'o-',
                 label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx]))
    plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration")
    plt.title("Reliability Plot")
    plt.xlabel("Confidence")
    plt.ylabel("Accuracy")
    plt.grid()
    plt.legend()

    plt.show()

    return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list


def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None,
                                attributes=None, num_bins=10, subgroup_ids=None):
    """
    Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves can be
    plotted by passing data as lists.

    Args:
        y_true: array-like or a list of array-like of shape (n_samples,)
            ground truth labels.
        y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
            Probability scores from the base model.
        y_pred: array-like or a list of array-like of shape (n_samples,)
            predicted labels.
        selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels.
        plot_label: (optional) list of names identifying each curve.
        risk_func: risk function under consideration.
        attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
        num_bins: number of bins.
        subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a
            subgroup of the samples specified by subgroup_ids.

    Returns:
        tuple:
            - aurrrc_list: list containing the area under risk rejection rate curves.
            - rejection_rate_list: list containing the binned rejection rates.
            - selection_thresholds_list: list containing the binned selection thresholds.
            - risk_list: list containing the binned risks.
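    Example:
        An illustrative sketch; ``probs`` is a hypothetical (n_samples, n_classes)
        probability array for held-out labels ``y_test`` and ``accuracy_score`` is
        scikit-learn's accuracy metric::

            from sklearn.metrics import accuracy_score
            plot_risk_vs_rejection_rate(y_test, probs, probs.argmax(axis=1),
                                        risk_func=accuracy_score, num_bins=10)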
""" import matplotlib.pyplot as plt if not isinstance(y_true, list): y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids] if len(plot_label) != len(y_true): raise ValueError('y_true and plot_label should be of same length.') aurrrc_list = [] rejection_rate_list = [] risk_list = [] selection_thresholds_list = [] for idx in range(len(plot_label)): aursrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve( y_true[idx], y_prob[idx], y_pred[idx], selection_scores=selection_scores[idx], risk_func=risk_func, attributes=attributes, num_bins=num_bins, subgroup_ids=subgroup_ids[idx], return_counts=True ) aurrrc_list.append(aursrc) rejection_rate_list.append(rejection_rates) risk_list.append(risks) selection_thresholds_list.append(selection_thresholds) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) for idx in range(len(plot_label)): plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx])) plt.legend(loc="best") plt.xlabel("Rejection Rate") if risk_func is None: ylabel = "Prediction Error Rate" else: if 'accuracy' in risk_func.__name__: ylabel = "1.0 - " + risk_func.__name__ else: ylabel = risk_func.__name__ plt.ylabel(ylabel) plt.title("Risk vs Rejection Rate Plot") plt.grid() plt.subplot(1, 2, 2) for idx in range(len(plot_label)): plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx])) plt.legend(loc="best") plt.xlabel("Selection Threshold") if risk_func is None: ylabel = "Prediction Error Rate" else: if 'accuracy' in risk_func.__name__: ylabel = "1.0 - " + risk_func.__name__ else: ylabel = risk_func.__name__ plt.ylabel(ylabel) plt.title("Risk vs Selection Threshold Plot") plt.grid() plt.show() return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list