file | content
---|---
register.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class register():
def __init__(self, importer, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = ""
self.function_code = ""
self.importer = importer
self.input_files = {}
self.output_files = {}
self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','metrics': 'metrics.json','production': 'production.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\n'
text += self.getInputFiles()
if indent:
text = text.replace('\n', '\n' + self.tab * indent)
return text
def __addValidateConfigCode(self, models=None):
text = "\n\
\ndef validateConfig():\
\n config_file = Path(__file__).parent/'config.json'\
\n if not Path(config_file).exists():\
\n raise ValueError(f'Config file is missing: {config_file}')\
\n config = utils.read_json(config_file)\
\n return config\
"
return text
def addLocalFunctionsCode(self, models):
self.function_code += self.__addValidateConfigCode(models)
def addPrefixCode(self, smaller_is_better=False, indent=1):
compare = 'min' if smaller_is_better else 'max'
self.codeText += f"""
def get_best_model(run_path):
models_path = [d for d in run_path.iterdir() if d.is_dir()]
scores = {{}}
for model in models_path:
metrics = utils.read_json(model/IOFiles['metrics'])
if metrics.get('score', None):
scores[model.stem] = metrics['score']
best_model = {compare}(scores, key=scores.get)
return best_model
def __merge_logs(log_file_sequence,path, files):
if log_file_sequence['first'] in files:
with open(path/log_file_sequence['first'], 'r') as f:
main_log = f.read()
files.remove(log_file_sequence['first'])
for file in files:
with open(path/file, 'r') as f:
main_log = main_log + f.read()
(path/file).unlink()
with open(path/log_file_sequence['merged'], 'w') as f:
f.write(main_log)
def merge_log_files(folder, models):
log_file_sequence = {{
'first': 'aion.log',
'merged': 'aion.log'
}}
log_file_suffix = '_aion.log'
log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()]
log_files.append(log_file_sequence['first'])
__merge_logs(log_file_sequence, folder, log_files)
def register(config, targetPath, log):
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = utils.read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {{meta_data_file}}')
run_id = meta_data['monitoring']['runId']
usecase = config['targetPath']
current_run_path = targetPath/'runs'/str(run_id)
register_model_name = get_best_model(current_run_path)
models = config['models']
merge_log_files(targetPath, models)
meta_data['register'] = {{'runId':run_id, 'model': register_model_name}}
utils.write_json(meta_data, targetPath/IOFiles['metaData'])
utils.write_json({{'Model':register_model_name,'runNo':str(run_id)}}, targetPath/IOFiles['production'])
status = {{'Status':'Success','Message':f'Model Registered: {{register_model_name}}'}}
log.info(f'output: {{status}}')
return json.dumps(status)
"""
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'json'}
]
return modules
def addMainCode(self, models, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
if not targetPath.exists():
raise ValueError(f'targetPath does not exist: {targetPath}')
log_file = targetPath / IOFiles['log']
log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(register(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addStatement(self, statement, indent=1):
self.codeText += f"\n{self.tab * indent}{statement}"
def getCode(self, indent=1):
return self.function_code + '\n' + self.codeText
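# --- Hedged usage sketch (illustrative; not part of the original source) ---
# A minimal, module-level example of driving this code generator, assuming an
# `importer` object with the interface used by the app layer's run_register().
# This helper is hypothetical and is never invoked at import time.
def _example_register_usage(importer):
    gen = register(importer)
    gen.addLocalFunctionsCode(models=['MLP', 'LSTM'])
    gen.addPrefixCode(smaller_is_better=True)  # pick min(score) for e.g. RMSE
    gen.addMainCode(models=['MLP', 'LSTM'])
    # Emitted module text: IOFiles dict, helper functions, __main__ block
    return gen.getInputOutputFiles() + gen.getCode()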
|
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from mlac.timeseries.core.imports import importModule
from mlac.timeseries.core.load_data import tabularDataReader
from mlac.timeseries.core.transformer import transformer as profiler
from mlac.timeseries.core.selector import selector
from mlac.timeseries.core.trainer import learner
from mlac.timeseries.core.register import register
from mlac.timeseries.core.deploy import deploy
from mlac.timeseries.core.drift_analysis import drift
from mlac.timeseries.core.functions import global_function
from mlac.timeseries.core.data_reader import data_reader
from mlac.timeseries.core.utility import utility_function
|
load_data.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class tabularDataReader():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.function_code = ''
self.codeText = ''
self.code_generated = False
def getInputFiles(self):
IOFiles = {
"rawData": "rawData.dat",
"metaData" : "modelMetaData.json",
"log" : "aion.log",
"outputData" : "rawData.dat",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
text = 'IOFiles = '
if not IOFiles:
text += '{ }'
else:
text += json.dumps(IOFiles, indent=4)
return text
def getOutputFiles(self):
output_files = {
'metaData' : 'modelMetaData.json',
'log' : 'aion.log',
'outputData' : 'rawData.dat'
}
text = 'output_file = '
if not output_files:
text += '{ }'
else:
text += json.dumps(output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\n'
text += self.getInputFiles()
if indent:
text = text.replace('\n', '\n' + self.tab * indent)
return text
def __addValidateConfigCode(self):
text = "\n\
\ndef validateConfig():\
\n config_file = Path(__file__).parent/'config.json'\
\n if not Path(config_file).exists():\
\n raise ValueError(f'Config file is missing: {config_file}')\
\n config = read_json(config_file)\
\n if not config['targetPath']:\
\n raise ValueError(f'Target Path is not configured')\
\n return config"
return text
def addMainCode(self, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
targetPath.mkdir(parents=True, exist_ok=True)
if not targetPath.exists():
raise ValueError(f'targetPath does not exist: {targetPath}')
meta_data_file = targetPath / IOFiles['metaData']
if not meta_data_file.exists():
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath / IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(load_data(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addLoadDataCode(self):
self.codeText += """
#This function will read the data and save the data on persistent storage
def load_data(config, targetPath, log):
meta_data_file = targetPath / IOFiles['metaData']
meta_data = read_json(meta_data_file)
if meta_data.get('monitoring', False) and not meta_data['monitoring'].get('retrain', False):
raise ValueError('New data is not enough to retrain the model')
df = read_data(config['dataLocation'])
status = {}
output_data_path = targetPath / IOFiles['outputData']
log.log_dataframe(df)
required_features = list(set(config['selected_features'] + config['dateTimeFeature'] + config['target_feature']))
log.info('Dataset features required: ' + ','.join(required_features))
missing_features = [x for x in required_features if x not in df.columns.tolist()]
if missing_features:
raise ValueError(f'Some feature/s is/are missing: {missing_features}')
log.info('Removing unused features: ' + ','.join(list(set(df.columns) - set(required_features))))
df = df[required_features]
log.info(f'Required features: {required_features}')
try:
log.info(f'Saving Dataset: {str(output_data_path)}')
write_data(df, output_data_path, index=False)
status = {'Status': 'Success', 'DataFilePath': IOFiles['outputData'], 'Records': len(df)}
except Exception as e:
raise ValueError(f'Unable to create data file: {e}')
meta_data['load_data'] = {}
meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if
x != config['target_feature']]
meta_data['load_data']['Status'] = status
write_json(meta_data, meta_data_file)
output = json.dumps(status)
log.info(output)
return output
"""
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\n' + self.codeText
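# --- Hedged usage sketch (illustrative; not part of the original source) ---
# How the app layer's run_loader() assembles a data-ingestion script from this
# class; the call order mirrors the method definitions above.
def _example_loader_usage():
    loader = tabularDataReader()
    loader.addLocalFunctionsCode()   # emits validateConfig()
    loader.addLoadDataCode()         # emits load_data()
    loader.addMainCode()             # emits the __main__ entry point
    return loader.getInputOutputFiles() + loader.getCode()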
|
deploy.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_deploy_params(config):
param_keys = ["modelVersion","problem_type","target_feature","lag_order","noofforecasts"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['ipAddress'] = '127.0.0.1'
data['portNo'] = '8094'
return data
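# Hedged illustration of get_deploy_params() output; the input values below are
# made up. Keys outside param_keys are dropped, targetPath is taken from
# modelName, and the serving address/port are fixed defaults:
# >>> get_deploy_params({'modelName': 'demo', 'modelVersion': '1',
# ...                    'problem_type': 'timeSeriesForecasting'})
# {'modelVersion': '1', 'problem_type': 'timeSeriesForecasting',
#  'targetPath': 'demo', 'ipAddress': '127.0.0.1', 'portNo': '8094'}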
def import_trainer_module(importer):
non_sklearn_modules = get_variable('non_sklearn_modules')
if non_sklearn_modules:
for mod in non_sklearn_modules:
module = get_module_mapping(mod)
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
imported_modules = [
]
def run_deploy(config):
generated_files = []
importer = importModule()
deployer = deploy()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('Prediction')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
importModules(importer,deployer.getPredictionCodeModules())
code = file_header(usecase)
code += importer.getCode()
code += deployer.getInputOutputFiles()
deployer.addPredictionCode()
code += deployer.getCode()
# create prediction file
with open(deploy_path/"predict.py", 'w') as f:
f.write(code)
generated_files.append("predict.py")
# create service file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + deployer.getServiceCode())
generated_files.append("aionCode.py")
importer.addModule('seaborn')
importer.addModule('sklearn')
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
config_file = deploy_path/"config.json"
config_data = get_deploy_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('Prediction', deploy_path,config['modelName'], generated_files) |
trainer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from mlac.timeseries.app import utility as utils
def get_model_name(algo, method):
    if method == 'modelBased':
        return algo + '_' + 'MLBased'
    elif method == 'statisticalBased':
        return algo + '_' + 'StatisticsBased'
    else:
        return algo
def get_training_params(config, algo):
param_keys = ["modelVersion","problem_type","target_feature","train_features","scoring_criteria","test_ratio","optimization_param","dateTimeFeature"]#BugID:13217
data = {key:value for (key,value) in config.items() if key in param_keys}
data['algorithms'] = {algo: config['algorithms'][algo]}
data['targetPath'] = config['modelName']
return data
def update_score_comparer(scorer):
smaller_is_better_scorer = ['neg_mean_squared_error','mse','neg_root_mean_squared_error','rmse','neg_mean_absolute_error','mae']
if scorer.lower() in smaller_is_better_scorer:
utils.update_variable('smaller_is_better', True)
else:
utils.update_variable('smaller_is_better', False)
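# Hedged illustration: error-style metrics flip the comparison that the
# register step's get_best_model() later uses (min instead of max).
# >>> update_score_comparer('RMSE')  # smaller_is_better -> True
# >>> update_score_comparer('r2')    # smaller_is_better -> False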
def run_trainer(config):
trainer = learner()
importer = importModule()
function = global_function()
utils.importModules(importer,trainer.getPrefixModules())
update_score_comparer(config['scoring_criteria'])
model_name = list(config['algorithms'].keys())[0]
if model_name == 'MLP':
utils.importModules(importer,trainer.getMlpCodeModules())
trainer.addMlpCode()
elif model_name == 'LSTM':
utils.importModules(importer,trainer.getLstmCodeModules())
trainer.addLstmCode()
trainer.addMainCode()
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name)
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('train')
with open(deploy_path/"utility.py", 'w') as f:
f.write(utils.file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(utils.file_header(usecase))
generated_files.append("__init__.py")
importer.addModule("warnings")
code = importer.getCode()
code += 'warnings.filterwarnings("ignore")\n'
code += f"\nmodel_name = '{model_name}'\n"
utils.append_variable('models_name',model_name)
out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','metrics':'metrics.json','metaDataOutput':f'{model_name}_modelMetaData.json','production':'production.json'}
trainer.addOutputFiles(out_files)
code += trainer.getInputOutputFiles()
code += function.getCode()
trainer.addLocalFunctionsCode()
code += trainer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
with open (deploy_path/"config.json", "w") as f:
json.dump(get_training_params(config, model_name), f, indent=4)
generated_files.append("config.json")
utils.create_docker_file('train', deploy_path,config['modelName'], generated_files)
|
selector.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
output_file_map = {
'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'}
}
def get_selector_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","cat_features","n_components"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_selector(config):
select = selector()
importer = importModule()
function = global_function()
importModules(importer,select.getPrefixModules())
importModules(importer, select.getSuffixModules())
importModules(importer, select.getMainCodeModules())
select.addPrefixCode()
select.addSuffixCode()
select.addMainCode()
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('selector')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += select.getInputOutputFiles()
code += function.getCode()
select.addLocalFunctionsCode()
code += select.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_selector_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('selector', deploy_path,config['modelName'], generated_files) |
utility.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import datetime
from pathlib import Path
variables = {}
def update_variable(name, value):
variables[name] = value
def get_variable(name, default=None):
return variables.get(name, default)
def append_variable(name, value):
data = get_variable(name)
if not data:
update_variable(name, [value])
elif not isinstance(data, list):
update_variable(name, [data, value])
else:
data.append(value)
update_variable(name, data)
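# Hedged illustration of the variable-store semantics above (example values):
# >>> update_variable('models_name', 'MLP')
# >>> append_variable('models_name', 'LSTM')  # promotes the scalar to a list
# >>> get_variable('models_name')
# ['MLP', 'LSTM']
# >>> get_variable('missing', default=[])
# []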
def addDropFeature(feature, features_list, coder, indent=1):
coder.addStatement(f'if {feature} in {features_list}:', indent=indent)
coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1)
def importModules(importer, modules_list):
for module in modules_list:
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
def file_header(use_case, module_name=None):
time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ')
text = "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n"
return text + f"'''\nThis file is automatically generated by AION for {use_case} usecase.\nFile generation time: {time_str}\n'''"
def get_module_mapping(module):
mapping = {
"LogisticRegression": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'}
,"GaussianNB": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'}
,"DecisionTreeClassifier": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'}
,"SVC": {'module':'SVC', 'mod_from':'sklearn.svm'}
,"KNeighborsClassifier": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'}
,"GradientBoostingClassifier": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'}
,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'}
,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'}
,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'}
,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'}
,"LinearRegression": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'}
,"Lasso": {'module':'Lasso', 'mod_from':'sklearn.linear_model'}
,"Ridge": {'module':'Ridge', 'mod_from':'sklearn.linear_model'}
,"DecisionTreeRegressor": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'}
,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'}
,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'}
,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'}
,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'}
}
return mapping.get(module, None)
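# Hedged illustration: resolving an algorithm name to its import spec; the
# lookup returns None for algorithms absent from the table.
# >>> get_module_mapping('XGBClassifier')
# {'module': 'XGBClassifier', 'mod_from': 'xgboost'}
# >>> get_module_mapping('UnknownAlgo') is None
# True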
def create_docker_file(name, path,usecasename,files=[],text_feature=False):
text = ""
if name == 'load_data':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'transformer':
text='FROM python:3.8-slim-buster\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
if text_feature:
    text += '\nCOPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl'
text += '\n'
# Build the RUN layer as a single instruction; hedged fix — assumes a
# Debian-based image where git must first be installed via apt-get (the
# original emitted a bare 'git' invocation that would fail the build)
run_cmds = []
if text_feature:
    run_cmds.append('apt-get update && apt-get install -y git')
    run_cmds.append('pip install requests')
    run_cmds.append('pip install git+https://github.com/MCFreddie777/language-check.git')
run_cmds.append('pip install --no-cache-dir -r requirements.txt')
if text_feature:
    run_cmds.append('python -m nltk.downloader stopwords')
    run_cmds.append('python -m nltk.downloader punkt')
    run_cmds.append('python -m nltk.downloader wordnet')
    run_cmds.append('python -m nltk.downloader averaged_perceptron_tagger')
text += 'RUN ' + ' && '.join(run_cmds) + '\n'
elif name == 'selector':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'train':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'register':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'Prediction':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
if text_feature:
text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\n'
# Build the RUN layer as a single instruction; hedged fix — assumes a
# Debian-based image where git must first be installed via apt-get (the
# original emitted a bare 'git' invocation that would fail the build)
run_cmds = []
if text_feature:
    run_cmds.append('apt-get update && apt-get install -y git')
    run_cmds.append('pip install requests')
    run_cmds.append('pip install git+https://github.com/MCFreddie777/language-check.git')
run_cmds.append('pip install --no-cache-dir -r requirements.txt')
if text_feature:
    run_cmds.append('python -m nltk.downloader stopwords')
    run_cmds.append('python -m nltk.downloader punkt')
    run_cmds.append('python -m nltk.downloader wordnet')
    run_cmds.append('python -m nltk.downloader averaged_perceptron_tagger')
text += 'RUN ' + ' && '.join(run_cmds) + '\n'
text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\n'
elif name == 'input_drift':
text='FROM python:3.8-slim-buster'
text+='\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\n'
for file in files:
text+=f'\nCOPY {file} {file}'
text+='\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
file_name = Path(path)/'Dockerfile'
with open(file_name, 'w') as f:
f.write(text) |
drift_analysis.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_drift_params(config):
param_keys = ["modelVersion","problem_type","retrainThreshold","dataLocation"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_drift_analysis(config):
importer = importModule()
monitor = drift()
monitor.addLocalFunctionsCode()
monitor.addPrefixCode()
monitor.addMainCode()
importModules(importer, monitor.getMainCodeModules())
importer.addModule('warnings')
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('load_data')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = importer.getCode()
code += '\nwarnings.filterwarnings("ignore")\n'
code += monitor.getInputOutputFiles()
code += monitor.getCode()
# create serving file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + code)
generated_files.append("aionCode.py")
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
with open (deploy_path/"config.json", "w") as f:
json.dump(get_drift_params(config), f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
|
transformer.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
output_file_map = {
'text' : {'text' : 'text_profiler.pkl'},
'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'},
'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'},
'normalizer' : {'normalizer' : 'normalizer.pkl'}
}
def add_common_imports(importer):
common_importes = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
def get_transformer_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","text_features","profiler","test_ratio","dateTimeFeature"] #BugID:13217
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_transformer(config):
transformer = profiler()
importer = importModule()
function = global_function()
importModules(importer, transformer.getPrefixModules())
importer.addModule('warnings')
transformer.addPrefixCode()
importModules(importer, transformer.getMainCodeModules())
transformer.addMainCode()
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation'
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('transformer')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += "\nimport os\nos.path.abspath(os.path.join(__file__, os.pardir))\n" #chdir to import from current dir
code += importer.getCode()
code += '\nwarnings.filterwarnings("ignore")\n'
code += transformer.getInputOutputFiles()
code += function.getCode()
transformer.addLocalFunctionsCode()
code += transformer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_transformer_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('transformer', deploy_path,config['modelName'], generated_files)
|
register.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_register_params(config, models):
param_keys = ["modelVersion","problem_type"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['models'] = models
return data
def run_register(config):
importer = importModule()
registration = register(importer)
models = get_variable('models_name')
smaller_is_better = get_variable('smaller_is_better', False)
registration.addLocalFunctionsCode(models)
registration.addPrefixCode(smaller_is_better)
registration.addMainCode(models)
importModules(importer, registration.getMainCodeModules())
importer.addModule('warnings')
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('register')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = importer.getCode()
code += '\nwarnings.filterwarnings("ignore")\n'
code += registration.getInputOutputFiles()
code += registration.getCode()
# create serving file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + code)
generated_files.append("aionCode.py")
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
with open (deploy_path/"config.json", "w") as f:
json.dump(get_register_params(config, models), f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('register', deploy_path,config['modelName'], generated_files)
|
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .load_data import run_loader
from .transformer import run_transformer
from .selector import run_selector
from .trainer import run_trainer
from .register import run_register
from .deploy import run_deploy
from .drift_analysis import run_drift_analysis
|
load_data.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_load_data_params(config):
param_keys = ["modelVersion","problem_type","target_feature","selected_features","dateTimeFeature","dataLocation"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_loader(config):
generated_files = []
importer = importModule()
loader = tabularDataReader()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('load_data')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create the production data reader file
importer.addLocalModule('dataReader', mod_from='data_reader')
readers = ['sqlite','influx']
if 's3' in config.keys():
readers.append('s3')
reader_obj = data_reader(readers)
with open(deploy_path/"data_reader.py", 'w') as f:
f.write(file_header(usecase) + reader_obj.get_code())
generated_files.append("data_reader.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += loader.getInputOutputFiles()
loader.addLocalFunctionsCode()
loader.addLoadDataCode()
loader.addMainCode()
code += loader.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_load_data_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('load_data', deploy_path,config['modelName'],generated_files) |
__init__.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
from .load_data import tabularDataReader
from .transformer import transformer as profiler
from .selector import selector
from .trainer import learner
from .deploy import deploy
from .functions import global_function
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
baseline.py | import joblib
import sys
import math
import time
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import argparse
import json
def mltesting(modelfile,datafile,features,target):
model = joblib.load(modelfile)
ProblemName = model.__class__.__name__
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']:
Problemtype = 'Classification'
elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']:
Problemtype = 'Regression'
else:
Problemtype = 'Unknown'
if Problemtype == 'Classification':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName in ('LogisticRegression','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier','SVC'):
features = model.feature_names_in_
elif ProblemName == 'XGBClassifier':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMClassifier':
features = model.feature_name_
elif ProblemName == 'CatBoostClassifier':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predic = model.predict(dfp)
output = {}
matrixconfusion = pd.DataFrame(confusion_matrix(target, predic))  # rows = true labels, cols = predictions
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output["Precision"] = "%.2f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.2f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.2f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Status"] = "Success"
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
# import statistics
# timearray = []
# for i in range(0,5):
# start = time.time()
# predic1 = model.predict(dfp.head(1))
# end = time.time()
# timetaken = (round((end - start) * 1000,2),'Seconds')
# timearray.append(timetaken)
# print(timearray)
start = time.time()
for i in range(0,5):
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2),'Milliseconds')
# print(timetaken)
start1 = time.time()
for i in range(0,5):
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2) ,'Milliseconds')
# print(timetaken1)
start2 = time.time()
for i in range(0,5):
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Classification'
output['Status']= "Fail"
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\n Problem Type : Classification \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n'))
print(output["Msg"])
print(json.dumps(output))
elif Problemtype == 'Regression':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName in ('LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GaussianNB','KNeighborsRegressor','GradientBoostingRegressor'):
features = model.feature_names_in_
elif ProblemName == 'XGBRegressor':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMRegressor':
features = model.feature_name_
elif ProblemName == 'CatBoostRegressor':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predict = model.predict(dfp)
mse = mean_squared_error(target, predict)
mae = mean_absolute_error(target, predict)
rmse = math.sqrt(mse)
r2 = r2_score(target,predict,multioutput='variance_weighted')
output = {}
output["MSE"] = "%.2f" % mean_squared_error(target, predict)
output["MAE"] = "%.2f" % mean_absolute_error(target, predict)
output["RMSE"] = "%.2f" % math.sqrt(mse)
output["R2"] = "%.2f" %r2_score(target,predict,multioutput='variance_weighted')
output["ProblemName"] = ProblemName
output["Problemtype"] = Problemtype
output["Params"] = Params
output['Status']='Success'
start = time.time()
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2) ,'Milliseconds')
# print(timetaken)
start1 = time.time()
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2),'Milliseconds')
# print(timetaken1)
start2 = time.time()
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Regression'
output['Status']='Fail'
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\n Problem Type : Regression \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n'))
print(json.dumps(output))
else:
output = {}
output['Problemtype']='Unknown'
output['Status']='Fail'
output['Params'] = ''
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\n Error : {}'.format(ProblemName, 'Model not supported')
print(json.dumps(output))
return json.dumps(output)
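# Hedged usage sketch (illustrative; the file paths and column names below are
# made up). mltesting() loads a pickled sklearn-compatible model, scores it
# against the CSV, and prints and returns a JSON report:
# >>> baseline_testing('model.pkl', 'data.csv', 'f1, f2, f3', 'label')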
def baseline_testing(modelFile,csvFile,features,target):
features = [x.strip() for x in features.split(',')]
return mltesting(modelFile,csvFile,features,target) |
uq_interface.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#from sklearn.externals import joblib
import joblib
# import pyreadstat
# import sys
# import math
import time
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
import argparse
import json
import os
import pathlib
from tensorflow.keras.models import load_model
# from tensorflow.keras import backend as K
import tensorflow as tf
# from sklearn.decomposition import LatentDirichletAllocation
from pathlib import Path
#from aionUQ import aionUQ
from uq_main import aionUQ
from datetime import datetime
parser = argparse.ArgumentParser()
parser.add_argument('savFile')
parser.add_argument('csvFile')
parser.add_argument('features')
parser.add_argument('target')
args = parser.parse_args()
from appbe.dataPath import DEPLOY_LOCATION
# Normalise the comma-separated feature list into a clean Python list
args.features = [x.strip() for x in args.features.split(',')]
models = args.savFile
if Path(models).is_file():
# if Path(args.savFile.is_file()):
model = joblib.load(args.savFile)
# print(model.__class__.__name__)
# print('class:',model.__class__)
# print(type(model).__name__)
# try:
# print('Classess=',model.classes_)
# except:
# print("Classess=N/A")
# print('params:',model.get_params())
# try:
# print('fea_imp =',model.feature_importances_)
# except:
# print("fea_imp =N/A")
ProblemName = model.__class__.__name__
Params = model.get_params()
# print("ProblemName: \n",ProblemName)
# print("Params: \n",Params)
# print('ProblemName:',model.__doc__)
# print(type(ProblemName))
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
Problemtype = 'Classification'
else :
Problemtype = 'Regression'
if Problemtype == 'Classification':
df = pd.read_csv(args.csvFile)
object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object']
df = df.drop(object_cols, axis=1)
df = df.dropna(axis=1)
df = df.reset_index(drop=True)
modelfeatures = args.features
# dfp = df[modelfeatures]
tar = args.target
# target = df[tar]
y=df[tar]
X = df.drop(tar, axis=1)
#for dummy test,train values pass
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar)
#accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,"uqtest")
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification()
# print("UQ Classification: \n",output_jsonobject)
print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per)
print("End of UQ Classification.\n")
else:
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
# print("modelfeatures: \n",modelfeatures)
# print("type modelfeatures: \n",type(modelfeatures))
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
#Not used, just dummy X,y split
y=df[tar]
X = df.drop(tar, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject)
print("End of UQ reg\n")
elif Path(models).is_dir():
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
model = load_model(models)
ProblemName = model.__class__.__name__
Problemtype = 'Classification'
# print('class:',model.__class__)
# print('class1',model.__class__.__name__)
# print(model.summary())
# print('ProblemName1:',model.get_config())
def Params(model: tf.keras.Model):
    # Capture the Keras model summary as a single string
    lines = []
    model.summary(print_fn=lambda x: lines.append(x))
    return '\n'.join(lines)
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
df3 = dfp.astype(np.float32)
predic = model.predict(df3)
if predic.shape[-1] > 1:
predic = np.argmax(predic, axis=-1)
else:
predic = (predic > 0.5).astype("int32")
matrixconfusion = pd.DataFrame(confusion_matrix(target, predic))  # rows = true labels, cols = predictions
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output = {}
output["Precision"] = "%.3f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.3f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.3f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
print(json.dumps(output))
|
aionUQ.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from matplotlib import pyplot
import sys
import os
import json
import matplotlib.pyplot as plt
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.algorithms.ucc_recalibration import UCCRecalibration
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
from uq360.metrics.regression_metrics import compute_regression_metrics
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
# from math import sqrt
from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error
# from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature
#Added libs from MLTest
import sys
import time
from sklearn.metrics import confusion_matrix
from pathlib import Path
import logging
# import json
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation):
# #printprint("Inside aionUQ \n")
try:
#print("Inside aionUQ init\n ")
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
self.log = logging.getLogger('eion')
self.basemodel=model
self.model_name=ProblemName
self.Deployment = os.path.join(deployLocation,'log','UQ')
os.makedirs(self.Deployment,exist_ok=True)
self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph')
os.makedirs(self.uqgraphlocation,exist_ok=True)
except Exception as e:
self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus):
from sklearn.model_selection import train_test_split
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest
# y_val = y_train.append(y_test)
else:
# y_val = self.y
df=self.data
y=df[self.targetFeature]
X = df.drop(self.targetFeature, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
        # Use the base model's 'criterion' hyper-parameter as the UQ scoring
        # metric when it is set; otherwise fall back to 'picp'.
        try:
            uq_scoring_param = model_params.get('criterion') or 'picp'
        except Exception:
            uq_scoring_param = 'picp'
        if uq_scoring_param not in ['rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2']:
            uq_scoring_param = 'picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
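        # (Explanatory note, not original code.) The 'desirable' MPIW band is
        # defined as the mean prediction +/- 20%: e.g. a mean prediction of 50
        # gives a desirable range of 40 - 60 for the observed interval width.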
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
self.log.info('Model total picp_percentage : '+str(picp_percentage))
return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
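    # Note on the two interval metrics above (a worked example, not from the
    # original code): PICP is the fraction of test targets falling inside
    # [y_hat_lb, y_hat_ub], and MPIW is the mean width of those intervals.
    # E.g. with y_test = [10, 20, 30] and bounds [8,12], [15,18], [25,35]:
    # 10 and 30 are covered but 20 is not, so picp = 2/3 ~ 0.67, and
    # mpiw = ((12-8) + (18-15) + (35-25)) / 3 = 17/3 ~ 5.67.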
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=''.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
pred_df_sorted = pred_df.sort_values(by=x_feature)
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature)
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'):
os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png')
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
except Exception as e:
# #print("display exception: \n",e)
self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
def classUncertainty(self,pred,score):
try:
outuq = {}
classes = np.unique(pred)
            for c in classes:
                class_score = score[pred == c]
                mean_score = np.mean(class_score)
                outuq['Class_'+str(c)] = mean_score
                # Report per-class confidence as a percentage.
                self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(mean_score*100)))
return outuq
except Exception as e:
# #print("display exception: \n",e)
self.log.info('<!------------- UQ classUncertainty Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
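    # Note (explanatory, not original code): classUncertainty() averages the
    # meta-model selection score over the samples predicted as each class, so
    # a mean score of 0.8 for Class_1 is reported as 80% per-class confidence.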
def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus):
try:
# print("Inside uqMain_BBMClassification\n")
# print("lenth of x_train {}, x_test {}, y_train {}, y_test {}".format(x_train, x_test, y_train, y_test))
aionstatus = str(aionstatus)
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.neighbors import KNeighborsClassifier
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
#print(model_name)
            try:
                # Getting the features used during training, if the model exposes them.
                model_used_features = self.basemodel.feature_names_in_
                self.log.info("Base model used training features are (UQ Testing): \n"+str(model_used_features))
            except AttributeError:
                pass
model_params=self.basemodel.get_params()
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name.lower() == "logisticregression"):
basemodel=LogisticRegression
elif(model_name == "XGBClassifier"):
basemodel=XGBClassifier
elif(model_name == "LGBMClassifier"):
basemodel=LGBMClassifier
else:
basemodel=LogisticRegression
calibrated_mdl=None
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
else:
from sklearn.calibration import CalibratedClassifierCV
base_mdl = basemodel(**model_params)
calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3)
basemodelfit = calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base=calibrated_mdl.predict_proba(X_test)[:, :]
cal_model_params=calibrated_mdl.get_params()
acc_score_base=accuracy_score(y_test, basepredict)
base_estimator_calibrate = cal_model_params['base_estimator']
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,
base_config=model_params, meta_config=model_params)
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
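            # (Explanatory note, not original code.) base_is_prefitted=True makes
            # the metamodel reuse the already-trained base model; only the meta
            # model is fitted on (X_train, y_train) to predict where the base
            # model is likely to be wrong, which yields the y_t_score confidences.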
y_t_pred, y_t_score = uq_model.predict(X_test)
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
if(aionstatus == "aionuq"):
test_accuracy_perc=round(test_accuracy_perc,2)
                # uq_aurrrc is not used by any AION GUI configuration, so it is initialized to 0.
                # Calling area_under_risk_rejection_rate_curve() opens a plot window that blocks
                # execution at the command prompt, so it is skipped here.
                uq_aurrrc = 0
else:
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
                # This was done by Kiran; uncomment for GUI integration.
                # bbm_c_plot_sub = bbm_c_plot[4]
                bbm_c_plot_sub = bbm_c_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'):
os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
# bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10 )
                # This was done by Kiran; uncomment for GUI integration.
                # re_plot_sub = re_plot[4]
                re_plot_sub = re_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'):
os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png')
# re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png')
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
test_accuracy_perc=round(test_accuracy_perc)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False)
# uq_aurrrc=uq_aurrrc
confidence_score=acc_score_base-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(y_t_score)
model_confidence=mean_predprob_total
model_uncertainty = 1-mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
y_val = np.append(y_train,y_test)
else:
y_val = self.y
self.log.info('------------------> Model Confidence Score '+str(model_confidence))
outuq = self.classUncertainty(y_t_pred,y_t_score)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
model_confidence_per=round((model_confidence*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendation = ""
            if (uq_ece > 0.5):
                # RED text
                recommendation = 'Model has a high ECE (expected calibration error) score compared to the threshold (0.5) and is not ready to deploy. Add more input data across all feature ranges to train the base model, and try different classification algorithms/ensembling to reduce the ECE (ECE~0).'
            else:
                # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.')
                if (uq_ece <= 0.1 and model_confidence >= 0.9):
                    # Green Text
                    recommendation = 'Model has the best calibration score (near 0) and a good confidence score; ready to deploy.'
                else:
                    # Orange
                    recommendation = 'Model has a good ECE score (between 0.1 and 0.5) but a lower confidence score than the threshold (90%). If desired, the model can be improved by adding more input data across all feature ranges and evaluating different algorithms/ensembling.'
#Adding each class uncertainty value
classoutput = {}
for k,v in outuq.items():
classoutput[k]=(str(round((v*100),2)))
output['classes'] = classoutput
output['ModelConfidenceScore']=(str(model_confidence_per))
output['ExpectedCalibrationError']=str(uq_ece_per)
output['ModelUncertainty']=str(model_uncertainty_per)
output['Recommendation']=recommendation
# output['user_msg']='Please check the plot for more understanding of model uncertainty'
#output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4)
output['Accuracy']=str(acc_score_per)
output['Problem']= 'Classification'
#self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %'))
# #print("Prediction mean for the given model:",np.mean(y_hat),"\n")
#self.log.info(recommendation)
#self.log.info("Model_confidence_score: " +str(confidence_score))
#self.log.info("Model_uncertainty: " +str(round(model_uncertainty,2)))
#self.log.info('Please check the plot for more understanding of model uncertainty.\n.')
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f:
json.dump(output, f)
return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per
except Exception as inst:
self.log.info('\n < ---------- UQ Model Execution Failed Start--------->')
self.log.info('\n<------Model Execution failed!!!.' + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
self.log.info('\n < ---------- Model Execution Failed End --------->')
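    # Background on the ECE-based confidence used above (explanatory note, not
    # original code): expected_calibration_error bins test predictions by
    # confidence and averages |accuracy(bin) - confidence(bin)| weighted by bin
    # size, roughly ECE = sum_b (n_b / N) * |acc_b - conf_b|; the confidence
    # score reported here is the calibrated accuracy minus that error.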
    def aion_confidence_plot(self,df):
        df = df.sort_values(by=self.selectedfeature)
best_values=df.Best_values.to_list()
best_upper=df.Best__upper.to_list()
best_lower=df.Best__lower.to_list()
Total_Upper_PI=df.Total_Upper_PI.to_list()
Total_Low_PI=df.Total_Low_PI.to_list()
Obseved = df.Observed.to_list()
plt.plot(df[x_feature], df['Observed'], 'o', label='Observed')
plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red')
plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5)
plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5)
plt.legend()
plt.xlabel(self.selectedfeature)
plt.ylabel(self.targetFeature)
plt.title('UQ Best & Good Area Plot')
if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png')
def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus):
aionstatus = str(aionstatus)
# if (aionstatus.lower() == 'aionuq'):
# X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
# else:
# X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
# modelName = ""
self.log.info('<!------------- Inside BlackBox MetaModel Regression process. ---------------> ')
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
# #print("model_params['criterion']: \n",model_params['criterion'])
            # Use the base model's 'criterion' hyper-parameter as the UQ scoring
            # metric when it is set; otherwise fall back to 'picp'.
            try:
                uq_scoring_param = model_params.get('criterion') or 'picp'
            except Exception:
                uq_scoring_param = 'picp'
# modelname='sklearn.linear_model'+'.'+model_name
# X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
            #Getting the trained model name to use with BlackboxMetamodelRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor
if (model_name == "DecisionTreeRegressor"):
basemodel=DecisionTreeRegressor
elif (model_name == "LinearRegression"):
basemodel=LinearRegression
elif (model_name == "Lasso"):
basemodel=Lasso
elif (model_name == "Ridge"):
basemodel=Ridge
elif(model_name == "RandomForestRegressor"):
basemodel=RandomForestRegressor
else:
basemodel=LinearRegression
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus)
            if uq_scoring_param not in ['rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2']:
                uq_scoring_param = 'picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
# #print("X_train.shape: \n",X_train.shape)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('<!------------- observed_picp: ---------------> '+str(observed_alphas_picp))
self.log.info('<!------------- observed_widths_mpiw: ---------------> '+str(observed_widths_mpiw))
            # The UQ metamodel regression supports these metrics: 'rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2'
#metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss.
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None)
metric_used=''
for k,v in metric_all.items():
metric_used=str(round(v,2))
self.log.info('<!------------- Metric used for regression UQ: ---------------> '+str(metric_all))
            # Determine the confidence level and recommendation for the tester
# test_data=y_test
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
            # Total uncertainty for all features was already computed above via totalUncertainty().
recommendation=""
output={}
            if (observed_alphas_picp >= 0.95 and total_picp >= 0.75):
                # GREEN text
                self.log.info('Model has good confidence for the selected feature, ready to deploy.')
                recommendation = "Model has a good confidence score, ready to deploy."
            elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)):
                # Orange
                recommendation = "Model has average confidence compared to the threshold (95%). Add more input data across all feature ranges to train the base model, and try different regression algorithms/ensembling."
                self.log.info('Model has an average confidence score compared to the threshold; add more input data to train the base model and run UQ again.')
            else:
                # RED text
                recommendation = "Model has less confidence compared to the threshold (95%). Add more input data across all feature ranges to train the base model, and try different regression algorithms/ensembling."
                self.log.info('Model has a lower confidence score than the threshold; add more input data to train the base model and run UQ again.')
#Build uq json info dict
output['ModelConfidenceScore']=(str(total_picp_percentage)+'%')
output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%')
output['SelectedFeatureConfidence']=(str(picp_percentage)+'%')
output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%')
output['PredictionIntervalCoverageProbability']=observed_alphas_picp
output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw)
            output['DesirableMPIWRange']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range)))
output['Recommendation']=str(recommendation)
output['Metric']=uq_scoring_param
output['Score']=metric_used
output['Problemtype']= 'Regression'
self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %'))
            self.log.info('Model Uncertainty is: '+str(Uncertainty_percentage)+str(' %'))
#self.log.info('Please check the plot for more understanding of model uncertainty.\n.')
#self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub)
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f:
json.dump(output, f)
            #To get the best and medium UQ ranges of values from the total prediction interval
y_hat_m=y_hat.tolist()
y_hat_lb=y_hat_lb.tolist()
upper_bound=y_hat_ub.tolist()
y_hat_ub=y_hat_ub.tolist()
            y_hat_ub.extend(y_hat_lb)  # pool upper and lower bounds into one list
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
X_test = np.squeeze(X_test)
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}
print(uq_dict)
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.')
#Callconfidence olot fn only for UQTest interface
if (aionstatus.lower() == 'aionuq'):
#No need to showcase confidence plot for aion main
pass
else:
self.aion_confidence_plot(df)
'''
return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject)
        except Exception as inst:
            exc = {"status":"FAIL","message":str(inst).strip('"')}
            out_exc = json.dumps(exc)
            self.log.info(out_exc)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
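# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module): one
# way to drive the regression UQ flow above on synthetic data. It assumes
# uq360 and scikit-learn are installed; the demo names below are hypothetical.
if __name__ == '__main__':
    import tempfile
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LinearRegression
    X_demo, y_demo = make_regression(n_samples=200, n_features=3, noise=10.0, random_state=0)
    features = ['f0', 'f1', 'f2']
    df_demo = pd.DataFrame(X_demo, columns=features)
    df_demo['target'] = y_demo
    base = LinearRegression().fit(df_demo[features], df_demo['target'])
    uq = aionUQ(df_demo, df_demo[features], df_demo['target'], 'LinearRegression',
                base.get_params(), base, features, 'target', tempfile.mkdtemp())
    xtr, xte, ytr, yte = train_test_split(df_demo[features], df_demo['target'],
                                          test_size=0.3, random_state=0)
    print(uq.uqMain_BBMRegression(xtr, xte, ytr, yte, 'aionuq'))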
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
uq_main.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
import logging.config
logging.getLogger('tensorflow').disabled = True
import json
import sys
import os
import time
import platform
from os.path import expanduser
from collections import Counter
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import pyplot
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, roc_curve, confusion_matrix, r2_score,
                             mean_squared_error, explained_variance_score,
                             mean_absolute_error)
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.metrics.regression_metrics import compute_regression_metrics
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature):
try:
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.log = logging.getLogger('aionUQ')
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
from appbe.dataPath import DEPLOY_LOCATION
self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time()))))
os.makedirs(self.Deployment,exist_ok=True)
self.basemodel=model
self.model_name=ProblemName
# self.X, self.y = shuffle(self.X, self.y)
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0)
self.xtrain = X_train
self.xtest = X_test
self.ytrain = y_train
self.ytest = y_test
# self.deployLocation=deployLocation
except Exception as e:
# self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def totalUncertainty(self,df,basemodel,model_params):
try:
# from sklearn.model_selection import train_test_split
# df=self.data
# y=df[self.targetFeature]
# X = df.drop(self.targetFeature, axis=1)
if (isinstance(self.selectedfeature,list)):
selectedfeature=[self.selectedfeature[0]]
selectedfeature=' '.join(map(str,selectedfeature))
if (isinstance(self.targetFeature,list)):
targetFeature=[self.targetFeature[0]]
targetFeature=' '.join(map(str,targetFeature))
X = self.data[selectedfeature]
y = self.data[targetFeature]
X = X.values.reshape((-1,1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
            # Use the base model's 'criterion' hyper-parameter as the UQ scoring
            # metric when it is set; otherwise fall back to 'picp'.
            try:
                uq_scoring_param = model_params.get('criterion') or 'picp'
            except Exception:
                uq_scoring_param = 'picp'
# from sklearn.tree import DecisionTreeRegressor
# from sklearn.linear_model import LinearRegression,Lasso,Ridge
# from sklearn import linear_model
# from sklearn.ensemble import RandomForestRegressor
            if uq_scoring_param not in ['rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2']:
                uq_scoring_param = 'picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
# self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
# self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
# self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
# self.log.info('Model total picp_percentage : '+str(picp_percentage))
        except Exception as e:
            print("totalUncertainty fn error: \n", e)
            raise
        return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=','.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
x_feature1 = x_feature.split(',')
pred_df_sorted = pred_df.sort_values(by=x_feature1)
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature1[0])
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
'''
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
#plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
'''
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
picpPlot = os.path.join(self.Deployment,'picp_per_feature.png')
#pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
        except Exception as e:
            print("display exception: \n", e)
            # self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
            confidencePlot, picpPlot = '', ''
        return confidencePlot,picpPlot
def classUncertainty(self,predprob_base):
# from collections import Counter
predc="Class_"
classes = np.unique(self.y)
total = len(self.y)
list_predprob=[]
counter = Counter(self.y)
#for loop for test class purpose
for k,v in counter.items():
n_samples = len(self.y[self.y==k])
per = ((v/total) * 100)
prob_c=predprob_base[:,int(k)]
list_predprob.append(prob_c)
# #print("Class_{} : {}/{} percentage={}% \n".format(k,n_samples,total,per ))
outuq={}
for k in classes:
predc += str(k)
mean_predprob_class=np.mean(list_predprob[int(k)])
uncertainty=1-mean_predprob_class
predc+='_Uncertainty'
outuq[predc]=uncertainty
predc="Class_"
return outuq
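    # Note (explanatory, not original code): for each class k, the loop above
    # averages column k of predict_proba over all test rows and reports
    # 1 - mean as that class's uncertainty; it assumes integer class labels
    # 0..n-1 so that int(k) indexes predprob_base correctly.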
def uqMain_BBMClassification(self):
# self.log.info('<!------------- Inside BlackBox MetaModel Classification process. ---------------> ')
# import matplotlib.pyplot as plt
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
except:
##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification.
from uq360.algorithms.blackbox_metamodel import MetamodelClassification
# from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
# from sklearn import datasets
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# from sklearn.linear_model import LogisticRegression
# import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
        try:
            # Getting the features used during training, if the model exposes them.
            model_used_features = self.basemodel.feature_names_in_
        except AttributeError:
            pass
X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name == "LogisticRegression"):
basemodel=LogisticRegression
else:
basemodel=LogisticRegression
try:
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
except:
##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
#Test data pred, score
y_t_pred, y_t_score = uq_model.predict(X_test)
#predict probability
# uq_pred_prob=uq_model.predict_proba(X_test)
# predprob_base=basemodel.predict_proba(X_test)[:, :]
#if (model_name == "SVC" or model_name == "SGDClassifier"):
# if model_name in ['SVC','SGDClassifier']:
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
basepredict = basemodel.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
basepredict = basemodel.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
else:
base_mdl = basemodel(**model_params)
basemodelfit = base_mdl.fit(X_train, y_train)
basepredict = base_mdl.predict(X_test)
predprob_base=base_mdl.predict_proba(X_test)[:, :]
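        # (Explanatory note, not original code.) CalibratedClassifierCV wraps
        # models such as SVC/SGDClassifier whose raw decision scores are not
        # probabilities, fitting a sigmoid (Platt) mapping with 3-fold CV so
        # that predict_proba is well defined for the ECE computation below.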
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
'''
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
# This done by kiran, need to uncomment for GUI integration.
try:
bbm_c_plot_sub = bbm_c_plot[4]
bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png')
riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png')
except Exception as e:
print(e)
pass
riskPlot = ''
'''
riskPlot = ''
'''
try:
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10)
# This done by kiran, need to uncomment for GUI integration.
re_plot_sub = re_plot[4]
# re_plot_sub = re_plot
re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png')
reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png')
except Exception as e:
print(e)
pass
reliability_plot = ''
'''
reliability_plot = ''
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
test_accuracy_perc=round(test_accuracy_perc)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False)
confidence_score=acc_score-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
# #print("model_uncertainty1: \n",model_uncertainty_ece)
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(predprob_base)
model_uncertainty = 1-mean_predprob_total
model_confidence=mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
outuq = self.classUncertainty(predprob_base)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
# model_confidence_per=round((model_confidence*100),2)
model_confidence_per=round((ece_confidence_score*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendation = ""
        if (uq_ece > 0.5):
            # RED text
            recommendation = 'Model has a high ECE (expected calibration error) score compared to the threshold (50%) and is not ready to deploy. Add more input data across all feature ranges to train the base model, and try different classification algorithms/ensembling to reduce the ECE (ECE~0).'
            msg = 'Bad'
        else:
            # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.')
            if (uq_ece <= 0.1 and model_confidence >= 0.9):
                # Green Text
                recommendation = 'Model has the best calibration score (near 0) and a good confidence score; ready to deploy.'
                msg = 'Best'
            else:
                # Orange
                recommendation = 'Model has an average confidence score (ideal is >90% confidence) and a good ECE score (ideal is <10% error). The model can be improved by adding more training data across all feature ranges and re-training.'
                msg = 'Good'
#Adding each class uncertainty value
output['Problem']= 'Classification'
output['recommend']= 'recommend'
output['msg']= msg
output['UQ_Area_Under_Risk_Rejection_Rate_Curve']=round(uq_aurrrc,4)
output['Model_Total_Confidence']=(str(model_confidence_per)+str('%'))
output['Expected_Calibration_Error']=(str(uq_ece_per)+str('%'))
output['Model_Total_Uncertainty']=(str(model_uncertainty_per)+str('%'))
# output['Risk Plot'] = str(riskPlot)
# output['Reliability Plot'] = str(reliability_plot)
for k,v in outuq.items():
output[k]=(str(round((v*100),2))+str(' %'))
output['Recommendation']=recommendation
# output['user_msg']='Please check the plot for more understanding of model uncertainty'
output['Metric_Accuracy_Score']=(str(acc_score_per)+str(' %'))
outputs = json.dumps(output)
with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f:
json.dump(output, f)
return test_accuracy_perc,uq_ece,outputs
def aion_confidence_plot(self,df):
try:
global x_feature
            df = df.sort_values(by=self.selectedfeature)
best_values=df.Best_values.to_list()
best_upper=df.Best__upper.to_list()
best_lower=df.Best__lower.to_list()
Total_Upper_PI=df.Total_Upper_PI.to_list()
Total_Low_PI=df.Total_Low_PI.to_list()
Obseved = df.Observed.to_list()
x_feature1 = x_feature.split(',')
plt.plot(df[x_feature1[0]], df['Observed'], 'o', label='Observed')
plt.plot(df[x_feature1[0]], df['Best__upper'],'r--', lw=2, color='grey')
plt.plot(df[x_feature1[0]], df['Best__lower'],'r--', lw=2, color='grey')
plt.plot(df[x_feature1[0]], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red')
plt.fill_between(df[x_feature1[0]], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5)
plt.fill_between(df[x_feature1[0]],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5)
plt.legend()
plt.xlabel(x_feature1[0])
plt.ylabel(self.targetFeature)
plt.title('UQ Best & Good Area Plot')
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png')
plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png')
'''
plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png')
uq_confidence_plt = os.path.join(str(self.Deployment),'uq_confidence_plt.png')
        except Exception as inst:
            print('aion_confidence_plot error:', inst)
            uq_confidence_plt = ''
return uq_confidence_plt
def uqMain_BBMRegression(self):
# modelName = ""
        # self.log.info('<!------------- Inside BlackBox MetaModel Regression process. ---------------> ')
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
# #print("model_params['criterion']: \n",model_params['criterion'])
            # Use the base model's 'criterion' hyper-parameter as the UQ scoring
            # metric when it is set; otherwise fall back to 'picp'.
            try:
                uq_scoring_param = model_params.get('criterion') or 'picp'
            except Exception:
                uq_scoring_param = 'picp'
# modelname='sklearn.linear_model'+'.'+model_name
# self.xtrain = self.xtrain.values.reshape((-1,1))
# self.xtest = self.xtest.values.reshape((-1,1))
if (isinstance(self.selectedfeature,list)):
selectedfeature=[self.selectedfeature[0]]
selectedfeature=' '.join(map(str,selectedfeature))
if (isinstance(self.targetFeature,list)):
targetFeature=[self.targetFeature[0]]
targetFeature=' '.join(map(str,targetFeature))
X = self.data[selectedfeature]
y = self.data[targetFeature]
X = X.values.reshape((-1,1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
            #Getting the trained model name to use with BlackboxMetamodelRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor
if (model_name == "DecisionTreeRegressor"):
basemodel=DecisionTreeRegressor
elif (model_name == "LinearRegression"):
basemodel=LinearRegression
elif (model_name == "Lasso"):
basemodel=Lasso
elif (model_name == "Ridge"):
basemodel=Ridge
elif(model_name == "RandomForestRegressor"):
basemodel=RandomForestRegressor
else:
basemodel=LinearRegression
            # Map unsupported metric choices (and 'picp' itself) to the descriptive picp label.
            if uq_scoring_param not in ['rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2'] or uq_scoring_param.lower() == 'picp':
                uq_scoring_param = 'prediction interval coverage probability score (picp)'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
            # The UQ metamodel regression supports these metrics: 'rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2'
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None)
metric_used=''
for k,v in metric_all.items():
metric_used=str(round(v,2))
            # Determine the confidence level and recommendation for the tester
# test_data=y_test
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
#Calculate total uncertainty for all features
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data)
# df1=self.data
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params)
recommendation=""
observed_widths_mpiw = round((observed_widths_mpiw/1000000)*100)
if observed_widths_mpiw > 100:
observed_widths_mpiw = 100
output={}
            if (observed_alphas_picp >= 0.90 and total_picp >= 0.75):
                # GREEN text
                recommendation = "Model has good confidence and MPIW scores, ready to deploy."
                msg = 'Good'
            elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.90) and (total_picp >= 0.50)):
                # Orange
                recommendation = "Model has average confidence compared to the threshold (ideally both model confidence and MPIW should be >90%). The model can be improved by adding more training data across all feature ranges and re-training."
                msg = 'Average'
            else:
                # RED text
                recommendation = "Model has less confidence compared to the threshold (ideally both model confidence and MPIW should be >90%). Add more input data across all feature ranges, retrain the base model, and try different regression algorithms/ensembling."
                msg = 'Bad'
#Build uq json info dict
output['Model_total_confidence']=(str(total_picp_percentage)+'%')
output['Model_total_Uncertainty']=(str(total_Uncertainty_percentage)+'%')
output['Selected_feature_confidence']=(str(picp_percentage)+'%')
output['Selected_feature_Uncertainty']=(str(Uncertainty_percentage)+'%')
output['Prediction_Interval_Coverage_Probability']=observed_alphas_picp
output['Mean_Prediction_Interval_Width']=str(observed_widths_mpiw)+'%'
output['Desirable_MPIW_range']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range)))
output['Recommendation']=str(recommendation)
output['Metric_used']=uq_scoring_param
output['Metric_value']=metric_used
output['Problem']= 'Regression'
output['recommend']= 'recommend'
output['msg'] = msg
with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f:
json.dump(output, f)
            #To get the best and medium UQ ranges of values from the total prediction interval
y_hat_m=y_hat.tolist()
y_hat_lb=y_hat_lb.tolist()
upper_bound=y_hat_ub.tolist()
y_hat_ub=y_hat_ub.tolist()
            y_hat_ub.extend(y_hat_lb)  # pool upper and lower bounds into one list
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
'''
print(X_test)
print(X_test)
X_test = np.squeeze(X_test)
print(x_feature)
'''
uq_dict = pd.DataFrame(X_test)
#print(uq_dict)
uq_dict['Observed'] = y_test
uq_dict['Best_values'] = y_hat_m
uq_dict['Best__upper'] = ymean_upper
uq_dict['Best__lower'] = ymean_lower
uq_dict['Total_Low_PI'] = y_hat_lb
uq_dict['Total_Upper_PI'] = upper_bound
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}'''
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
# self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.')
# confidenceplot = self.aion_confidence_plot(df)
# output['Confidence Plot']= confidenceplot
uq_jsonobject = json.dumps(output)
print("UQ regression problem training completed...\n")
return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject
        except Exception as inst:
            print('uqMain_BBMRegression error:', inst)
            exc = {"status":"FAIL","message":str(inst).strip('"')}
            out_exc = json.dumps(exc)
            print(out_exc)
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
associationrules.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
from mlxtend.frequent_patterns import apriori, association_rules
from mlxtend.preprocessing import TransactionEncoder
import matplotlib.pyplot as plt
import json
import logging
import os,sys
def hot_encode(x):
    # Binarize basket counts: any positive count becomes 1, otherwise 0.
    return 1 if int(x) >= 1 else 0
class associationrules:
def __init__(self,dataframe,association_rule_conf,modelparam,invoiceNoFeature,itemFeature):
self.minSupport = modelparam['minSupport']
self.metric = modelparam['metric']
self.minThreshold = modelparam['minThreshold']
self.data = dataframe
self.invoiceNoFeature = invoiceNoFeature
self.itemFeature = itemFeature
self.log = logging.getLogger('eion')
def apply_associationRules(self,outputLocation):
self.data= self.data[[self.itemFeature,self.invoiceNoFeature]]
self.data[self.itemFeature] = self.data[self.itemFeature].str.strip()
self.data.dropna(axis = 0, subset =[self.invoiceNoFeature], inplace = True)
self.data[self.invoiceNoFeature] = self.data[self.invoiceNoFeature].astype('str')
self.data = self.data.groupby([self.invoiceNoFeature,self.itemFeature]).size()
self.data=self.data.unstack().reset_index().fillna('0').set_index(self.invoiceNoFeature)
self.data = self.data.applymap(hot_encode)
ohe_df = self.data
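        # (Explanatory note, not original code.) The groupby/unstack above
        # pivots the long (invoice, item) table into one row per invoice with
        # item counts as columns; hot_encode then binarizes those counts into
        # the 0/1 basket matrix that apriori expects.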
'''
print(self.data)
sys.exit()
items = []
for col in list(self.data):
ucols = self.data[col].dropna().unique()
#print('ucols :',ucols)
if len(ucols) > 0:
items = items + list(set(ucols) - set(items))
#items = self.data.apply(lambda col: col.unique())
#print(items)
#items = (self.data[self.masterColumn].unique())
#print(items)
self.log.info("-------> Total Unique Items: "+str(len(items)))
encoded_vals = []
for index, row in self.data.iterrows():
labels = {}
uncommons = list(set(items) - set(row))
commons = list(set(items).intersection(row))
for uc in uncommons:
labels[uc] = 0
for com in commons:
labels[com] = 1
encoded_vals.append(labels)
ohe_df = pd.DataFrame(encoded_vals)
#print(ohe_df)
'''
freq_items = apriori(ohe_df, min_support=self.minSupport, use_colnames=True)
self.log.info('Status:- |... AssociationRule Algorithm applied: Apriori')
if not freq_items.empty:
self.log.info("\n------------ Frequent Item Set --------------- ")
self.log.info(freq_items)
save_freq_items = pd.DataFrame()
save_freq_items["itemsets"] = freq_items["itemsets"].apply(lambda x: ', '.join(list(x))).astype("unicode")
outputfile = os.path.join(outputLocation,'frequentItems.csv')
save_freq_items.to_csv(outputfile)
            self.log.info('-------> FrequentItems File Name:'+outputfile)
rules = association_rules(freq_items, metric=self.metric, min_threshold=self.minThreshold)
if not rules.empty:
#rules = rules.sort_values(['confidence', 'lift'], ascending =[False, False])
self.log.info("\n------------ Rules --------------- ")
for index, row in rules.iterrows():
self.log.info("------->Rule: "+ str(row['antecedents']) + " -> " + str(row['consequents']))
self.log.info("---------->Support: "+ str(row['support']))
self.log.info("---------->Confidence: "+ str(row['confidence']))
self.log.info("---------->Lift: "+ str(row['lift']))
#rules['antecedents'] = list(rules['antecedents'])
#rules['consequents'] = list(rules['consequents'])
rules["antecedents"] = rules["antecedents"].apply(lambda x: ', '.join(list(x))).astype("unicode")
rules["consequents"] = rules["consequents"].apply(lambda x: ', '.join(list(x))).astype("unicode")
self.log.info("\n------------ Rules End --------------- ")
outputfile = os.path.join(outputLocation,'associationRules.csv')
                self.log.info('-------> AssociationRule File Name:'+outputfile)
rules.to_csv(outputfile)
            else:
                self.log.info("\n------------ Frequent Item Set --------------- ")
                self.log.info("Status:- |... No association rules were found among the frequent items above the threshold (minThreshold)")
        else:
            self.log.info("\n------------ Frequent Item Set --------------- ")
            self.log.info("Status:- |... No frequent items were found above the threshold (minSupport)")
        evaluatemodel = '{"Model":"Apriori","Score":"NA"}'
        return(evaluatemodel)
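# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module): the
# same basket -> one-hot -> apriori flow on a tiny hand-made dataset. It
# assumes mlxtend is installed; all demo names below are hypothetical.
if __name__ == '__main__':
    import tempfile
    demo = pd.DataFrame({
        'InvoiceNo': ['1', '1', '2', '2', '3', '3'],
        'Item': ['bread', 'milk', 'bread', 'butter', 'bread', 'milk'],
    })
    params = {'minSupport': 0.5, 'metric': 'confidence', 'minThreshold': 0.6}
    ar = associationrules(demo, None, params, 'InvoiceNo', 'Item')
    # Writes frequentItems.csv / associationRules.csv into a temp directory.
    print(ar.apply_associationRules(tempfile.mkdtemp()))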
|
featureReducer.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from statsmodels.tsa.stattools import adfuller
from scipy.stats import pearsonr
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from numpy.random import randn
#SDP1 class import
from feature_engineering.featureImportance import featureImp
from sklearn.feature_selection import VarianceThreshold
import logging
class featureReducer():
def __init__(self):
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.log = logging.getLogger('eion')
def startReducer(self,df,data_columns,target,var_threshold):
self.log.info('\n---------- Feature Reducer Start ----------')
dataframe = df
columns=data_columns
target = target
corrThreshold=1.0
categoricalFeatures=[]
nonNumericFeatures=[]
constFeatures=[]
qconstantColumns=[]
DtypesDic={}
numericFeatures=[]
nonNumericalFeatures=[]
similarFeatureGroups=[]
try:
dataFDtypes=self.dataFramecolType(dataframe)
for item in dataFDtypes:
DtypesDic[item[0]] = item[1]
if item[1] in self.pandasNumericDtypes:
numericFeatures.append(item[0])
else:
nonNumericFeatures.append(item[0])
#Checking for constant data features
for col in columns:
try:
distCount = len(dataframe[col].unique())
if(distCount == 1):
constFeatures.append(col)
except Exception as inst:
                    self.log.info('Unique value check failed for column '+str(col))
numericalDataCols,nonNumericalDataCols = [],[]
#Removing constant data features
if(len(constFeatures) != 0):
self.log.info( '-------> Constant Features: '+str(constFeatures))
numericalDataCols = list(set(numericFeatures) - set(constFeatures))
nonNumericalDataCols = list(set(nonNumericFeatures) - set(constFeatures))
else:
numericalDataCols = list(set(numericFeatures))
nonNumericalDataCols = list(set(nonNumericFeatures))
if(len(numericalDataCols) > 1):
if var_threshold !=0:
qconstantFilter = VarianceThreshold(threshold=var_threshold)
tempDf=df[numericalDataCols]
qconstantFilter.fit(tempDf)
qconstantColumns = [column for column in numericalDataCols if column not in tempDf.columns[qconstantFilter.get_support()]]
if(len(qconstantColumns) != 0):
if target != '' and target in qconstantColumns:
qconstantColumns.remove(target)
self.log.info( '-------> Low Variant Features: '+str(qconstantColumns))
self.log.info('Status:- |... Low variance feature treatment done: '+str(len(qconstantColumns))+' low variance features found')
numericalDataCols = list(set(numericalDataCols) - set(qconstantColumns))
else:
self.log.info('Status:- |... Low variance feature treatment done: Found zero or 1 numeric feature')
#Minimum of two columns required for data integration
if(len(numericalDataCols) > 1):
numColPairs = list(itertools.product(numericalDataCols, numericalDataCols))
noDupList = []
for item in numColPairs:
if(item[0] != item[1]):
noDupList.append(item)
numColPairs = noDupList
tempArray = []
for item in numColPairs:
tempCorr = np.abs(dataframe[item[0]].corr(dataframe[item[1]]))
if(tempCorr > corrThreshold):
tempArray.append(item[0])
tempArray = np.unique(tempArray)
nonsimilarNumericalCols = list(set(numericalDataCols) - set(tempArray))
'''
Notes:
tempArray: List of all similar/equal data features
nonsimilarNumericalCols: List of all non-correlatable data features
'''
#Grouping similar/equal features
groupedFeatures = []
if(len(numericalDataCols) != len(nonsimilarNumericalCols)):
#self.log.info( '-------> Similar/Equal Features: Not Any')
#Correlation dictionary
corrDic = {}
for feature in tempArray:
temp = []
for col in tempArray:
tempCorr = np.abs(dataframe[feature].corr(dataframe[col]))
temp.append(tempCorr)
corrDic[feature] = temp
#Similar correlation dataframe
corrDF = pd.DataFrame(corrDic,index = tempArray)
corrDF.loc[:,:] = np.tril(corrDF, k=-1)
alreadyIn = set()
similarFeatures = []
for col in corrDF:
perfectCorr = corrDF[col][corrDF[col] > corrThreshold].index.tolist()
if perfectCorr and col not in alreadyIn:
alreadyIn.update(set(perfectCorr))
perfectCorr.append(col)
similarFeatures.append(perfectCorr)
                    self.log.info( '-------> Number of Similar/Equal Feature Groups: '+str(len(similarFeatures)))
for i in range(0,len(similarFeatures)):
similarFeatureGroups.append(similarFeatures[i])
#self.log.info((str(i+1)+' '+str(similarFeatures[i])))
self.log.info('-------> Similar/Equal Features: '+str(similarFeatureGroups))
self.log.info('-------> Non Similar Features :'+str(nonsimilarNumericalCols))
updatedSimFeatures = []
for items in similarFeatures:
if(target != '' and target in items):
for p in items:
updatedSimFeatures.append(p)
else:
updatedSimFeatures.append(items[0])
newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))
updatedNumFeatures = newTempFeatures
#self.log.info( '\n <--- Merged similar/equal features into one ---> ')
updatedFeatures = list(set(newTempFeatures + nonNumericalDataCols))
self.log.info('Status:- |... Similar feature treatment done: '+str(len(similarFeatures))+' similar features found')
else:
updatedNumFeatures = numericalDataCols
updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))
self.log.info( '-------> Similar/Equal Features: Not Any')
self.log.info('Status:- |... Similar feature treatment done: No similar features found')
else:
updatedNumFeatures = numericalDataCols
updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))
self.log.info( '\n-----> Need minimum of two numerical features for data integration.')
self.log.info('Status:- |... Similar feature treatment done: Found zero or 1 numeric feature')
self.log.info('---------- Feature Reducer End ----------\n')
return updatedNumFeatures,updatedFeatures,similarFeatureGroups
except Exception as inst:
self.log.info("feature Reducer failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return [],[],[]
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except:
self.log.info("error in dataFramecolyType")
return dataFDtypes
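
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical call of startReducer() on made-up data: the 'const'
# column has a single unique value and is reported as a constant feature.
# Note that similar-feature grouping only triggers when |corr| strictly
# exceeds the internal corrThreshold of 1.0.
def _demo_feature_reducer():
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                       'b': [4.0, 1.0, 3.0, 2.0],
                       'const': [7, 7, 7, 7],
                       'y': [0, 1, 0, 1]})
    reducer = featureReducer()
    numeric, updated, groups = reducer.startReducer(df, list(df.columns), 'y', 0.01)
    return numeric, updated, groups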
|
featureSelector.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from statsmodels.tsa.stattools import adfuller
from scipy.stats import pearsonr
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from numpy.random import randn
from sklearn.metrics import normalized_mutual_info_score
from sklearn.feature_selection import mutual_info_regression
import logging
#SDP1 class import
from feature_engineering.featureImportance import featureImp
from feature_engineering.featureReducer import featureReducer
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition import FastICA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
# noinspection PyPep8Naming
class featureSelector():
def __init__(self):
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.log = logging.getLogger('eion')
def startSelector(self,df,conf_json,textFeatures,targetFeature,problem_type):
try:
categoricalMaxLabel = int(conf_json['categoryMaxLabel'])
pca='None'
pcaReducerStatus = conf_json['featureEngineering']['PCA']
svdReducerStatus = conf_json['featureEngineering']['SVD']
factorReducerStatus = conf_json['featureEngineering']['FactorAnalysis']
icaReducerStatus = conf_json['featureEngineering']['ICA']
nfeatures=float(conf_json['featureEngineering']['numberofComponents'])
statisticalConfig = conf_json['statisticalConfig']
corrThresholdInput = float(statisticalConfig.get('correlationThresholdFeatures',0.50))
corrThresholdTarget = float(statisticalConfig.get('correlationThresholdTarget',0.85))
pValThresholdInput = float(statisticalConfig.get('pValueThresholdFeatures',0.05))
pValThresholdTarget = float(statisticalConfig.get('pValueThresholdTarget',0.04))
varThreshold = float(statisticalConfig.get('varianceThreshold',0.01))
allFeaturesSelector = conf_json['featureSelection']['allFeatures']
correlationSelector = conf_json['featureSelection']['statisticalBased']
modelSelector = conf_json['featureSelection']['modelBased']
featureSelectionMethod = conf_json['selectionMethod']['featureSelection']
featureEngineeringSelector = conf_json['selectionMethod']['featureEngineering']
if featureSelectionMethod == 'True':
featureEngineeringSelector = 'False'
            # if feature engineering is true then we check whether PCA or SVD is enabled. By default we will run PCA
if featureEngineeringSelector == 'True':
                if pcaReducerStatus == 'True':
                    svdReducerStatus = 'False'
                    factorReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif svdReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    factorReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif factorReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    svdReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif icaReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    svdReducerStatus = 'False'
                    factorReducerStatus = 'False'
                else:
                    pcaReducerStatus = 'True'
if featureSelectionMethod == 'False' and featureEngineeringSelector == 'False':
featureSelectionMethod = 'True'
if featureSelectionMethod == 'True':
if modelSelector == 'False' and correlationSelector == 'False' and allFeaturesSelector == 'False':
modelSelector = 'True'
reductionMethod = 'na'
bpca_features = []
#nfeatures = 0
            if 'maxClasses' in conf_json:
                maxClasses = int(conf_json['maxClasses'])
            else:
                maxClasses = 20
target = targetFeature
            self.log.info('-------> Target Feature: '+str(target))
dataFrame = df
pThresholdInput=pValThresholdInput
pThresholdTarget=pValThresholdTarget
cThresholdInput=corrThresholdInput
cThresholdTarget=corrThresholdTarget
numericDiscreteFeatures=[]
            similarGroups=[]
numericContinuousFeatures=[]
categoricalFeatures=[]
nonNumericFeatures=[]
apca_features = []
dTypesDic={}
dataColumns = list(dataFrame.columns)
features_list = list(dataFrame.columns)
modelselectedFeatures=[]
topFeatures=[]
allFeatures=[]
targetType=""
# just to make sure feature engineering is false
#print(svdReducerStatus)
if featureEngineeringSelector.lower() == 'false' and correlationSelector.lower() == "true" and len(textFeatures) <= 0:
reducerObj=featureReducer()
self.log.info(featureReducer.__doc__)
self.log.info('Status:- |... Feature reduction started')
                updatedNumericFeatures,updatedFeatures,similarGroups=reducerObj.startReducer(dataFrame,dataColumns,target,varThreshold)
if len(updatedFeatures) <= 1:
self.log.info('=======================================================')
                    self.log.info('Most of the features are of low variance. Use model based feature engineering for better results')
                    self.log.info('=======================================================')
                    raise Exception('Most of the features are of low variance. Use model based feature engineering for better results')
dataFrame=dataFrame[updatedFeatures]
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction completed')
elif (pcaReducerStatus.lower() == "true" or svdReducerStatus.lower() == 'true' or factorReducerStatus.lower() == 'true' or icaReducerStatus.lower()=='true') and featureEngineeringSelector.lower() == 'true':
                # check whether PCA or SVD is enabled
pcaColumns=[]
#print(svdReducerStatus.lower())
if target != "":
dataColumns.remove(target)
targetArray=df[target].values
targetArray.shape = (len(targetArray), 1)
if pcaReducerStatus.lower() == "true":
if nfeatures == 0:
pca = PCA(n_components='mle',svd_solver = 'full')
elif nfeatures < 1:
pca = PCA(n_components=nfeatures,svd_solver = 'full')
else:
pca = PCA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'PCA'
elif svdReducerStatus.lower() == 'true':
if nfeatures < 2:
nfeatures = 2
pca = TruncatedSVD(n_components=int(nfeatures), n_iter=7, random_state=42)
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'SVD'
elif factorReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FactorAnalysis()
else:
pca=FactorAnalysis(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'FactorAnalysis'
elif icaReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FastICA()
else:
pca=FastICA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'IndependentComponentAnalysis'
pcaDF=pd.DataFrame(pcaArray)
#print(pcaDF)
for i in range(len(pcaDF.columns)):
pcaColumns.append(method+str(i))
topFeatures=pcaColumns
apca_features= pcaColumns.copy()
if target != '':
pcaColumns.append(target)
scaledDf = pd.DataFrame(np.hstack((pcaArray, targetArray)),columns=pcaColumns)
else:
scaledDf = pd.DataFrame(pcaArray,columns=pcaColumns)
self.log.info("<--- dataframe after dimensionality reduction using "+method)
self.log.info(scaledDf.head())
dataFrame=scaledDf
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction started')
self.log.info('Status:- |... '+method+' done')
self.log.info('Status:- |... Feature reduction completed')
self.numofCols = dataFrame.shape[1]
self.numOfRows = dataFrame.shape[0]
dataFDtypes=[]
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
#Categoring datatypes
for item in dataFDtypes:
dTypesDic[item[0]] = item[1]
if item[0] != target:
if item[1] in ['int16', 'int32', 'int64'] :
numericDiscreteFeatures.append(item[0])
elif item[1] in ['float16', 'float32', 'float64']:
numericContinuousFeatures.append(item[0])
else:
nonNumericFeatures.append(item[0])
self.numOfRows = dataFrame.shape[0]
'''
cFRatio = 0.01
if(self.numOfRows < 1000):
cFRatio = 0.2
elif(self.numOfRows < 10000):
cFRatio = 0.1
elif(self.numOfRows < 100000):
cFRatio = 0.01
'''
for i in numericDiscreteFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
for i in numericContinuousFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
discreteFeatures=list(set(numericDiscreteFeatures)-set(categoricalFeatures))
numericContinuousFeatures=list(set(numericContinuousFeatures)-set(categoricalFeatures))
self.log.info('-------> Numerical continuous features :'+(str(numericContinuousFeatures))[:500])
self.log.info('-------> Numerical discrete features :'+(str(discreteFeatures))[:500])
self.log.info('-------> Non numerical features :'+(str(nonNumericFeatures))[:500])
self.log.info('-------> Categorical Features :'+(str(categoricalFeatures))[:500])
if target !="" and featureEngineeringSelector.lower() == "false" and correlationSelector.lower() == "true":
self.log.info('\n------- Feature Based Correlation Analysis Start ------')
start = time.time()
featureImpObj = featureImp()
topFeatures,targetType= featureImpObj.FFImpNew(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThresholdInput,pThresholdTarget,cThresholdInput,cThresholdTarget,categoricalMaxLabel,problem_type,maxClasses)
#topFeatures,targetType= featureImpObj.FFImp(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThreshold,cThreshold,categoricalMaxLabel,problem_type,maxClasses)
self.log.info('-------> Highly Correlated Features Using Correlation Techniques'+(str(topFeatures))[:500])
executionTime=time.time() - start
self.log.info('-------> Time Taken: '+str(executionTime))
self.log.info('Status:- |... Correlation based feature selection done: '+str(len(topFeatures))+' out of '+str(len(dataColumns))+' selected')
self.log.info('------- Feature Based Correlation Analysis End ------>\n')
if targetType == '':
if problem_type.lower() == 'classification':
targetType = 'categorical'
if problem_type.lower() == 'regression':
targetType = 'continuous'
if target !="" and featureEngineeringSelector.lower() == "false" and modelSelector.lower() == "true":
self.log.info('\n------- Model Based Correlation Analysis Start -------')
start = time.time()
updatedFeatures = dataColumns
updatedFeatures.remove(target)
#targetType = problem_type.lower()
modelselectedFeatures=[]
if targetType == 'categorical':
try:
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
etc = ExtraTreesClassifier(n_estimators=100)
etc.fit(xtrain, ytrain)
rfe = RFE(etc, n_features_to_select=1, verbose =0 )
rfe.fit(xtrain, ytrain)
# total list of features
ranks = {}
ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1)
for item in ranks["RFE_LR"]:
if ranks["RFE_LR"][item]>0.30: #threshold as 30%
modelselectedFeatures.append(item)
modelselectedFeatures = list(modelselectedFeatures)
self.log.info('-------> Highly Correlated Features Using Treeclassifier + RFE: '+(str(modelselectedFeatures))[:500])
except Exception as e:
self.log.info('---------------->'+str(e))
selector = SelectFromModel(ExtraTreesClassifier())
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
selector.fit(xtrain,ytrain)
modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist()
self.log.info('-------> Highly Correlated Features Using Treeclassifier: '+(str(modelselectedFeatures))[:500])
else:
try:
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
ls = Lasso()
ls.fit(xtrain, ytrain)
rfe = RFE(ls, n_features_to_select=1, verbose = 0 )
rfe.fit(xtrain, ytrain)
# total list of features
ranks = {}
ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1)
for item in ranks["RFE_LR"]:
if ranks["RFE_LR"][item]>0.30: #threshold as 30%
modelselectedFeatures.append(item)
modelselectedFeatures = list(modelselectedFeatures)
self.log.info('-------> Highly Correlated Features Using LASSO + RFE: '+(str(modelselectedFeatures))[:500])
except Exception as e:
self.log.info('---------------->'+str(e))
selector = SelectFromModel(Lasso())
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
selector.fit(xtrain,ytrain)
modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist()
self.log.info('-------> Highly Correlated Features Using LASSO: '+(str(modelselectedFeatures))[:500])
executionTime=time.time() - start
self.log.info('-------> Time Taken: '+str(executionTime))
self.log.info('Status:- |... Model based feature selection done: '+str(len(modelselectedFeatures))+' out of '+str(len(dataColumns))+' selected')
self.log.info('--------- Model Based Correlation Analysis End -----\n')
if target !="" and featureEngineeringSelector.lower() == "false" and allFeaturesSelector.lower() == "true":
allFeatures = features_list
if target != '':
allFeatures.remove(target)
#print(allFeatures)
if len(topFeatures) == 0 and len(modelselectedFeatures) == 0 and len(allFeatures) == 0:
allFeatures = features_list
            return dataFrame,target,topFeatures,modelselectedFeatures,allFeatures,targetType,similarGroups,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pca,bpca_features,apca_features,featureEngineeringSelector
except Exception as inst:
self.log.info('Feature selector failed: '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
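
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of the model-based selection pattern used in
# startSelector(): fit RFE around an ExtraTreesClassifier, min-max scale the
# inverted rankings via ranking(), and keep features scoring above the same
# 0.30 threshold. The synthetic dataset below is made up for illustration.
def _demo_rfe_selection():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=8, n_informative=3, random_state=0)
    cols = ['f' + str(i) for i in range(X.shape[1])]
    xtrain = pd.DataFrame(X, columns=cols)
    etc = ExtraTreesClassifier(n_estimators=100, random_state=0)
    rfe = RFE(etc, n_features_to_select=1, verbose=0)
    rfe.fit(xtrain, y)
    scores = ranking(list(map(float, rfe.ranking_)), cols, order=-1)
    return [col for col, score in scores.items() if score > 0.30]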
|
featureImportance.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from sklearn.metrics import normalized_mutual_info_score
from sklearn.feature_selection import f_regression,mutual_info_regression
from sklearn.feature_selection import chi2,f_classif,mutual_info_classif
import scipy.stats
from scipy.stats import pearsonr, spearmanr, pointbiserialr, f_oneway, kendalltau, chi2_contingency
import statsmodels.api as sm
import statsmodels.formula.api as smf
import logging
def getHigherSignificanceColName(featureDict, colname1, colname2):
    # featureDict maps column name -> p-value; the column with the larger
    # p-value (i.e. the statistically less significant one) is returned as
    # the drop candidate.
    if featureDict[colname1]<featureDict[colname2]:
        return colname2
    else:
        return colname1
class featureImp():
def __init__(self):
self.dTypesDic = {}
self.featureImpDic={}
self.indexedDic = {}
self.log = logging.getLogger('eion')
def FFImpNew(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValThInput,pValThTarget,corrThInput,corrThTarget,categoricalMaxLabel,problem_type,maxClasses):
try:
dataframe = df
contiFeatures= contFeatures
quantFeatures=discreteFeatures+contiFeatures
categoricalFeatures=categoricalFeatures
targetData=dataframe[target]
nUnique=len(targetData.unique().tolist())
if nUnique <= categoricalMaxLabel:
targetType="categorical"
else:
targetType="continuous"
if problem_type.lower() == 'classification' and targetType == 'continuous':
targetType = 'categorical'
                self.log.info( '-------> Change Target Type to Categorical as user defined')
if problem_type.lower() == 'regression' and targetType == 'categorical':
targetType = 'continuous'
self.log.info( '-------> Change Target Type to Continuous as user defined')
self.log.info( '-------> Target Type: '+str(targetType))
impFeatures=[]
catFeature = []
numFeature = []
catFeatureXYcat = []
numFeatureXYcat = []
catFeatureXYnum= []
numFeatureXYnum = []
dropFeatureCat= []
dropFeatureNum = []
featureDict = {}
if targetType =="categorical":
if len(categoricalFeatures) !=0:
# input vs target
# chi-square
for col in categoricalFeatures:
contingency = pd.crosstab(dataframe[col], targetData)
stat, p, dof, expected = chi2_contingency(contingency)
if p <= pValThTarget:
catFeatureXYcat.append(col) # categorical feature xy when target is cat
featureDict[col] = p
#input vs input
# chi_square
if len(catFeatureXYcat) != 0:
length = len(catFeatureXYcat)
for i in range(length):
for j in range(i+1, length):
contingency = pd.crosstab(dataframe[catFeatureXYcat[i]], dataframe[catFeatureXYcat[j]])
stat, p, dof, expected = chi2_contingency(contingency)
if p > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYcat[i], catFeatureXYcat[j])
dropFeatureCat.append(highSignificanceColName)
break
catFeature = list(set(catFeatureXYcat) - set(dropFeatureCat))
featureDict.clear()
dropFeatureCat.clear()
if len(quantFeatures) !=0:
# input vs target
# one way anova
for col in quantFeatures:
CategoryGroupLists = dataframe.groupby(target)[col].apply(list)
AnovaResults = f_oneway(*CategoryGroupLists)
if AnovaResults[1] <= pValThTarget:
numFeatureXYcat.append(col) #numeric feature xy when target is cat
featureDict[col] = AnovaResults[1]
#input vs input
                    # pearson/spearman/ols # numeric feature xx when target is cat
if len(numFeatureXYcat) != 0:
df_xx = dataframe[numFeatureXYcat]
rows, cols = df_xx.shape
flds = list(df_xx.columns)
corr_pearson = df_xx.corr(method='pearson').values
corr_spearman = df_xx.corr(method='spearman').values
for i in range(cols):
for j in range(i+1, cols):
if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput:
if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput:
#f = "'"+flds[i]+"'"+' ~ '+"'"+flds[j]+"'"
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
numFeature = list(set(numFeatureXYcat) - set(dropFeatureNum))
dropFeatureNum.clear()
featureDict.clear()
impFeatures = numFeature+catFeature
hCorrFeatures=list(set((impFeatures)))
else: # targetType =="continuous":
if len(categoricalFeatures) !=0:
# input vs target
# Anova
for col in categoricalFeatures:
#f = target+' ~ C('+col+')'
#model = smf.ols(f, data=dataframe).fit()
#table = sm.stats.anova_lm(model, typ=2)
tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]})
model = smf.ols('y~x', data=tmpdf).fit()
table = sm.stats.anova_lm(model, typ=2)
if table['PR(>F)'][0] <= pValThTarget:
catFeatureXYnum.append(col) #categorical feature xy when target is numeric
featureDict[col]=table['PR(>F)'][0]
#input vs input
# chi_square
if len(catFeatureXYnum) != 0:
length = len(catFeatureXYnum)
for i in range(length):
for j in range(i+1, length):
contingency = pd.crosstab(dataframe[catFeatureXYnum[i]], dataframe[catFeatureXYnum[j]])
stat, p, dof, expected = chi2_contingency(contingency)
if p > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYnum[i], catFeatureXYnum[j])
dropFeatureCat.append(highSignificanceColName)
break
catFeature = list(set(catFeatureXYnum) - set(dropFeatureCat))
dropFeatureCat.clear()
featureDict.clear()
if len(quantFeatures) !=0:
# input vs target
                    # pearson/spearman/ols
for col in quantFeatures:
pearson_corr = pearsonr(dataframe[col], targetData)
coef = round(pearson_corr[0],5)
p_value = round(pearson_corr[1],5)
if coef > -corrThTarget and coef < corrThTarget:
spearman_corr = spearmanr(dataframe[col], targetData)
coef = round(spearman_corr[0],5)
p_value = round(spearman_corr[1],5)
if coef > -corrThTarget and coef < corrThTarget:
#f = target+' ~ '+col
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] <= pValThTarget:
                                    numFeatureXYnum.append(col) # numeric feature xy when target is numeric
featureDict[col]=reg.pvalues[1]
else:
numFeatureXYnum.append(col)
featureDict[col]=p_value
else:
numFeatureXYnum.append(col)
featureDict[col]=p_value
#input vs input
                    # pearson/spearman/ols
if len(numFeatureXYnum) != 0:
df_xx = dataframe[numFeatureXYnum]
rows, cols = df_xx.shape
flds = list(df_xx.columns)
corr_pearson = df_xx.corr(method='pearson').values
corr_spearman = df_xx.corr(method='spearman').values
for i in range(cols):
for j in range(i+1, cols):
if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput:
if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput:
#f = flds[i]+' ~ '+flds[j]
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
numFeature = list(set(numFeatureXYnum) - set(dropFeatureNum))
featureDict.clear()
dropFeatureNum.clear()
impFeatures = numFeature+catFeature
hCorrFeatures=list(set(impFeatures))
return hCorrFeatures,targetType
except Exception as inst:
self.log.info( '\n--> Failed calculating feature importance '+str(inst))
hCorrFeatures=[]
targetType=''
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
self.log.info('\n--> Taking all the features as highest correlation features')
hCorrFeatures = list(dataframe.columns)
return hCorrFeatures,targetType
def FFImp(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValTh,corrTh,categoricalMaxLabel,problem_type,maxClasses):
'''
Input: dataframe, numeric continuous features, numeric discrete features
Output: feature importance dictionary
'''
try:
dataframe =df
contiFeatures= contFeatures
discreteFeatures = discreteFeatures
nonNumeric = nonNumericFeatures
categoricalFeatures=categoricalFeatures
self.dTypesDic = dTypesDic
numericFeatures = contiFeatures + discreteFeatures+categoricalFeatures
quantFeatures=discreteFeatures+contiFeatures
scorrDict={}
fScoreDict={}
pcorrDict={}
miDict={}
targetData=dataframe[target]
data=dataframe[numericFeatures]
nUnique=len(targetData.unique().tolist())
nRows=targetData.shape[0]
'''
print("\n ===> nUnique :")
print(nUnique)
print("\n ===> nRows :")
print(nRows)
print("\n ===> cFRatio :")
print(cFRatio)
print("\n ===> nUnique/nRows :")
'''
#calratio = nUnique
            self.log.info( '-------> Target Column Unique Stats: nUnique: '+str(nUnique)+' nRows: '+str(nRows))
#sys.exit()
if nUnique <= categoricalMaxLabel:
targetType="categorical"
else:
targetType="continuous"
if problem_type.lower() == 'classification' and targetType == 'continuous':
targetType = 'categorical'
                self.log.info( '-------> Change Target Type to Categorical as user defined')
if problem_type.lower() == 'regression' and targetType == 'categorical':
targetType = 'continuous'
self.log.info( '-------> Change Target Type to Continuous as user defined')
self.log.info( '-------> Target Type: '+str(targetType))
impFeatures=[]
featureImpDict={}
if targetType =="categorical":
try:
if len(categoricalFeatures) !=0:
categoricalData=dataframe[categoricalFeatures]
chiSqCategorical=chi2(categoricalData,targetData)[1]
corrSeries=pd.Series(chiSqCategorical, index=categoricalFeatures)
impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())
corrDict=corrSeries.to_dict()
featureImpDict['chiSquaretestPValue']=corrDict
except Exception as inst:
self.log.info("Found negative values in categorical variables "+str(inst))
if len(quantFeatures) !=0:
try:
quantData=dataframe[quantFeatures]
fclassScore=f_classif(quantData,targetData)[1]
miClassScore=mutual_info_classif(quantData,targetData)
fClassSeries=pd.Series(fclassScore,index=quantFeatures)
miClassSeries=pd.Series(miClassScore,index=quantFeatures)
impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())
impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())
featureImpDict['anovaPValue']=fClassSeries.to_dict()
featureImpDict['MIScore']=miClassSeries.to_dict()
except MemoryError as inst:
self.log.info( '-------> MemoryError in feature selection. '+str(inst))
pearsonScore=dataframe.corr()
targetPScore=abs(pearsonScore[target])
impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())
featureImpDict['pearsonCoff']=targetPScore.to_dict()
hCorrFeatures=list(set(sum(impFeatures, [])))
else:
if len(quantFeatures) !=0:
try:
quantData =dataframe[quantFeatures]
fregScore=f_regression(quantData,targetData)[1]
miregScore=mutual_info_regression(quantData,targetData)
fregSeries=pd.Series(fregScore,index=quantFeatures)
miregSeries=pd.Series(miregScore,index=quantFeatures)
impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())
impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())
featureImpDict['anovaPValue']=fregSeries.to_dict()
featureImpDict['MIScore']=miregSeries.to_dict()
except MemoryError as inst:
self.log.info( '-------> MemoryError in feature selection. '+str(inst))
pearsonScore=dataframe.corr()
targetPScore=abs(pearsonScore[target])
impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())
featureImpDict['pearsonCoff']=targetPScore.to_dict()
hCorrFeatures=list(set(sum(impFeatures, [])))
return hCorrFeatures,targetType
except Exception as inst:
self.log.info( '\n--> Failed calculating feature importance '+str(inst))
hCorrFeatures=[]
targetType=''
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return hCorrFeatures,targetType
'''
Importance degree
Computes set of relational parameters
pearson correlation, mutual information
'''
def importanceDegree(self,dataframe,feature1,feature2):
try:
tempList = []
#Parameter 1: pearson correlation
pcorr = self.pearsonCoff(dataframe,feature1,feature2)
tempList.append(pcorr)
#Parameter 2: mutual information
#Testing
mi = self.mutualInfo(dataframe,feature1,feature2,self.dTypesDic)
tempList.append(mi)
#return the highest parameter
return np.max(tempList)
except:
return 0.0
'''
Compute pearson correlation
'''
def pearsonCoff(self,dataframe,feature1,feature2):
try:
value=dataframe[feature1].corr(dataframe[feature2])
return np.abs(value)
except:
return 0.0
'''
Compute mutual information
'''
def mutualInfo(self,dataframe,feature1,feature2,typeDic):
try:
numType = {'int64': 'discrete','int32' : 'discrete','int16' : 'discrete','float16' : 'continuous','float32' : 'continuous','float64' : 'continuous'}
featureType1 = numType[typeDic[feature1]]
featureType2 = numType[typeDic[feature2]]
bufferList1=dataframe[feature1].values.tolist()
bufferList2=dataframe[feature2].values.tolist()
#Case 1: Only if both are discrete
if(featureType1 == 'discrete' and featureType2 == 'discrete'):
                tempResult = self.discreteMI(bufferList1,bufferList2)
return np.mean(tempResult)
            #Case 2: feature1 continuous, feature2 discrete (the reverse pairing falls through to continuousMI)
elif(featureType1 == 'continuous' and featureType2 == 'discrete'):
tempResult = self.categoricalMI(bufferList1,bufferList2)
return np.mean(tempResult)
else:
tempResult = self.continuousMI(bufferList1,bufferList2)
return np.mean(tempResult)
except:
return 0.0
def continuousMI(self,bufferList1,bufferList2):
mi = 0.0
#Using mutual info regression from feature selection
mi = mutual_info_regression(self.vec(bufferList1),bufferList2)
return mi
def categoricalMI(self,bufferList1,bufferList2):
mi = 0.0
#Using mutual info classification from feature selection
mi = mutual_info_classif(self.vec(bufferList1),bufferList2)
return mi
def discreteMI(self,bufferList1,bufferList2):
mi = 0.0
#Using scikit normalized mutual information function
mi = normalized_mutual_info_score(bufferList1,bufferList2)
return mi
def vec(self,x):
return [[i] for i in x]
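
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of the input-vs-input chi-square test used
# in FFImpNew(): when the pairwise p-value exceeds the threshold, the column
# with the larger target p-value (via getHigherSignificanceColName) is the
# drop candidate. The featureDict values below are made-up target p-values.
def _demo_pairwise_chi2_drop(pValThInput=0.05):
    df = pd.DataFrame({'c1': ['a', 'a', 'b', 'b', 'a', 'b'],
                       'c2': ['x', 'y', 'x', 'y', 'x', 'y']})
    featureDict = {'c1': 0.01, 'c2': 0.20}
    contingency = pd.crosstab(df['c1'], df['c2'])
    stat, p, dof, expected = chi2_contingency(contingency)
    if p > pValThInput:
        return getHigherSignificanceColName(featureDict, 'c1', 'c2')
    return None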
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
AION_Gluon_MultiModalPrediction.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
import pandas as pd
import numpy as np
from pandas import json_normalize
from autogluon.text import TextPredictor
import os.path
def predict(data):
try:
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'text_prediction')
predictor = TextPredictor.load(model_path)
predictions = predictor.predict(df)
df['predict'] = predictions
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
output = json.dumps(outputjson)
print("predictions:",output)
return(output)
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
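
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical illustration of the input dispatch above, without
# loading a TextPredictor model: a raw JSON string (anything that is not a
# .tsv/.csv/.json path) is normalized into a DataFrame exactly as predict()
# does before prediction.
def _demo_parse_json_input():
    raw = '[{"text": "great product"}, {"text": "poor quality"}]'
    jsonData = json.loads(raw)
    return json_normalize(jsonData)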
if __name__ == "__main__":
output = predict(sys.argv[1])
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
AION_Gluon_MultiLabelPrediction.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
import pandas as pd
from pandas import json_normalize
#from selector import selector
#from inputprofiler import inputprofiler
#from trained_model import trained_model
#from output_format import output_format
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.core.utils.utils import setup_outputdir
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
import os.path
class MultilabelPredictor():
""" Tabular Predictor for predicting multiple columns in table.
Creates multiple TabularPredictor objects which you can also use individually.
You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
Parameters
----------
labels : List[str]
The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
path : str
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
        Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or omit `path` entirely.
        Otherwise files from the first `fit()` will be overwritten by the second `fit()`.
Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
problem_types : List[str]
The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
eval_metrics : List[str]
The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
consider_labels_correlation : bool
Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
kwargs :
Arguments passed into the initialization of each TabularPredictor.
"""
multi_predictor_file = 'multilabel_predictor.pkl'
def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs):
if len(labels) < 2:
raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).")
self.path = setup_outputdir(path, warn_if_exist=False)
self.labels = labels
self.consider_labels_correlation = consider_labels_correlation
self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
if eval_metrics is None:
self.eval_metrics = {}
else:
self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))}
problem_type = None
eval_metric = None
for i in range(len(labels)):
label = labels[i]
path_i = self.path + "Predictor_" + label
if problem_types is not None:
problem_type = problem_types[i]
if eval_metrics is not None:
                eval_metric = self.eval_metrics[labels[i]]
self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs)
def fit(self, train_data, tuning_data=None, **kwargs):
""" Fits a separate TabularPredictor to predict each of the labels.
Parameters
----------
train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
See documentation for `TabularPredictor.fit()`.
kwargs :
Arguments passed into the `fit()` call for each TabularPredictor.
"""
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
train_data_og = train_data.copy()
if tuning_data is not None:
tuning_data_og = tuning_data.copy()
save_metrics = len(self.eval_metrics) == 0
for i in range(len(self.labels)):
label = self.labels[i]
predictor = self.get_predictor(label)
if not self.consider_labels_correlation:
labels_to_drop = [l for l in self.labels if l!=label]
else:
                labels_to_drop = [self.labels[j] for j in range(i+1,len(self.labels))]
train_data = train_data_og.drop(labels_to_drop, axis=1)
if tuning_data is not None:
tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
print(f"Fitting TabularPredictor for label: {label} ...")
predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
self.predictors[label] = predictor.path
if save_metrics:
self.eval_metrics[label] = predictor.eval_metric
self.save()
def predict(self, data, **kwargs):
""" Returns DataFrame with label columns containing predictions for each label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
kwargs :
Arguments passed into the predict() call for each TabularPredictor.
"""
return self._predict(data, as_proba=False, **kwargs)
def predict_proba(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
kwargs :
Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
"""
return self._predict(data, as_proba=True, **kwargs)
def evaluate(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to evaluate predictions of all labels for; must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
kwargs :
Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
"""
data = self._get_data(data)
eval_dict = {}
for label in self.labels:
print(f"Evaluating TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
eval_dict[label] = predictor.evaluate(data, **kwargs)
if self.consider_labels_correlation:
data[label] = predictor.predict(data, **kwargs)
return eval_dict
def save(self):
""" Save MultilabelPredictor to disk. """
for label in self.labels:
if not isinstance(self.predictors[label], str):
self.predictors[label] = self.predictors[label].path
save_pkl.save(path=self.path+self.multi_predictor_file, object=self)
print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')")
@classmethod
def load(cls, path):
""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """
path = os.path.expanduser(path)
if path[-1] != os.path.sep:
path = path + os.path.sep
return load_pkl.load(path=path+cls.multi_predictor_file)
def get_predictor(self, label):
""" Returns TabularPredictor which is used to predict this label. """
predictor = self.predictors[label]
if isinstance(predictor, str):
return TabularPredictor.load(path=predictor)
return predictor
def _get_data(self, data):
if isinstance(data, str):
return TabularDataset(data)
return data.copy()
def _predict(self, data, as_proba=False, **kwargs):
data = self._get_data(data)
if as_proba:
predproba_dict = {}
for label in self.labels:
print(f"Predicting with TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
if as_proba:
predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs)
data[label] = predictor.predict(data, **kwargs)
if not as_proba:
return data[self.labels]
else:
return predproba_dict
def predict(data):
try:
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
#df0 = df.copy()
#profilerobj = inputprofiler()
#df = profilerobj.apply_profiler(df)
#selectobj = selector()
#df = selectobj.apply_selector(df)
#modelobj = trained_model()
#output = modelobj.predict(df,"")
# Load the Test data for Prediction
# ----------------------------------------------------------------------------#
test_data = df#TabularDataset(data) #'testingDataset.csv'
#subsample_size = 2
# ----------------------------------------------------------------------------#
# Specify the corresponding target features to be used
# ----------------------------------------------------------------------------#
#labels = ['education-num','education','class']
configFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),'etc','predictionConfig.json')
with open(configFile, 'rb') as cfile:
data = json.load(cfile)
labels = data['targetFeature']
# ----------------------------------------------------------------------------#
for x in labels:
if x in list(test_data.columns):
test_data.drop(x,axis='columns', inplace=True)
# ----------------------------------------------------------------------------#
#test_data = test_data.sample(n=subsample_size, random_state=0)
#print(test_data)
#test_data_nolab = test_data.drop(columns=labels)
#test_data_nolab.head()
test_data_nolab = test_data
# ----------------------------------------------------------------------------#
# Load the trained model from where it's stored
# ----------------------------------------------------------------------------#
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'ModelPath')
multi_predictor = MultilabelPredictor.load(model_path)
# ----------------------------------------------------------------------------#
# Start the prediction and perform the evaluation
# ----------------------------------------------------------------------------#
predictions = multi_predictor.predict(test_data_nolab)
for label in labels:
df[label+'_predict'] = predictions[label]
#evaluations = multi_predictor.evaluate(test_data)
#print(evaluations)
#print("Evaluated using metrics:", multi_predictor.eval_metrics)
# ----------------------------------------------------------------------------#
# ----------------------------------------------------------------------------#
#outputobj = output_format()
#output = outputobj.apply_output_format(df0,output)
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
output = json.dumps(outputjson)
print("predictions:",output)
return(output)
# ----------------------------------------------------------------------------#
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
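
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical end-to-end use of MultilabelPredictor on made-up
# data. The path, column names and time_limit are illustrative only; fit()
# forwards kwargs such as time_limit to each TabularPredictor.fit().
def _demo_multilabel_fit_predict():
    import numpy as np
    rng = np.random.RandomState(0)
    train = pd.DataFrame({'x1': rng.rand(100), 'x2': rng.rand(100)})
    train['y1'] = (train['x1'] > 0.5).astype(int)
    train['y2'] = train['x1'] + train['x2']
    predictor = MultilabelPredictor(labels=['y1', 'y2'], path='ag_demo_models',
                                    problem_types=['binary', 'regression'])
    predictor.fit(train, time_limit=60)
    return predictor.predict(train[['x1', 'x2']])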
if __name__ == "__main__":
output = predict(sys.argv[1])
|
regression_metrics.py | import numpy as np
from scipy.stats import norm
from sklearn.metrics import mean_squared_error, r2_score
from ..utils.misc import fitted_ucc_w_nullref
def picp(y_true, y_lower, y_upper):
"""
    Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the ground truth lies
    within the predicted interval. Measures the prediction interval calibration for regression.
Args:
y_true: Ground truth
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
        float: the fraction of samples for which the ground truth lies within the predicted interval.
"""
satisfies_upper_bound = y_true <= y_upper
satisfies_lower_bound = y_true >= y_lower
return np.mean(satisfies_upper_bound * satisfies_lower_bound)
def mpiw(y_lower, y_upper):
"""
    Mean Prediction Interval Width (MPIW). Computes the average width of the prediction intervals. Measures the
    sharpness of intervals.
Args:
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
        float: the average width of the prediction intervals across samples.
"""
return np.mean(np.abs(y_lower - y_upper))
def auucc_gain(y_true, y_mean, y_lower, y_upper):
""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference
with constant band.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: AUUCC gain
"""
u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper)
auucc = u.get_AUUCC()
assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain"
assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain"
auucc_gain = (auucc[1]-auucc[0])/auucc[0]
return auucc_gain
def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper):
""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: nll
"""
y_std = (y_upper - y_lower) / 4.0
nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze()))
return nll
def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None):
"""
    Computes the metrics specified in the option, which can be a string or a list of strings. The default option `all` computes
    the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
option: string or list of string contained the name of the metrics to be computed.
        nll_fn: function that evaluates NLL; if None, computes Gaussian NLL from y_mean, y_lower and y_upper.
Returns:
dict: dictionary containing the computed metrics.
"""
assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape)
    assert y_true.shape == y_lower.shape, "y_true shape: {}, y_lower shape: {}".format(y_true.shape, y_lower.shape)
    assert y_true.shape == y_upper.shape, "y_true shape: {}, y_upper shape: {}".format(y_true.shape, y_upper.shape)
results = {}
    if not isinstance(option, list):
        if option == "all":
            option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"]
        else:
            option_list = [option]
    else:
        option_list = option
if "rmse" in option_list:
results["rmse"] = mean_squared_error(y_true, y_mean, squared=False)
if "nll" in option_list:
if nll_fn is None:
nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper)
results["nll"] = nll
else:
results["nll"] = np.mean(nll_fn(y_true))
if "auucc_gain" in option_list:
gain = auucc_gain(y_true, y_mean, y_lower, y_upper)
results["auucc_gain"] = gain
if "picp" in option_list:
results["picp"] = picp(y_true, y_lower, y_upper)
if "mpiw" in option_list:
results["mpiw"] = mpiw(y_lower, y_upper)
if "r2" in option_list:
results["r2"] = r2_score(y_true, y_mean)
return results
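
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical check of the interval metrics on made-up arrays:
# every ground-truth point lies inside its band, so PICP is 1.0, and the
# constant band width makes MPIW exactly 2.0.
def _demo_regression_metrics():
    y_true = np.array([1.0, 2.0, 3.0])
    y_mean = np.array([1.1, 1.9, 3.2])
    y_lower, y_upper = y_mean - 1.0, y_mean + 1.0
    return compute_regression_metrics(y_true, y_mean, y_lower, y_upper,
                                      option=["rmse", "picp", "mpiw"])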
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7,
ax=None, figsize=None, dpi=None,
title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs):
"""
Plot the uncertainty distribution for a single distribution.
Args:
dist: scipy.stats._continuous_distns.
A scipy distribution object.
show_quantile_dots: boolean.
            Whether to show quantile dots on top of the density plot.
qd_sample: int.
Number of dots for the quantile dot plot.
qd_bins: int.
Number of bins for the quantile dot plot.
ax: matplotlib.axes.Axes or None, optional (default=None).
Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None).
Figure size.
dpi : int or None, optional (default=None).
Resolution of the figure.
    title : string or None, optional (default='Predicted Distribution')
Axes title.
If None, title is disabled.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
xlabel : string or None, optional (default=Prediction)
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default=Density)
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with prediction distribution.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100)
ax.plot(x, dist.pdf(x), **kwargs)
if show_quantile_dots:
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib.ticker as ticker
data = dist.rvs(size=10000)
p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample)
        x_ = np.percentile(data, p_less_than_x * 100)  # Inverse CDF (ppf)
# Create bins
hist = np.histogram(x_, bins=qd_bins)
bins, edges = hist
radius = (edges[1] - edges[0]) / 2
ax2 = ax.twinx()
patches = []
max_y = 0
for i in range(qd_bins):
x_bin = (edges[i + 1] + edges[i]) / 2
y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])]
max_y = max(y_bins) if max(y_bins) > max_y else max_y
for _, y_bin in enumerate(y_bins):
circle = Circle((x_bin, y_bin), radius)
patches.append(circle)
p = PatchCollection(patches, alpha=0.4)
ax2.add_collection(p)
        # Axis tweak
y_scale = (max_y + radius) / max(dist.pdf(x))
        ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x / y_scale))
ax2.yaxis.set_major_formatter(ticks_y)
ax2.set_yticklabels([])
if xlims is not None:
ax2.set_xlim(left=xlims[0], right=xlims[1])
else:
            ax2.set_xlim([min(x_) - radius, max(x_) + radius])
ax2.set_ylim([0, max_y + radius])
ax2.set_aspect(1)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10,
ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: One dimensional ndarray.
Feature column of the test dataset.
y_test: One dimensional ndarray.
Ground truth label of the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
num_bins: int.
            Number of bins used to discretize x_test into equal-sample-sized bins.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
        xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_xlim()``.
        ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_ylim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature.
"""
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
num_unique = len(x_uniques_sorted)
sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test)
if len(x_uniques_sorted) > 10: # bin the values
q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:])
q_sample_bin_ids = np.digitize(x_test, q_bins)
picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin],
y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)])
unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins)
picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)]
picp_replicated = np.array([item for sublist in picp_replicated for item in sublist])
else:
picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin],
y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)])
picp_replicated = picps
ax.plot(x_uniques_sorted, picp_replicated, label='PICP')
ax.axhline(0.95, linestyle='--', label='95%')
ax.set_ylabel('PICP')
ax.legend(loc='best')
if title is None:
title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format(
picp(y_test,
y_test_pred_lower_total,
y_test_pred_upper_total),
mpiw(y_test_pred_lower_total,
y_test_pred_upper_total))
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if ylims is not None:
ax.set_ylim(bottom=ylims[0], top=ylims[1])
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total,
y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None,
ax=None, figsize=None, dpi=None, xlims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: one dimensional ndarray.
Feature column of the test dataset.
y_test_pred_mean: One dimensional ndarray.
Model prediction for the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
y_test_pred_lower_epistemic: One dimensional ndarray.
Lower bound of the epistemic uncertainty range.
y_test_pred_upper_epistemic: One dimensional ndarray.
Upper bound of the epistemic uncertainty range.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
        xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.set_xlim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2
agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted])
agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted])
ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction')
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std,
agg_y_mean + 2.0 * agg_y_std,
alpha=0.3, label='total uncertainty')
if y_test_pred_lower_epistemic is not None:
y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2
agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted])
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std_epistemic,
agg_y_mean + 2.0 * agg_y_std_epistemic,
alpha=0.3, label='model uncertainty')
ax.legend(loc='best')
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
|
classification_metrics.py | import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.metrics import roc_auc_score, log_loss, accuracy_score
def entropy_based_uncertainty_decomposition(y_prob_samples):
""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components.
References:
.. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of
uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference
on Machine Learning (pp. 1184-1193). PMLR.
Args:
y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities
corresponding to samples from the model posterior.
Returns:
tuple:
- total_uncertainty: entropy of the predictive distribution.
- aleatoric_uncertainty: aleatoric component of the total_uncertainty.
- epistemic_uncertainty: epistemic component of the total_uncertainty.
"""
y_preds_samples_stacked = np.stack(y_prob_samples)
preds_mean = np.mean(y_preds_samples_stacked, 0)
total_uncertainty = entropy(preds_mean, axis=1)
aleatoric_uncertainty = np.mean(
np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1),
axis=1)
epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty
return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty
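# A minimal usage sketch (not part of the original module): decompose the
# predictive uncertainty of 5 posterior samples of class probabilities over
# 10 points and 3 classes. The Dirichlet draws are illustrative only.
def _demo_entropy_decomposition():
    rng = np.random.RandomState(0)
    y_prob_samples = [rng.dirichlet(np.ones(3), size=10) for _ in range(5)]
    total, aleatoric, epistemic = entropy_based_uncertainty_decomposition(y_prob_samples)
    return total, aleatoric, epistemic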
def multiclass_brier_score(y_true, y_prob):
"""Brier score for multi-class.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
Returns:
float: Brier score.
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_target = np.zeros_like(y_prob)
    y_target[np.arange(len(y_true)), y_true] = 1.0  # one-hot encode the true label of each sample
return np.mean(np.sum((y_target - y_prob) ** 2, axis=1))
def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score,
attributes=None, num_bins=10, subgroup_ids=None,
return_counts=False):
""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where
coverage instead of rejection rate is used.
References:
.. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty."
In International Conference on Machine Learning, pp. 1963-1971. 2019.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
selection_scores: scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- aurrrc (float): area under risk rejection rate curve.
- rejection_rates (list): rejection rates for each bin (returned only if return_counts is True).
- selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True).
- risks (list): risk in each bin (returned only if return_counts is True).
"""
if selection_scores is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)]
if y_pred is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_pred = np.argmax(y_prob, axis=1)
order = np.argsort(selection_scores)[::-1]
rejection_rates = []
selection_thresholds = []
risks = []
for bin_id in range(num_bins):
samples_in_bin = len(y_true) // num_bins
selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]]
selection_thresholds.append(selection_threshold)
ids = selection_scores >= selection_threshold
if sum(ids) > 0:
if attributes is None:
if isinstance(y_true, pd.Series):
y_true_numpy = y_true.values
else:
y_true_numpy = y_true
if subgroup_ids is None:
risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids])
else:
if sum(subgroup_ids & ids) > 0:
risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids])
else:
risk_value = 0.0
else:
risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes)
else:
risk_value = 0.0
risks.append(risk_value)
rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true))
aurrrc = np.nanmean(risks)
if not return_counts:
return aurrrc
else:
return aurrrc, rejection_rates, selection_thresholds, risks
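# A minimal usage sketch (not part of the original module): AURRRC for synthetic
# binary probabilities with the default accuracy-based risk. The probability
# model below is illustrative only.
def _demo_area_under_risk_rejection_rate_curve():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)
    p1 = np.clip(0.5 + (y_true - 0.5) * 0.6 + 0.1 * rng.randn(200), 0.01, 0.99)
    y_prob = np.column_stack([1.0 - p1, p1])
    return area_under_risk_rejection_rate_curve(y_true, y_prob)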
def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False):
""" Computes the reliability curve and the expected calibration error [1]_ .
References:
.. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference
on Machine Learning, PMLR 70:1321-1330, 2017.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
num_bins: number of bins.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- ece (float): expected calibration error.
- confidences_in_bins: average confidence in each bin (returned only if return_counts is True).
- accuracies_in_bins: accuracy in each bin (returned only if return_counts is True).
- frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True).
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
num_samples, num_classes = y_prob.shape
top_scores = np.max(y_prob, axis=1)
if y_pred is None:
y_pred = np.argmax(y_prob, axis=1)
if num_classes == 2:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0))
else:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0))
non_boundary_bin_edges = bins_edges[1:-1]
bin_centers = (bins_edges[1:] + bins_edges[:-1])/2
sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges)
num_samples_in_bins = np.zeros(num_bins)
accuracies_in_bins = np.zeros(num_bins)
confidences_in_bins = np.zeros(num_bins)
for bin in range(num_bins):
num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin])
if num_samples_in_bins[bin] > 0:
accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin]
confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin]
ece = np.sum(
num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples
)
frac_samples_in_bins = num_samples_in_bins / num_samples
if not return_counts:
return ece
else:
return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers
def compute_classification_metrics(y_true, y_prob, option='all'):
"""
Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes
the [aurrrc, ece, auroc, nll, brier, accuracy] metrics.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
        option: string or list of strings containing the names of the metrics to be computed.
Returns:
dict: a dictionary containing the computed metrics.
"""
results = {}
    if not isinstance(option, list):
        if option == "all":
            option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"]
        else:
            option_list = [option]
    else:
        option_list = option
if "aurrrc" in option_list:
results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob)
if "ece" in option_list:
results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob)
if "auroc" in option_list:
results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob)
if "nll" in option_list:
results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
if "brier" in option_list:
results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob)
if "accuracy" in option_list:
results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
return results
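# A minimal usage sketch (not part of the original module): selected metrics for
# synthetic binary probabilities. 'auroc' is skipped here because sklearn's
# roc_auc_score expects 1-D scores in the binary case.
def _demo_compute_classification_metrics():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)
    p1 = np.clip(0.5 + (y_true - 0.5) * 0.6 + 0.1 * rng.randn(200), 0.01, 0.99)
    y_prob = np.column_stack([1.0 - p1, p1])
    return compute_classification_metrics(y_true, y_prob, option=['ece', 'brier', 'accuracy'])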
def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10):
"""
Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves
can be plot by passing data as lists.
Args:
        y_true: array-like or a list of array-like of shape (n_samples,)
            ground truth labels.
        y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
            Probability scores from the base model.
        y_pred: array-like or a list of array-like of shape (n_samples,)
            predicted labels.
plot_label: (optional) list of names identifying each curve.
num_bins: number of bins.
Returns:
tuple:
- ece_list: ece: list containing expected calibration error for each curve.
- accuracies_in_bins_list: list containing binned average accuracies for each curve.
- frac_samples_in_bins_list: list containing binned sample frequencies for each curve.
- confidences_in_bins_list: list containing binned average confidence for each curve.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
ece_list = []
accuracies_in_bins_list = []
frac_samples_in_bins_list = []
confidences_in_bins_list = []
for idx in range(len(plot_label)):
ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx],
y_prob[idx],
y_pred[idx],
num_bins=num_bins,
return_counts=True)
ece_list.append(ece)
accuracies_in_bins_list.append(accuracies_in_bins)
frac_samples_in_bins_list.append(frac_samples_in_bins)
confidences_in_bins_list.append(confidences_in_bins)
fig = plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx])
plt.title("Confidence Histogram")
plt.xlabel("Confidence")
plt.ylabel("Fraction of Samples")
plt.grid()
plt.ylim([0.0, 1.0])
plt.legend()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(bins, accuracies_in_bins_list[idx], 'o-',
label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx]))
plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration")
plt.title("Reliability Plot")
plt.xlabel("Confidence")
plt.ylabel("Accuracy")
plt.grid()
plt.legend()
plt.show()
return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list
def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None,
attributes=None, num_bins=10, subgroup_ids=None):
"""
Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves
can be plot by passing data as lists.
Args:
        y_true: array-like or a list of array-like of shape (n_samples,)
            ground truth labels.
        y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
            Probability scores from the base model.
        y_pred: array-like or a list of array-like of shape (n_samples,)
            predicted labels.
selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a
subgroup of the samples specified by subgroup_ids.
Returns:
tuple:
- aurrrc_list: list containing the area under risk rejection rate curves.
- rejection_rate_list: list containing the binned rejection rates.
- selection_thresholds_list: list containing the binned selection thresholds.
- risk_list: list containing the binned risks.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
aurrrc_list = []
rejection_rate_list = []
risk_list = []
selection_thresholds_list = []
for idx in range(len(plot_label)):
        aurrrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve(
y_true[idx],
y_prob[idx],
y_pred[idx],
selection_scores=selection_scores[idx],
risk_func=risk_func,
attributes=attributes,
num_bins=num_bins,
subgroup_ids=subgroup_ids[idx],
return_counts=True
)
        aurrrc_list.append(aurrrc)
rejection_rate_list.append(rejection_rates)
risk_list.append(risks)
selection_thresholds_list.append(selection_thresholds)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx]))
plt.legend(loc="best")
plt.xlabel("Rejection Rate")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Rejection Rate Plot")
plt.grid()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx]))
plt.legend(loc="best")
plt.xlabel("Selection Threshold")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Selection Threshold Plot")
plt.grid()
plt.show()
return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list
|
__init__.py | from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
|
__init__.py | from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
|
uncertainty_characteristics_curve.py | from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps, trapz
from sklearn.isotonic import IsotonicRegression
DEFAULT_X_AXIS_NAME = 'excess'
DEFAULT_Y_AXIS_NAME = 'missrate'
class UncertaintyCharacteristicsCurve:
"""
Class with main functions of the Uncertainty Characteristics Curve (UCC).
"""
def __init__(self, normalize=True, precompute_bias_data=True):
"""
:param normalize: set initial axes normalization flag (can be changed via set_coordinates())
:param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based
UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call
if bias-based UCC is not needed.
"""
self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4}
self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"}
self.x_axis_idx = None
self.y_axis_idx = None
self.norm_x_axis = False
self.norm_y_axis = False
self.std_unit = None
self.normalize = normalize
self.d = None
self.gt = None
self.lb = None
self.ub = None
self.precompute_bias_data = precompute_bias_data
self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize)
def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None):
"""
Assigns user-specified type to the axes and normalization behavior (sticky).
:param x_axis_name: None-> unchanged, or name from self.axes_name2idx
:param y_axis_name: ditto
:param normalize: True/False will activate/deactivate norming for specified axes. Behavior for
Axes_name that are None will not be changed.
Value None will leave norm status unchanged.
Note, axis=='missrate' will never get normalized, even with normalize == True
:return: none
"""
normalize = self.normalize if normalize is None else normalize
if x_axis_name is None and self.x_axis_idx is None:
raise ValueError("ERROR(UCC): x-axis has not been defined.")
if y_axis_name is None and self.y_axis_idx is None:
raise ValueError("ERROR(UCC): y-axis has not been defined.")
        if x_axis_name is None and y_axis_name is None and normalize is not None:
            # just set normalization on/off for both (previously defined) axes and return
            self.norm_x_axis = False if self.x_axis_idx == self.axes_name2idx['missrate'] else normalize
            self.norm_y_axis = False if self.y_axis_idx == self.axes_name2idx['missrate'] else normalize
            return
if x_axis_name is not None:
self.x_axis_idx = self.axes_name2idx[x_axis_name]
self.norm_x_axis = False if x_axis_name == 'missrate' else normalize
if y_axis_name is not None:
self.y_axis_idx = self.axes_name2idx[y_axis_name]
self.norm_y_axis = False if y_axis_name == 'missrate' else normalize
def set_std_unit(self, std_unit=None):
"""
Sets the UCC's unit to be used when displaying normalized axes.
:param std_unit: if None, the unit will be calculated as stddev of the ground truth data
(ValueError raised if data has not been set at this point)
or set to the user-specified value.
:return:
"""
if std_unit is None: # set it to stddev of data
if self.gt is None:
raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.")
self.std_unit = np.std(self.gt)
if np.isclose(self.std_unit, 0.):
print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.")
self.std_unit = 1.
else:
self.std_unit = float(std_unit)
def fit(self, X, gt):
"""
Calculates internal arrays necessary for other methods (plotting, auc, cost minimization).
Re-entrant.
:param X: [numsamples, 3] numpy matrix, or list of numpy matrices.
Col 1: predicted values
Col 2: lower band (deviate) wrt predicted value (always positive)
Col 3: upper band wrt predicted value (always positive)
If list is provided, all methods will output corresponding metrics as lists as well!
        :param gt: Ground truth array (i.e., the 'actual' values corresponding to predictions in X)
:return: self
"""
if not isinstance(X, list):
X = [X]
newX = []
for x in X:
assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt))
newX.append(self._sanitize_input(x))
self.d = [gt - x[:, 0] for x in newX]
self.lb = [x[:, 1] for x in newX]
self.ub = [x[:, 2] for x in newX]
self.gt = gt
self.set_std_unit()
self.plotdata_for_scale = []
self.plotdata_for_bias = []
# precompute plotdata:
for i in range(len(self.d)):
self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False))
if self.precompute_bias_data:
self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True))
return self
def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True,
search=('scale', 'bias')):
"""
Find minima of a linear cost function for each component.
Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value.
A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg).
        The function returns a 'recipe' for how to achieve the corresponding minimum, for each component.
:param x_axis_cost: weight of one unit on x_axis
:param y_axis_cost: weight of one unit on y_axis
:param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be
pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes.
:param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'.
:return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are -
'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to
add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that
minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point).
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if augment_cost_by_normfactor:
if self.norm_x_axis:
x_axis_cost /= self.std_unit
if self.norm_y_axis:
y_axis_cost /= self.std_unit
print("INFO(UCC): Pre-norming costs by corresp. std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" %
(x_axis_cost, y_axis_cost))
if isinstance(search, tuple):
search = list(search)
if not isinstance(search, list):
search = [search]
min_costs = []
for d in range(len(self.d)):
# original OP cost
m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d])
original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][
self.y_axis_idx]
plotdata = self.plotdata_for_scale[d]
cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_scale_multiplier = plotdata[minidx_scale][0]
mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx]
mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx]
if 'bias' in search:
if not self.precompute_bias_data:
raise ValueError(
"ERROR(UCC): Cannot perform minimization - instantiated without bias data computation")
plotdata = self.plotdata_for_bias[d]
cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_bias_add = plotdata[minidx_bias][0]
mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx]
mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx]
if 'bias' in search and 'scale' in search:
if cost_bias < cost_scale:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'scale' in search:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'bias' in search:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search))
if len(min_costs) < 2:
return min_costs[0]
else:
return min_costs
def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None,
req_critical_value=None, vary_bias=False):
"""
Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns
a list of recipes how to achieve the point (x,y), for each component. If there is only one component,
returns a single recipe dict.
:param req_x_axis_value: requested x value on UCC (normalization status is taken from current display)
:param req_y_axis_value: requested y value on UCC (normalization status is taken from current display)
        :param req_critical_value: requested critical (scale or bias) value on the UCC
        :param vary_bias: set to True when referring to bias-induced UCC (scale UCC default)
:return: list of dicts (recipes), or a single dict
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1:
raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
recipe = []
for dc in range(len(self.d)):
plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc]
if req_x_axis_value is not None:
tgtidx = self.x_axis_idx
req_value = req_x_axis_value * xnorm
elif req_y_axis_value is not None:
tgtidx = self.y_axis_idx
req_value = req_y_axis_value * ynorm
elif req_critical_value is not None:
req_value = req_critical_value
                tgtidx = 0  # first element in plotdata is always the critical value (scale or bias)
else:
raise RuntimeError("Unhandled case")
closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata]))
recipe.append({'operation': ('bias' if vary_bias else 'scale'),
'modvalue': plotdata[closestidx][0],
'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm,
'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm})
if len(recipe) < 2:
return recipe[0]
else:
return recipe
def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2):
"""
        Finds the minimum cost function value and the corresponding position index in plotdata
        :param plotdata: list of tuples
:param idx1: idx of x-axis item within the tuple
:param idx2: idx of y-axis item within the tuple
:param cost1: cost factor for x-axis unit
:param cost2: cost factor for y-axis unit
:return: min cost value, index within plotdata where minimum occurs
"""
raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata]
minidx = np.argmin(raw)
return raw[minidx], minidx
def _sanitize_input(self, x):
"""
Replaces problematic values in input data (e.g, zero error bars)
:param x: single matrix of input data [n, 3]
:return: sanitized version of x
"""
if np.isclose(np.sum(x[:, 1]), 0.):
raise ValueError("ERROR(UCC): Provided lower bands are all zero.")
if np.isclose(np.sum(x[:, 2]), 0.):
raise ValueError("ERROR(UCC): Provided upper bands are all zero.")
for i in [1, 2]:
if any(np.isclose(x[:, i], 0.)):
print("WARN(UCC): some band values are 0. - REPLACING with positive minimum")
m = np.min(x[x[:, i] > 0, i])
x = np.where(np.isclose(x, 0.), m, x)
return x
def _calc_avg_excess(self, d, lb, ub):
"""
        Excess is the amount by which an error bar overshoots the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average excess over array
"""
excess = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx])
negidx = np.where(d < 0)[0]
excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx])
return np.mean(excess)
def _calc_avg_deficit(self, d, lb, ub):
"""
        Deficit is the error bar insufficiency: the amount by which a bar falls short of the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average deficit over array
"""
deficit = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx])
negidx = np.where(d < 0)[0]
deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx])
return np.mean(deficit)
def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0):
"""
        Calculates the miss rate (1 - recall) at a given scale/bias, along with average bandwidth, excess, and deficit
:param d: delta
:param lb: lower band
:param ub: upper band
:param scale: scale * (x + bias)
:param bias:
:return: miss rate, average bandwidth, avg excess, avg deficit
"""
abslband = scale * np.where((lb + bias) < 0., 0., lb + bias)
absuband = scale * np.where((ub + bias) < 0., 0., ub + bias)
recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d)
avgbandwidth = np.mean([absuband, abslband])
avgexcess = self._calc_avg_excess(d, abslband, absuband)
avgdeficit = self._calc_avg_deficit(d, abslband, absuband)
return 1 - recall, avgbandwidth, avgexcess, avgdeficit
def _calc_plotdata(self, d, lb, ub, vary_bias=False):
"""
Generates data necessary for various UCC metrics.
        :param d: delta (predicted - actual) vector
        :param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth)
        :param ub: upper uncertainty bandwidth (above predicted) - all positive (bandwidth)
:param vary_bias: True will switch to additive bias instead of scale
:return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit)
"""
# step 1: collect critical scale or bias values
critval = []
for i in range(len(d)):
if not vary_bias:
if d[i] >= 0:
critval.append(d[i] / ub[i])
else:
critval.append(-d[i] / lb[i])
else:
if d[i] >= 0:
critval.append(d[i] - ub[i])
else:
critval.append(-lb[i] - d[i])
critval = sorted(critval)
plotdata = []
for i in range(len(critval)):
if not vary_bias:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
scale=critval[i])
else:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
bias=critval[i])
plotdata.append((critval[i], missrate, bandwidth, excess, deficit))
return plotdata
def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None):
"""
        Returns the approximate area under the curve on current coordinates, for each component.
:param vary_bias: False == varies scale, True == varies bias
:param aucfct: specifies AUC integrator (can be "trapz", "simps")
        :param partial_x: tuple (x_min, x_max) defining the interval on x over which to compute a partial AUC.
            The interval bounds refer to the axes as visualized (i.e. potentially normed)
        :param partial_y: tuple (y_min, y_max) defining the interval on y over which to compute a partial AUC. partial_x must be None.
:return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if partial_x is not None and partial_y is not None:
raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.")
assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2))
assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2))
# find starting point (where the x axis value starts to actually change)
rv = []
# do this for individual streams
xind = self.x_axis_idx
aucfct = simps if aucfct == "simps" else trapz
for s in range(len(self.d)):
plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s]
prev = plotdata[0][xind]
t = 1
cval = plotdata[t][xind]
while cval == prev and t < len(plotdata) - 1:
t += 1
prev = cval
cval = plotdata[t][xind]
startt = t - 1 # from here, it's a valid function
endtt = len(plotdata)
if startt >= endtt - 2:
rvs = 0. # no area
else:
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)]
x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)]
if partial_x is not None:
from_i = self._find_closest_index(partial_x[0], x)
to_i = self._find_closest_index(partial_x[1], x) + 1
elif partial_y is not None:
from_i = self._find_closest_index(partial_y[0], y)
to_i = self._find_closest_index(partial_y[1], y)
if from_i > to_i: # y is in reverse order
from_i, to_i = to_i, from_i
to_i += 1 # as upper bound in array indexing
else:
from_i = 0
to_i = len(x)
to_i = min(to_i, len(x))
if to_i < from_i:
raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.")
if to_i - from_i < 2:
raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified")
rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i])
rv.append(rvs)
if len(rv) < 2:
return rv[0]
else:
return rv
    @staticmethod
def _find_closest_index(value, array):
"""
Returns an index of the 'array' element closest in value to 'value'
:param value:
:param array:
:return:
"""
return np.argmin(np.abs(np.asarray(array)-value))
def _get_single_OP(self, d, lb, ub, scale=1., bias=0.):
"""
Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: single tuple (x point, y point, unit of x, unit of y)
"""
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias)
op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here)
return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm)
def get_OP(self, scale=1., bias=0.):
"""
Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only
1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
op = []
for dc in range(len(self.d)):
op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias))
if len(op) < 2:
return op[0]
else:
return op
def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None,
xlim=None, ylim=None, **kwargs):
""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown
if there are multiple data components (via fit())
:param titlestr: Plot title string
        :param syslabel: list of label strings to appear in the plot legend. Can be a single string if there is one component.
        :param outfn: name of an image file to be saved instead of displaying the plot interactively
:param vary_bias: True will switch to varying additive bias (default is multiplicative scale)
:param markers: None or a list of marker styles to be used for each curve.
List must be same or longer than number of components.
Markers can be one among these ['o', 's', 'v', 'D', '+'].
        :param xlim: tuple or list specifying the range for the x axis, or None (auto)
        :param ylim: tuple or list specifying the range for the y axis, or None (auto)
:param `**kwargs`: Additional arguments passed to the main plot call.
:return: list of areas under the curve (or single area, if one data component)
list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit)
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if not isinstance(syslabel, list):
syslabel = [syslabel]
assert (len(syslabel) == len(self.d))
assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d)))
# main plot of (possibly multiple) datasets
plt.figure()
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
op_info = []
auucc = self.get_AUUCC(vary_bias=vary_bias)
auucc = [auucc] if not isinstance(auucc, list) else auucc
for s in range(len(self.d)):
# original operating point
x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s])
op_info.append((x_op, y_op, x_unit, y_unit))
# display chart
plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s]
axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata]
axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata]
marker = None
if markers is not None: marker = markers[s]
p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + (" (AUC=%.3f)" % auucc[s]), marker=marker, **kwargs)
if s + 1 == len(self.d):
oplab = 'OP'
else:
oplab = None
plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w',
markeredgewidth=1.5, markeredgecolor=p[0].get_color())
axisX_label = self.axes_idx2descr[self.x_axis_idx]
axisY_label = self.axes_idx2descr[self.y_axis_idx]
axisX_units = "(raw)" if np.isclose(xnorm, 1.0) else "[in std deviations]"
axisY_units = "(raw)" if np.isclose(ynorm, 1.0) else "[in std deviations]"
axisX_label += ' ' + axisX_units
axisY_label += ' ' + axisY_units
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
plt.xlabel(axisX_label)
plt.ylabel(axisY_label)
plt.legend()
plt.title(titlestr)
plt.grid()
if outfn is None:
plt.show()
else:
plt.savefig(outfn)
if len(auucc) < 2:
auucc = auucc[0]
op_info = op_info[0]
return auucc, op_info
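# A minimal usage sketch (not part of the original module): fit a UCC on
# synthetic predictions with constant error bands and read off the scale-based
# AUUCC. The data and band widths are illustrative only.
def _demo_ucc():
    rng = np.random.RandomState(0)
    gt = rng.randn(200)
    pred = gt + 0.1 * rng.randn(200)
    # columns: prediction, lower band (positive), upper band (positive)
    X = np.column_stack([pred, np.full(200, 0.5), np.full(200, 0.5)])
    ucc = UncertaintyCharacteristicsCurve().fit(X, gt)
    return ucc.get_AUUCC()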
|
heteroscedastic_mlp.py | import torch
import torch.nn.functional as F
from uq360.models.noise_models.heteroscedastic_noise_models import GaussianNoise
class GaussianNoiseMLPNet(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(GaussianNoiseMLPNet, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
self.noise_layer = GaussianNoise()
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
def loss(self, y_true=None, mu_pred=None, log_var_pred=None):
        return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True)
 |
layer_utils.py | """
Contains implementations of various utilities used by Horseshoe Bayesian layers
"""
import numpy as np
import torch
from torch.nn import Parameter
td = torch.distributions
gammaln = torch.lgamma
def diag_gaussian_entropy(log_std, D):
    return 0.5 * D * (1.0 + np.log(2 * np.pi)) + torch.sum(log_std)  # np.log: the argument is a Python float, not a tensor
def inv_gamma_entropy(a, b):
return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a))
def log_normal_entropy(log_std, mu, D):
return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi)
class InvGammaHalfCauchyLayer(torch.nn.Module):
"""
Uses the inverse Gamma parameterization of the half-Cauchy distribution.
a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an
auxiliary latent variable.
Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat).
This layer places a half Cauchy prior on the scales of each output node of the layer.
"""
def __init__(self, out_features, b):
"""
        :param out_features: number of output nodes in the layer.
:param b: scale of the half Cauchy
"""
super(InvGammaHalfCauchyLayer, self).__init__()
self.b = b
self.out_features = out_features
# variational parameters for q(ln a^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
# self.log_sigma = torch.FloatTensor(out_features)
# variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters.
self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1.
self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2)
self.const = torch.FloatTensor([0.5])
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
# sample from half cauchy and log to initialize the mean of the log normal
sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features)))
self.mu.data = torch.FloatTensor(np.log(sample))
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)]
"""
expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + (
-0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat)
expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * (
torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat)
return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda)
def entropy(self):
"""
Computes entropy of q(ln a^2) and q(lambda)
"""
return self.entropy_lambda() + self.entropy_a2()
def entropy_lambda(self):
return inv_gamma_entropy(self.ahat, self.bhat)
def entropy_a2(self):
return log_normal_entropy(self.log_sigma, self.mu, self.out_features)
def kl(self):
"""
Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2))
"""
return -self.expectation_wrt_prior() - self.entropy()
def fixed_point_updates(self):
# update lambda moments
self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. / self.b ** 2)
class InvGammaLayer(torch.nn.Module):
"""
Approximates the posterior of c^2 with prior IGamma(c^2 | a , b)
using a log Normal approximation q(ln c^2) = N(mu, sigma^2)
"""
def __init__(self, a, b, out_features=1):
super(InvGammaLayer, self).__init__()
self.a = torch.FloatTensor([a])
self.b = torch.FloatTensor([b])
# variational parameters for q(ln c^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
self.out_features = out_features
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(c^2 | a, b)]
"""
# return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + (
# - self.c_a - 1) * c_mu - self.c_b * Ecinv
return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \
* self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2)
def entropy(self):
return log_normal_entropy(self.log_sigma, self.mu, 1)
def kl(self):
"""
Computes KL(q(ln(c^2) || IG(c^2 | a, b))
"""
return -self.expectation_wrt_prior().sum() - self.entropy()
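# A minimal usage sketch (not part of the original module): instantiate the
# half-Cauchy scale layer, run one fixed-point update for q(lambda), and read
# off the resulting KL term. The layer width is illustrative only.
def _demo_inv_gamma_half_cauchy_layer():
    layer = InvGammaHalfCauchyLayer(out_features=4, b=1.0)
    layer.fixed_point_updates()  # refresh q(lambda) given the current q(ln a^2)
    return layer.kl().item()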
|
layers.py | """
Contains implementations of various Bayesian layers
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer
td = torch.distributions
def reparam(mu, logvar, do_sample=True, mc_samples=1):
    if do_sample:
        std = torch.exp(0.5 * logvar)
        eps = torch.FloatTensor(std.size()).normal_()
        sample = mu + eps * std
        for _ in np.arange(1, mc_samples):
            eps = torch.FloatTensor(std.size()).normal_()  # draw fresh noise for each MC sample
            sample += mu + eps * std
        return sample / mc_samples
else:
return mu
class BayesianLinearLayer(torch.nn.Module):
"""
Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and
fully factorized variational Gaussian approximation
"""
def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None):
super(BayesianLinearLayer, self).__init__()
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
# weight mean params
self.weights = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(out_features))
# weight variance params
self.weights_logvar = Parameter(torch.Tensor(out_features, in_features))
self.bias_logvar = Parameter(torch.Tensor(out_features))
# numerical stability
self.fudge_factor = 1e-8
if not prior_stdv:
# We will use a N(0, 1/num_inputs) prior over weights
self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))])
else:
self.prior_stdv = torch.FloatTensor([prior_stdv])
# self.prior_stdv = torch.Tensor([1. / np.sqrt(1e+3)])
self.prior_mean = torch.FloatTensor([0.])
# for Bias use a prior of N(0, 1)
self.prior_bias_stdv = torch.FloatTensor([1.])
self.prior_bias_mean = torch.FloatTensor([0.])
# init params either random or with pretrained net
self.init_parameters(init_weight, init_bias)
def init_parameters(self, init_weight, init_bias):
# init means
if init_weight is not None:
self.weights.data = torch.Tensor(init_weight)
else:
            self.weights.data.normal_(0, float(self.prior_stdv.numpy()[0]))  # builtin float; np.float was removed from NumPy
if init_bias is not None:
self.bias.data = torch.Tensor(init_bias)
else:
self.bias.data.normal_(0, 1)
# init variances
self.weights_logvar.data.normal_(-9, 1e-2)
self.bias_logvar.data.normal_(-9, 1e-2)
def forward(self, x, do_sample=True, scale_variances=False):
# local reparameterization trick
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
if scale_variances:
activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample)
else:
activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return activ
def kl(self):
"""
KL divergence (q(W) || p(W))
:return:
"""
weights_logvar = self.weights_logvar
kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \
(weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (
2 * self.prior_stdv.pow(2)) - 0.5
kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \
(self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / (
2 * self.prior_bias_stdv.pow(2)) \
- 0.5
return kld_weights.sum() + kld_bias.sum()
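# A minimal usage sketch (not part of the original module): one stochastic
# forward pass through the Bayesian affine layer plus its KL penalty. The
# shapes and inputs are illustrative only.
def _demo_bayesian_linear_layer():
    layer = BayesianLinearLayer(in_features=3, out_features=2)
    x = torch.randn(5, 3)
    activations = layer(x, do_sample=True)  # local reparameterization sample
    return activations.shape, layer.kl().item()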
class HorseshoeLayer(BayesianLinearLayer):
"""
Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k
are vectors of all weights incident into the unit
"""
def __init__(self, in_features, out_features, cuda=False, scale=1.):
super(HorseshoeLayer, self).__init__(in_features, out_features)
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.)
self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale)
# prior on beta is N(0, I) when employing non centered parameterization
self.prior_stdv = torch.Tensor([1])
self.prior_mean = torch.Tensor([0.])
def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample scales
scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu)
scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2)
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return scale_sample * activ_sample
def kl(self):
return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl()
def fixed_point_updates(self):
self.nodescales.fixed_point_updates()
self.layerscale.fixed_point_updates()
class RegularizedHorseshoeLayer(HorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
    w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, c_b).
c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b)
def forward(self, x, do_sample=True, **kwargs):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample regularized scales
scale_mean = self.nodescales.mu + self.layerscale.mu
scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp()
regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample)
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return torch.sqrt(regularized_scale_sample) * activ_sample
def kl(self):
return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl()
class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
    w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(c_a, c_b).
c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe
Note that we now have a per-node c_k.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
|
misc.py | import numpy as np
import torch
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision
def compute_test_ll(y_test, y_pred_samples, std_y=1.):
"""
    Computes the test log likelihood = (1 / Ntest) * \sum_n log p(y_n | x_n, D_train)
:param y_test: True y
:param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
:param std_y: True std of y (assumed known)
"""
S, _ = y_pred_samples.shape
noise = GaussianNoiseFixedPrecision(std_y=std_y)
ll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)
ll = torch.logsumexp(ll, dim=0) - np.log(S) # mean over num samples
return torch.mean(ll) # mean over test points
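# A minimal usage sketch (not part of the original module): average predictive
# log likelihood of 100 posterior-sample predictions on 20 synthetic test
# points. The tensor shapes and noise level are illustrative only.
def _demo_compute_test_ll():
    y_test = torch.randn(20)
    y_pred_samples = y_test.unsqueeze(0) + 0.1 * torch.randn(100, 20)
    return compute_test_ll(y_test, y_pred_samples, std_y=1.).item()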
|
horseshoe_mlp.py | from abc import ABC
import numpy as np
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
td = torch.distributions
class HshoeBNN(nn.Module, ABC):
"""
Bayesian neural network with Horseshoe layers.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1,
hshoe_scale=1e-1, use_reg_hshoe=False):
if use_reg_hshoe:
layer = RegularizedHorseshoeLayer
else:
layer = HorseshoeLayer
super(HshoeBNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
        else:
            raise ValueError("Activation type not supported; expected 'relu' or 'tanh'")
        self.fc_hidden = nn.ModuleList()  # ModuleList so hidden-layer parameters are registered and trained
self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes))
self.fc_out = BayesianLinearLayer(num_nodes, op_dim)
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def fixed_point_updates(self):
if hasattr(self.fc1, 'fixed_point_updates'):
self.fc1.fixed_point_updates()
if hasattr(self.fc_out, 'fixed_point_updates'):
self.fc_out.fixed_point_updates()
for layer in self.fc_hidden:
if hasattr(layer, 'fixed_point_updates'):
layer.fixed_point_updates()
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
        self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
        self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class HshoeRegressionNet(HshoeBNN, ABC):
"""
Horseshoe net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        sum of squared errors scaled by half the expected noise precision (0.5 / noise_var)
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class HshoeClassificationNet(HshoeBNN, ABC):
"""
Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w()) / num_batches - Elik
return neg_elbo
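# --- Minimal training sketch (illustrative only; the synthetic data and
# hyperparameters below are assumptions, not part of the library) ---
if __name__ == '__main__':
    net = HshoeRegressionNet(ip_dim=2, op_dim=1, num_nodes=10, num_layers=1)
    x, y = torch.randn(32, 2), torch.randn(32, 1)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
    for _ in range(5):
        loss = net.neg_elbo(num_batches=1, x=x, y=y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        net.fixed_point_updates()  # conditionally conjugate updates for the Horseshoe scales
    print('noise var:', net.get_noise_var())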
|
bayesian_mlp.py | from abc import ABC
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class BayesianNN(nn.Module, ABC):
"""
Bayesian neural network with zero mean Gaussian priors over weights.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50,
activation_type='relu', num_layers=1):
super(BayesianNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
        else:
            raise ValueError("Activation type not supported; expected 'relu' or 'tanh'")
        self.fc_hidden = nn.ModuleList()  # ModuleList so hidden-layer parameters are registered and trained
self.fc1 = layer(ip_dim, num_nodes,)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes, ))
self.fc_out = layer(num_nodes, op_dim, )
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
        self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
        self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class BayesianRegressionNet(BayesianNN, ABC):
"""
Bayesian neural net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        sum of squared errors scaled by half the expected noise precision (0.5 / noise_var)
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class BayesianClassificationNet(BayesianNN, ABC):
"""
Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = self.kl_divergence_w() / num_batches - Elik
return neg_elbo
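# --- Minimal classification sketch (illustrative only; synthetic data) ---
if __name__ == '__main__':
    net = BayesianClassificationNet(ip_dim=4, op_dim=3, num_nodes=10, num_layers=1)
    x = torch.randn(16, 4)
    y = torch.randint(0, 3, (16,))
    loss = net.neg_elbo(num_batches=1, x=x, y=y)  # KL(q||p)/num_batches - E_q[log lik]
    loss.backward()
    print(float(loss))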
|
homoscedastic_noise_models.py | import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b).
Uses a variational approximation; q(lambda) = Gamma(ahat, bhat)
"""
def __init__(self, a0=6, b0=6, cuda=False):
super(GaussianNoiseGammaPrecision, self).__init__()
self.cuda = cuda
self.a0 = a0
self.b0 = b0
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
# variational parameters
self.ahat = Parameter(torch.FloatTensor([10.]))
self.bhat = Parameter(torch.FloatTensor([3.]))
def loss(self, y_pred=None, y_true=None):
"""
computes -1 * E_q(\lambda)[ln N (y_pred | y_true, \lambda^-1)], where q(lambda) = Gamma(ahat, bhat)
:param y_pred:
:param y_true:
:return:
"""
n = y_pred.shape[0]
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \
- 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())
def kl(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \
self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat
def get_noise_var(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (bhat / ahat).data.numpy()[0]
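# --- Sketch: one step on the variational noise parameters (illustrative only;
# synthetic tensors, not library code) ---
#
#     noise = GaussianNoiseGammaPrecision(a0=6., b0=6.)
#     objective = noise.loss(y_pred=torch.randn(8), y_true=torch.randn(8)) + noise.kl()
#     objective.backward()                # gradients flow into ahat / bhat
#     print(noise.get_noise_var())        # posterior mean noise variance, bhat / ahat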
class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), sigma_y**2); known sigma_y
"""
def __init__(self, std_y=1., cuda=False):
super(GaussianNoiseFixedPrecision, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
self.sigma_y = std_y
    def loss(self, y_pred=None, y_true=None, reduce_sum=True):
        """
        computes -1 * ln N (y_pred | y_true, sigma_y**2). When `reduce_sum` is False the
        per-point log likelihoods are returned instead (as expected by `compute_test_ll`).
        :param y_pred:
        :param y_true:
        :param reduce_sum: if True, return the summed negative log likelihood.
        :return:
        """
        ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)
        if reduce_sum:
            return -ll.sum(dim=0)
        return ll
def get_noise_var(self):
return self.sigma_y ** 2 |
heteroscedastic_noise_models.py | import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoise(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f_\mu(x, w), f_\sigma^2(x, w))
"""
def __init__(self, cuda=False):
super(GaussianNoise, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True):
"""
computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred))
:param y_true:
:param mu_pred:
:param log_var_pred:
:return:
"""
var_pred = transform(log_var_pred)
ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2)
if reduce_mean:
return -ll.mean(dim=0)
else:
return -ll.sum(dim=0)
def get_noise_var(self, log_var_pred):
return transform(log_var_pred)
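# --- Usage sketch (illustrative only; synthetic tensors) ---
if __name__ == '__main__':
    noise = GaussianNoise()
    y_true = torch.randn(8, 1)
    mu_pred = torch.randn(8, 1)
    log_var_pred = torch.zeros(8, 1)  # softplus(0) ~= 0.69 predicted variance
    print(noise.loss(y_true=y_true, mu_pred=mu_pred, log_var_pred=log_var_pred))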
|
noisemodel.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class AbstractNoiseModel(ABC):
""" Abstract class. All noise models inherit from here.
"""
def __init__(self, *argv, **kwargs):
""" Initialize an AbstractNoiseModel object.
"""
@abc.abstractmethod
def loss(self, *argv, **kwargs):
""" Compute loss given predictions and groundtruth labels
"""
raise NotImplementedError
@abc.abstractmethod
def get_noise_var(self, *argv, **kwargs):
"""
Return the current estimate of noise variance
"""
raise NotImplementedError
|
builtinuq.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class BuiltinUQ(ABC):
""" BuiltinUQ is the base class for any algorithm that has UQ built into it.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def fit(self, *argv, **kwargs):
""" Learn the UQ related parameters..
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
|
posthocuq.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class PostHocUQ(ABC):
""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def _process_pretrained_model(self, *argv, **kwargs):
""" Method to process the pretrained model that requires UQ.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self):
"""
This method should not take any arguments and returns a dict of the __init__ parameters.
"""
raise NotImplementedError
|
__init__.py | from .ucc_recalibration import UCCRecalibration
|
ucc_recalibration.py | from collections import namedtuple
from uq360.algorithms.posthocuq import PostHocUQ
from uq360.utils.misc import form_D_for_auucc
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
class UCCRecalibration(PostHocUQ):
""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve.
"""
def __init__(self, base_model):
"""
Args:
base_model: pretrained model to be recalibrated.
"""
        super(UCCRecalibration, self).__init__()
self.base_model = self._process_pretrained_model(base_model)
self.ucc = None
def get_params(self, deep=True):
return {"base_model": self.base_model}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
"""
Fit the Uncertainty Characteristics Curve.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
self.ucc = UncertaintyCharacteristicsCurve()
self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze())
return self
def predict(self, X, missrate=0.05):
"""
Generate prediction and uncertainty bounds for data X.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
missrate: desired missrate of the new operating point, set to 0.05 by default.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False)
new_scale = C['modvalue']
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
        if C['operation'] == 'bias':
            calib_y_pred_upper = y_pred_mean + (new_scale + bwu)  # shift the upper bound width by the new bias
            calib_y_pred_lower = y_pred_mean - (new_scale + bwl)  # shift the lower bound width by the new bias
        else:
            calib_y_pred_upper = y_pred_mean + (new_scale * bwu)  # scale the upper bound width
            calib_y_pred_lower = y_pred_mean - (new_scale * bwl)  # scale the lower bound width
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper)
return res
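# --- Usage sketch (assumes a pretrained regressor whose predict() returns at
# least (y_mean, y_lower, y_upper); the names below are placeholders) ---
#
#     recal = UCCRecalibration(base_model=pretrained_model)
#     recal.fit(X_calib, y_calib)                 # builds the UCC on held-out data
#     res = recal.predict(X_test, missrate=0.05)  # bounds recalibrated to ~5% missrate
#     print(res.y_mean, res.y_lower, res.y_upper)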
|
__init__.py | from .classification_calibration import ClassificationCalibration
|
classification_calibration.py | from collections import namedtuple
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder
from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ
class ClassificationCalibration(PostHocUQ):
"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows
non-sklearn models to be calibrated.
"""
def __init__(self, num_classes, fit_mode="features", method='isotonic', base_model_prediction_func=None):
"""
Args:
num_classes: number of classes.
            fit_mode: features or probs. If `probs`, `fit` and `predict` operate on the base model's probability
                scores, which is useful when these are precomputed.
method: isotonic or sigmoid.
base_model_prediction_func: the function that takes in the input features and produces base model's
probability scores. This is ignored when operating in `probs` mode.
"""
        super(ClassificationCalibration, self).__init__()
        self.num_classes = num_classes                          # stored for get_params()
        self.fit_mode = fit_mode                                # stored for get_params()
        self.base_model_prediction_func = base_model_prediction_func
        if fit_mode == "probs":
            # In this case, the fit assumes that it receives the probability scores of the base model.
            # create a dummy estimator
            self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
        else:
            self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
        self.method = method
def get_params(self, deep=True):
return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
"base_model_prediction_func": self.base_model_prediction_func}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
""" Fits calibration model using the provided calibration set.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.base_model.label_encoder_ = LabelEncoder().fit(y)
self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
cv="prefit",
method=self.method)
self.calib_model.fit(X, y)
return self
def predict(self, X):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
"""
y_prob = self.calib_model.predict_proba(X)
if len(np.shape(y_prob)) == 1:
y_pred_labels = y_prob > 0.5
else:
y_pred_labels = np.argmax(y_prob, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob'])
res = Result(y_pred_labels, y_prob)
return res
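# --- Usage sketch in `probs` mode (illustrative only; the names below are
# placeholders for a base model and calibration data) ---
#
#     scores = base_model.predict_proba(X_calib)   # precomputed base scores
#     cc = ClassificationCalibration(num_classes=scores.shape[1], fit_mode='probs')
#     cc.fit(scores, y_calib)
#     res = cc.predict(base_model.predict_proba(X_test))
#     print(res.y_pred, res.y_prob)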
|
auxiliary_interval_predictor.py | from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class _MLPNet_Main(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Main, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
class _MLPNet_Aux(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Aux, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
log_var = self.fc_log_var(x)
return log_var
class AuxiliaryIntervalPredictor(BuiltinUQ):
""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.
References:
.. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep
models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on
Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
"""
def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
"""
Args:
            model_type: The model type used to build the main model and the auxiliary model. Currently supported values
                are [mlp, custom]. `mlp` learns an MLP neural network using the PyTorch framework. For `custom` the user
                provides `main_model` and `aux_model`.
main_model: (optional) The main prediction model. Currently support pytorch models that return mean and log variance.
aux_model: (optional) The auxiliary prediction model. Currently support pytorch models that return calibrated log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
        super(AuxiliaryIntervalPredictor, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.main_model = _MLPNet_Main(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
self.aux_model = _MLPNet_Aux(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.main_model = main_model
self.aux_model = aux_model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
"aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}
def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
r = torch.abs(y_true - y_pred_mu)
        # note: the 0.5 * y_pred_log_var term of the full Gaussian NLL is omitted in this loss
loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \
self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux)))
return loss
def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
upper = y_pred_mu + deltau
lower = y_pred_mu - deltal
width = upper - lower
r = torch.abs(y_true - y_pred_mu)
emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))
loss_emce = torch.abs(self.config["calibration_alpha"]-emce)
loss_noise = torch.mean(torch.abs(0.5 * width - r))
loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness
def fit(self, X, y):
""" Fit the Auxiliary Interval Predictor model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])
for it in range(self.config["num_outer_iters"]):
# Train the main model
for epoch in range(self.config["num_main_iters"]):
avg_mean_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.main_model.train()
self.aux_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux)
optimizer_main_model.zero_grad()
main_loss.backward()
optimizer_main_model.step()
avg_mean_model_loss += main_loss.item()/len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))
# Train the auxiliary model
for epoch in range(self.config["num_aux_iters"]):
avg_aux_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.aux_model.train()
self.main_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)
optimizer_aux_model.zero_grad()
aux_loss.backward()
optimizer_aux_model.step()
avg_aux_model_loss += aux_loss.item() / len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.main_model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
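# --- Usage sketch listing the config keys consumed above (the values are
# illustrative assumptions, not recommended defaults) ---
if __name__ == '__main__':
    config = {'num_features': 1, 'num_outputs': 1, 'num_hidden': 20,
              'batch_size': 32, 'lr': 1e-3,
              'num_outer_iters': 1, 'num_main_iters': 1, 'num_aux_iters': 1,
              'lambda_match': 1.0, 'calibration_alpha': 0.95,
              'lambda_noise': 1.0, 'lambda_sharpness': 1.0}
    model = AuxiliaryIntervalPredictor(model_type='mlp', config=config, device='cpu', verbose=False)
    X, y = np.random.randn(64, 1), np.random.randn(64, 1)
    model.fit(X, y)
    print(model.predict(X).y_mean.shape)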
|
__init__.py | from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
|
bnn.py | import copy
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp
class BnnRegression(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.
References:
.. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
"""
def __init__(self, config, prior="Gaussian"):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnRegression, self).__init__()
        self.config = config
        self.prior = prior  # stored for get_params()
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config}
def fit(self, X, y):
""" Fit the BNN regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
neg_elbo[epoch] = loss.item()
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
                print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
                      .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item(),
                              self.net.get_noise_var()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
epistemic_out = np.zeros([mc_samples, X.shape[0]])
total_out = np.zeros([mc_samples, X.shape[0]])
for s in np.arange(mc_samples):
pred = self.net(X).data.numpy().ravel()
epistemic_out[s] = pred
total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
y_total_std = np.std(total_out, axis=0)
y_epi_std = np.std(epistemic_out, axis=0)
y_mean = np.mean(total_out, axis=0)
y_lower = y_mean - 2 * y_total_std
y_upper = y_mean + 2 * y_total_std
y_epi_lower = y_mean - 2 * y_epi_std
y_epi_upper = y_mean + 2 * y_epi_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',))
res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
class BnnClassification(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
"""
def __init__(self, config, prior="Gaussian", device=None):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnClassification, self).__init__()
        self.config = config
        self.device = device
        self.prior = prior  # stored for get_params()
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
if "batch_size" not in self.config:
self.config["batch_size"] = 50
self.net = self.net.to(device)
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config, "device": self.device}
def fit(self, X=None, y=None, train_loader=None):
""" Fits BNN regression model.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Ignored if train_loader is not None.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Ignored if train_loader is not None.
train_loader: pytorch train_loader object.
Returns:
self
"""
if train_loader is None:
train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long())
train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
avg_loss = 0.0
for batch_x, batch_y in train_loader:
loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
avg_loss += loss.item()
neg_elbo[epoch] = avg_loss / len(train_loader)
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
mc_samples: Number of Monte-Carlo samples.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
y_prob_var: ndarray of shape (n_samples,)
Variance of the prediction on the test points.
y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
Samples from the predictive distribution.
"""
X = torch.Tensor(X)
y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]
y_prob_samples_stacked = np.stack(y_prob_samples)
prob_mean = np.mean(y_prob_samples_stacked, 0)
prob_var = np.std(y_prob_samples_stacked, 0) ** 2
if len(np.shape(prob_mean)) == 1:
y_pred_labels = prob_mean > 0.5
else:
y_pred_labels = np.argmax(prob_mean, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)
return res
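# --- Usage sketch (illustrative only; note that fit/predict above operate on
# torch tensors directly, and the config keys mirror those read in __init__) ---
if __name__ == '__main__':
    config = {'ip_dim': 1, 'op_dim': 1, 'num_nodes': 20, 'num_layers': 1,
              'step_size': 1e-2, 'num_epochs': 20}
    bnn = BnnRegression(config, prior='Gaussian')
    X = torch.linspace(-1, 1, 64).unsqueeze(1)
    y = torch.sin(3 * X).squeeze()
    bnn.fit(X, y)
    res = bnn.predict(X, mc_samples=20)
    print(res.y_mean.shape, res.y_lower.shape, res.y_upper.shape)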
|
homoscedastic_gaussian_process_regression.py | from collections import namedtuple
import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class HomoscedasticGPRegression(BuiltinUQ):
""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise.
References:
.. [1] https://botorch.org/api/models.html#singletaskgp
"""
def __init__(self,
kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
likelihood=None,
config=None):
"""
Args:
kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
config: dictionary containing the config parameters for the model.
"""
        super(HomoscedasticGPRegression, self).__init__()
self.config = config
self.kernel = kernel
self.likelihood = likelihood
self.model = None
self.scaler = StandardScaler()
self.X_bounds = None
def get_params(self, deep=True):
return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}
def fit(self, X, y, **kwargs):
"""
Fit the GP Regression model.
Additional arguments relevant for SingleTaskGP fitting can be passed to this function.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
**kwargs: Additional arguments relevant for SingleTaskGP fitting.
Returns:
self
"""
y = self.scaler.fit_transform(y)
X, y = torch.tensor(X), torch.tensor(y)
self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
X.max() * torch.ones(X.shape[1])])
X = normalize(X, X_bounds)
model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs)
model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
model_homo_marginal_log_lik.eval()
self.model = model_homo_marginal_log_lik
self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze()
return self
def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
X = torch.tensor(X)
X_test_norm = normalize(X, self.X_bounds)
self.model.eval()
with torch.no_grad():
posterior = self.model.model.posterior(X_test_norm)
y_mean = posterior.mean
#y_epi_std = torch.sqrt(posterior.variance)
y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()
predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
#y_std = torch.sqrt(predictive_posterior.variance)
y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()
y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \
self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(),\
self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(),\
self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(),\
self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()
y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
y_std = (y_upper_total - y_lower_total) / 4.0
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
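# --- Usage sketch (illustrative only; note y must be 2-D so that the
# StandardScaler and SingleTaskGP receive (n_samples, 1) targets) ---
#
#     gp = HomoscedasticGPRegression()
#     gp.fit(X_train, y_train.reshape(-1, 1))
#     res = gp.predict(X_test, return_epistemic=True)
#     print(res.y_mean, res.y_lower_epistemic, res.y_upper_epistemic)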
|
__init__.py | from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression |
quantile_regression.py | from collections import namedtuple
from sklearn.ensemble import GradientBoostingRegressor
from uq360.algorithms.builtinuq import BuiltinUQ
class QuantileRegression(BuiltinUQ):
"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile
to obtain the prediction intervals.
"""
def __init__(self, model_type="gbr", config=None):
"""
Args:
model_type: The base model used for predicting a quantile. Currently supported values are [gbr].
gbr is sklearn GradientBoostingRegressor.
config: dictionary containing the config parameters for the model.
"""
        super(QuantileRegression, self).__init__()
if config is not None:
self.config = config
else:
self.config = {}
if "alpha" not in self.config:
self.config["alpha"] = 0.95
if model_type == "gbr":
self.model_type = model_type
self.model_mean = GradientBoostingRegressor(
loss='ls',
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=1.0 - self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"])
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config}
def fit(self, X, y):
""" Fit the Quantile Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.model_mean.fit(X, y)
self.model_lower.fit(X, y)
self.model_upper.fit(X, y)
return self
def predict(self, X):
"""
Obtain predictions for the test points.
        Returns the mean prediction along with the lower and upper quantile predictions.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_mean = self.model_mean.predict(X)
y_lower = self.model_lower.predict(X)
y_upper = self.model_upper.predict(X)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
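# --- Usage sketch listing the config keys consumed above (the values are
# illustrative assumptions) ---
if __name__ == '__main__':
    import numpy as np
    config = {'n_estimators': 100, 'max_depth': 3, 'learning_rate': 0.1,
              'min_samples_leaf': 5, 'min_samples_split': 10, 'alpha': 0.95}
    qr = QuantileRegression(model_type='gbr', config=config)
    X = np.random.randn(200, 2)
    y = X[:, 0] + 0.1 * np.random.randn(200)
    qr.fit(X, y)
    res = qr.predict(X[:3])
    print(res.y_lower, res.y_mean, res.y_upper)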
|
__init__.py | from .quantile_regression import QuantileRegression
|
__init__.py | from .infinitesimal_jackknife import InfinitesimalJackknife
|
infinitesimal_jackknife.py | from collections import namedtuple
import numpy as np
from uq360.algorithms.posthocuq import PostHocUQ
class InfinitesimalJackknife(PostHocUQ):
"""
Performs a first order Taylor series expansion around MLE / MAP fit.
Requires the model being probed to be twice differentiable.
"""
def __init__(self, params, gradients, hessian, config):
""" Initialize IJ.
Args:
params: MLE / MAP fit around which uncertainty is sought. d*1
gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n
hessian: Hessian evaluated at the MLE / MAP fit. d*d
"""
        super(InfinitesimalJackknife, self).__init__()
self.params_one = params
self.gradients = gradients
self.hessian = hessian
self.d, self.n = gradients.shape
self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients)
self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients)
self.w_one = np.ones([self.n])
self.config = config
def get_params(self, deep=True):
return {"params": self.params, "config": self.config, "gradients": self.gradients,
"hessian": self.hessian}
def _process_pretrained_model(self, *argv, **kwargs):
pass
def get_parameter_uncertainty(self):
if (self.config['resampling_strategy'] == "jackknife") or (self.config['resampling_strategy'] == "jackknife+"):
w_query = np.ones_like(self.w_one)
resampled_params = np.zeros([self.n, self.d])
for i in np.arange(self.n):
w_query[i] = 0
resampled_params[i] = self.ij(w_query)
w_query[i] = 1
return np.cov(resampled_params), resampled_params
elif self.config['resampling_strategy'] == "bootstrap":
pass
else:
raise NotImplementedError("Only jackknife, jackknife+, and bootstrap resampling strategies are supported")
def predict(self, X, model):
"""
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
model: model object, must implement a set_parameters function
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
n, _ = X.shape
y_all = model.predict(X)
_, d_out = y_all.shape
params_cov, params = self.get_parameter_uncertainty()
if d_out > 1:
print("Quantiles are computed independently for each dimension. May not be accurate.")
y = np.zeros([params.shape[0], n, d_out])
for i in np.arange(params.shape[0]):
model.set_parameters(params[i])
y[i] = model.predict(X)
y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0)
y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0)
y_mean = y.mean(axis=0)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
def ij(self, w_query):
"""
Args:
            w_query: An n*1 vector of data-point weights at which to query the parameters.
        Returns:
            new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T
def approx_ij(self, w_query):
"""
Args:
            w_query: An n*1 vector of data-point weights at which to query the parameters.
        Returns:
            new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T |
blackbox_metamodel_classification.py | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelClassification(PostHocUQ):
""" Extracts confidence scores from black-box classification models using a meta-model [4]_ .
References:
.. [4] Chen, Tongfei, et al. "Confidence scoring using whitebox meta-models with linear classifier probes."
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
""" Instantiates a model by name passed in 'mdltype'.
Args:
mdltype: string with name (must be supported)
config: dict with args passed in the instantiation call
Returns:
mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'lr':
mdl = LogisticRegression(**config)
elif mdltype == 'gbm':
mdl = GradientBoostingClassifier(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance.
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'),
(3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelClassification, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbm'
self.meta_model_default = 'lr'
self.base_config_default = {'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def _process_pretrained_model(self, X, y_hat_proba):
"""
        Given the original input features and the base output probabilities, generate input features
        to train a meta model. Current implementation copies all input features and appends the sorted
        class probabilities plus the margin between the top two classes.
:param X: numpy [nsamples, dim]
:param y_hat_proba: [nsamples, nclasses]
:return: array with new features [nsamples, newdim]
"""
assert (len(y_hat_proba.shape) == 2)
assert (X.shape[0] == y_hat_proba.shape[0])
# sort the probs sample by sample
faux1 = np.sort(y_hat_proba, axis=-1)
# add delta between top and second candidate
faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1)
return np.hstack([X, faux1, faux2])
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model,
array-like of shape (n_samples, n_features).
Features vectors of the training data.
:param y: ground truth for the base model,
array-like of shape (n_samples,)
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert (len(meta_train_data) == 2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta_proba = self.base_model.predict_proba(X_meta)
# determine correct-incorrect outcome - these are targets for the meta model trainer
# y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8)
y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# get input features for meta training
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba)
# train meta model to predict 'correct' vs. 'incorrect' of the base
self.meta_model.fit(X_meta_in, y_hat_meta_targets)
return self
def predict(self, X):
"""
Generate a base prediction along with uncertainty/confidence for data X.
:param X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
:return: namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_score: ndarray of shape (n_samples,)
                    Confidence score of the test points.
"""
y_hat_proba = self.base_model.predict_proba(X)
y_hat = np.argmax(y_hat_proba, axis=-1)
X_meta_in = self._process_pretrained_model(X, y_hat_proba)
z_hat = self.meta_model.predict_proba(X_meta_in)
index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0] # class 1 corresponds to probab of positive/correct outcome
Result = namedtuple('res', ['y_pred', 'y_score'])
res = Result(y_hat, z_hat[:, index_of_class_1])
return res
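# --- Usage sketch (illustrative addition, not part of the original API surface).
# A minimal, hedged example of the fit/predict flow documented above; the dataset
# shape and the names X_demo/y_demo are assumptions for demonstration only.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=500, n_features=8, random_state=0)
    uq = BlackboxMetamodelClassification()  # defaults: 'gbm' base model, 'lr' meta model
    uq.fit(X_demo, y_demo, meta_fraction=0.2)
    res = uq.predict(X_demo[:5])
    print(res.y_pred, res.y_score)  # predicted labels and meta-model confidence scores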
|
__init__.py | from .blackbox_metamodel_regression import BlackboxMetamodelRegression
from .blackbox_metamodel_classification import BlackboxMetamodelClassification
|
blackbox_metamodel_regression.py | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
            (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
        Given the original input features and the base output predictions, generate input features
        to train a meta model. Current implementation copies all input features and appends the base
        model's prediction.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
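# --- Usage sketch (illustrative addition): a hedged demo of the regression
# meta-model above. It assumes an sklearn version where the default 'ls' loss
# name in base_config_default is still accepted; data and names are illustrative.
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=400, n_features=6, noise=5.0, random_state=0)
    uq = BlackboxMetamodelRegression()  # defaults: 'gbr' for both base and meta models
    uq.fit(X_demo, y_demo)
    res = uq.predict(X_demo[:5])
    print(res.y_mean, res.y_lower, res.y_upper)  # prediction with a meta-model error band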
|
__init__.py | from .heteroscedastic_regression import HeteroscedasticRegression |
heteroscedastic_regression.py | from collections import namedtuple
import numpy as np
import torch
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet
np.random.seed(42)
torch.manual_seed(42)
class HeteroscedasticRegression(BuiltinUQ):
""" Wrapper for heteroscedastic regression. We learn to predict targets given features,
assuming that the targets are noisy and that the amount of noise varies between data points.
https://en.wikipedia.org/wiki/Heteroscedasticity
"""
def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The base model architecture. Currently supported values are [mlp].
                'mlp' model type learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the
                mean and variance of the Gaussian are functions of the data point -> N(y_n | mlp_mu(x_n), mlp_var(x_n)).
            model: (optional) The prediction model. Currently supports pytorch models that return mean and log variance.
            config: dictionary containing the config parameters for the model.
            device: device used for pytorch models; ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
        super(HeteroscedasticRegression, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.model = _MLPNet(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.model = model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "model": self.model,
"device": self.device, "verbose": self.verbose}
def _loss(self, y_true, y_pred_mu, y_pred_log_var):
return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 +
0.5 * y_pred_log_var)
def fit(self, X, y):
""" Fit the Heteroscedastic Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
for epoch in range(self.config["num_epochs"]):
avg_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.model.train()
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
                loss = self._loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()/len(dataset_loader)
if self.verbose:
print("Epoch: {}, loss = {}".format(epoch, avg_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
        In addition to the mean and lower/upper bounds, the full predictive distribution
        can also be returned (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
                y_dists: list of predictive distributions as `scipy.stats` objects with length n_samples.
                    Only returned when `return_dists` is True.
"""
self.model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
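# --- Usage sketch (illustrative addition): a hedged demo of the 'mlp' model type.
# The config keys mirror those consumed in __init__/fit above; the values and the
# synthetic data are assumptions for demonstration only.
if __name__ == '__main__':
    _X = np.random.randn(256, 3).astype('float32')
    _y = (_X.sum(axis=1, keepdims=True) + 0.1 * np.random.randn(256, 1)).astype('float32')
    cfg = {'num_features': 3, 'num_outputs': 1, 'num_hidden': 32,
           'batch_size': 32, 'lr': 1e-3, 'num_epochs': 5}
    reg = HeteroscedasticRegression(model_type='mlp', config=cfg, device='cpu', verbose=False)
    reg.fit(_X, _y)
    out = reg.predict(_X[:10])
    print(out.y_mean.shape, out.y_lower.shape, out.y_upper.shape)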
|
__init__.py | from .meps_dataset import MEPSDataset
|
meps_dataset.py | # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py
# Utilization target is kept as a continuous target.
import os
import pandas as pd
def default_preprocessing(df):
"""
    1. Create a new column, RACE, that is 'White' if RACEV2X = 1 and HISPANX = 2 (i.e., non-Hispanic White)
       and 'Non-White' otherwise
2. Restrict to Panel 19
3. RENAME all columns that are PANEL/ROUND SPECIFIC
4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1
5. Compute UTILIZATION.
"""
def race(row):
if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
return 'White'
return 'Non-White'
df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
df = df.rename(columns = {'RACEV2X' : 'RACE'})
df = df[df['PANEL'] == 19]
# RENAME COLUMNS
df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
df = df[df['REGION'] >= 0] # remove values -1
df = df[df['AGE'] >= 0] # remove values -1
df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
def utilization(row):
return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'})
df = df[['REGION','AGE','SEX','RACE','MARRY',
'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']]
return df
class MEPSDataset():
"""
The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals,
medical providers, and employers, and collects data on health services used, costs & frequency of services,
demographics, health status and conditions, etc., of the respondents.
This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19,
and rounds 1, 2, and 3 of Panel 20.
See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data.
References:
.. [#] `Medical Expenditure Panel Survey data <https://meps.ahrq.gov/mepsweb/>`_
"""
def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None):
self._dirpath = dirpath
if not self._dirpath:
self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data')
self._filepath = os.path.join(self._dirpath, 'h181.csv')
try:
df = pd.read_csv(self._filepath, sep=',', na_values=[])
except IOError as err:
print("IOError: {}".format(err))
print("To use this class, please place the heloc_dataset.csv:")
print("file, as-is, in the folder:")
print("\n\t{}\n".format(os.path.abspath(os.path.join(
os.path.abspath(__file__), 'data', 'meps_data'))))
import sys
sys.exit(1)
if custom_preprocessing:
self._data = custom_preprocessing(df)
def data(self):
return self._data |
logistic_regression.py | import autograd
import autograd.numpy as np
import numpy.random as npr
import scipy.optimize
sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1)
get_num_train = lambda inputs: inputs.shape[0]
logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params))
class LogisticRegression:
def __init__(self):
self.params = None
def set_parameters(self, params):
self.params = params
def predict(self, X):
if self.params is not None:
# Outputs probability of a label being true according to logistic model
return np.atleast_2d(sigmoid(np.dot(X, self.params))).T
else:
raise RuntimeError("Params need to be fit before predictions can be made.")
def loss(self, params, weights, inputs, targets):
# Training loss is the negative log-likelihood of the training labels.
preds = logistic_predictions(params, inputs)
label_probabilities = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(weights * np.log(label_probabilities + 1e-16))
def fit(self, weights, init_params, inputs, targets, verbose=True):
training_loss_fun = lambda params: self.loss(params, weights, inputs, targets)
# Define a function that returns gradients of training loss using Autograd.
training_gradient_fun = autograd.grad(training_loss_fun, 0)
# optimize params
if verbose:
print("Initial loss:", self.loss(init_params, weights, inputs, targets))
# opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1)
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options={'disp': verbose})
opt_params = res.x
if verbose:
print("Trained loss:", self.loss(opt_params, weights, inputs, targets))
self.params = opt_params
return opt_params
def get_test_acc(self, params, test_targets, test_inputs):
        preds = np.round(self.predict(test_inputs).T).astype(int)
        err = np.abs(test_targets - preds).sum()
        return 1 - err / test_targets.shape[1]
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one, inputs, targets):
return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets)
def compute_jacobian(self, params_one, weights_one, inputs, targets):
return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\
(params_one, weights_one, inputs, targets).squeeze()
###################################################
@staticmethod
def synthetic_lr_data(N=10000, D=10):
x = 1. * npr.randn(N, D)
x_test = 1. * npr.randn(int(0.3 * N), D)
w = npr.randn(D, 1)
y = sigmoid((x @ w)).ravel()
y = npr.binomial(n=1, p=y) # corrupt labels
y_test = sigmoid(x_test @ w).ravel()
# y_test = np.round(y_test)
y_test = npr.binomial(n=1, p=y_test)
return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test)
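# --- Usage sketch (illustrative addition): a hedged end-to-end run on the
# synthetic data helper above. Uniform sample weights and the small random
# initialization are assumptions for demonstration only.
if __name__ == '__main__':
    x, y, x_test, y_test = LogisticRegression.synthetic_lr_data()
    model = LogisticRegression()
    weights = np.ones(y.shape[1])            # one weight per training sample
    init_params = 1e-2 * npr.randn(x.shape[1])
    model.fit(weights, init_params, x, y.ravel(), verbose=False)
    print('test accuracy:', model.get_test_acc(model.params, y_test, x_test))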
|
hidden_markov_model.py | import autograd
import autograd.numpy as np
import scipy.optimize
from autograd import grad
from autograd.scipy.special import logsumexp
from sklearn.cluster import KMeans
class HMM:
"""
A Hidden Markov Model with Gaussian observations with
unknown means and known precisions.
"""
def __init__(self, X, config_dict=None):
self.N, self.T, self.D = X.shape
self.K = config_dict['K'] # number of HMM states
self.I = np.eye(self.K)
self.Precision = np.zeros([self.D, self.D, self.K])
self.X = X
if config_dict['precision'] is None:
for k in np.arange(self.K):
self.Precision[:, :, k] = np.eye(self.D)
else:
self.Precision = config_dict['precision']
self.dParams_dWeights = None
self.alphaT = None # Store the final beliefs.
self.beta1 = None # store the first timestep beliefs from the beta recursion.
self.forward_trellis = {} # stores \alpha
self.backward_trellis = {} # stores \beta
def initialize_params(self, seed=1234):
np.random.seed(seed)
param_dict = {}
A = np.random.randn(self.K, self.K)
# use k-means to initialize the mean parameters
X = self.X.reshape([-1, self.D])
kmeans = KMeans(n_clusters=self.K, random_state=seed,
n_init=15).fit(X)
labels = kmeans.labels_
_, counts = np.unique(labels, return_counts=True)
pi = counts
phi = kmeans.cluster_centers_
param_dict['A'] = np.exp(A)
param_dict['pi0'] = pi
param_dict['phi'] = phi
return self.pack_params(param_dict)
def unpack_params(self, params):
param_dict = dict()
K = self.K
# For unpacking simplex parameters: have packed them as
# log(pi[:-1]) - log(pi[-1]).
unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1),
np.zeros((K, 1)),
axis=1)
)
Z = np.sum(unnorm_A[:, :-1], axis=1)
unnorm_A /= Z[:, np.newaxis]
norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True)
param_dict['A'] = norm_A
unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0))
Z = np.sum(unnorm_pi[:-1])
unnorm_pi /= Z
param_dict['pi0'] = unnorm_pi / unnorm_pi.sum()
param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K)
return param_dict
def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Computes the weighted marginal probability of the sequence xseq given parameters;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
        :param A: K * K transition matrix
        :return: weighted log marginal likelihood of the sequence
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
alpha = np.log(pi.ravel()) + wseq[0] * ll[0]
if wseq[0] == 0:
self.forward_trellis[0] = alpha[:, np.newaxis]
for t in np.arange(1, self.T):
alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t]
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.forward_trellis[t] = alpha[:, np.newaxis]
if store_belief:
# store the final belief
self.alphaT = alpha
return logsumexp(alpha)
def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Runs beta recursion;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
        :param A: K * K transition matrix
        :return: weighted log marginal likelihood of the sequence
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
beta = np.zeros_like(pi.ravel()) # log(\beta) of all ones.
max_t = ll.shape[0]
if wseq[max_t - 1] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[max_t - 1] = beta[:, np.newaxis]
for i in np.arange(1, max_t):
t = max_t - i - 1
beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1)
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[t] = beta[:, np.newaxis]
# account for the init prob
beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel())
if store_belief:
# store the final belief
self.beta1 = beta
return logsumexp(beta)
def weighted_loss(self, params, weights):
"""
For LOOCV / IF computation within a single sequence. Uses weighted alpha recursion
        :param params: packed parameter vector
        :param weights: per-timestep weights; zeros turn off the corresponding emissions
        :return: negative weighted log posterior
"""
param_dict = self.unpack_params(params)
logp = self.get_prior_contrib(param_dict)
logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights)
return -logp
def loss_at_missing_timesteps(self, weights, params):
"""
:param weights: zeroed out weights indicate missing values
:param params: packed parameters
        :return: negative predictive log likelihood at each missing timestep
"""
# empty forward and backward trellis
self.clear_trellis()
param_dict = self.unpack_params(params)
# populate forward and backward trellis
lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True )
lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True)
assert np.allclose(lpx, lpx_alt) # sanity check
test_ll = []
# compute loo likelihood
ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision)
# compute posterior p(z_t | x_1,...t-1, t+1,...T) \forall missing t
tsteps = []
for t in self.forward_trellis.keys():
lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx
test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel()))
tsteps.append(t)
# empty forward and backward trellis
self.clear_trellis()
return -np.array(test_ll)
def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None):
if maxiter:
options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter}
else:
options_dict = {'disp': verbose, 'gtol': 1e-10}
# Define a function that returns gradients of training loss using Autograd.
training_loss_fun = lambda params: self.weighted_loss(params, weights)
training_gradient_fun = grad(training_loss_fun, 0)
if init_params is None:
init_params = self.initialize_params()
if verbose:
print("Initial loss: ", training_loss_fun(init_params))
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options=options_dict)
if verbose:
print('grad norm =', np.linalg.norm(res.jac))
return res.x
def clear_trellis(self):
self.forward_trellis = {}
self.backward_trellis = {}
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one):
return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one)
def compute_jacobian(self, params_one, weights_one):
return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\
(params_one, weights_one).squeeze()
###################################################
@staticmethod
def log_obs_lik(x, phi, Sigma):
"""
:param x: T*D*1
:param phi: 1*D*K
:param Sigma: D*D*K --- precision matrices per state
:return: ll
"""
centered_x = x - phi
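        # Descriptive comment added for clarity: the einsum 'tdk,tdk,ddk->tk' sums
        # over d, so only the diagonal of each state's precision matrix contributes
        # to the log likelihood computed here.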
ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma )
return ll
@staticmethod
def pack_params(params_dict):
param_list = [(np.log(params_dict['A'][:, :-1]) -
np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(),
np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]),
params_dict['phi'].ravel()]
return np.concatenate(param_list)
@staticmethod
def get_prior_contrib(param_dict):
logp = 0.0
# Prior
logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum()
logp += (1.1 - 1) * np.log(param_dict['A']).sum()
logp += (1.1 - 1) * np.log(param_dict['pi0']).sum()
return logp
@staticmethod
def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False):
"""
:param T: length of the sequence
:param pct_to_drop: % of T in the held out fold
:param contiguous: if True generate a block of indices to drop else generate indices by iid sampling
:return: o (the set of indices in the fold)
"""
if contiguous:
l = np.floor(pct_to_drop / 100. * T)
anchor = np.random.choice(np.arange(l + 1, T))
o = np.arange(anchor - l, anchor).astype(int)
else:
# i.i.d LWCV
            o = np.random.choice(T - 2, size=int(pct_to_drop / 100. * T), replace=False) + 1
return o
@staticmethod
    def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, variances_of_mean=1.0,
diagonal_upweight=False):
"""
:param K: Number of HMM states
        :param T: length of the sequence
        :param D: dimension of the Gaussian emissions
        """
N = 1 # For structured IJ we will remove data / time steps from a single sequence
np.random.seed(seed)
if sigma0 is None:
sigma0 = np.eye(D)
A = np.random.dirichlet(alpha=np.ones(K), size=K)
if diagonal_upweight:
A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions
            A = A / A.sum(axis=1, keepdims=True)
pi0 = np.random.dirichlet(alpha=np.ones(K))
        mus = np.random.normal(size=(K, D), scale=np.sqrt(variances_of_mean))
        zs = np.empty((N, T), dtype=int)
X = np.empty((N, T, D))
for n in range(N):
zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0))
X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0)
for t in range(1, T):
zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :]))
X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0)
return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus}
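# --- Usage sketch (illustrative addition): a hedged demo that fits the HMM on a
# short synthetic sequence from the helper above. The K/T/D values and maxiter
# are illustrative assumptions; all timestep weights are 1 (no held-out fold).
if __name__ == '__main__':
    data = HMM.synthetic_hmm_data(K=3, T=60, D=2, seed=0)
    hmm = HMM(data['X'], config_dict={'K': 3, 'precision': None})
    weights = np.ones(data['X'].shape[1])  # one weight per timestep
    opt_params = hmm.fit(weights, verbose=False, maxiter=25)
    print('negative log posterior at optimum:', hmm.weighted_loss(opt_params, weights))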
|
misc.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
from copy import deepcopy
import numpy as np
import numpy.random as npr
def make_batches(n_data, batch_size):
return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)]
def generate_regression_data(seed, data_count=500):
"""
Generate data from a noisy sine wave.
:param seed: random number seed
:param data_count: number of data points.
    :return: x_train, y_train, x_test, y_test, train_stats (dict with training target mean 'mu' and std 'sigma')
"""
np.random.seed(seed)
noise_var = 0.1
x = np.linspace(-4, 4, data_count)
y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
x_train = x[idx[:train_count], np.newaxis ]
x_test = x[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(x_train, 0)
std = np.std(x_train, 0)
x_train = (x_train - mu) / std
x_test = (x_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
y_train = (y_train - mu) / std
train_stats = dict()
train_stats['mu'] = mu
train_stats['sigma'] = std
return x_train, y_train, x_test, y_test, train_stats
def form_D_for_auucc(yhat, zhatl, zhatu):
# a handy routine to format data as needed by the UCC fit() method
D = np.zeros([yhat.shape[0], 3])
D[:, 0] = yhat.squeeze()
D[:, 1] = zhatl.squeeze()
D[:, 2] = zhatu.squeeze()
return D
def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper):
"""
Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference
:param y_pred_lower:
:param y_pred_mean:
:param y_pred_upper:
:param y_true:
:return: ucc object fitted for two systems: target + null reference
"""
# form matrix for ucc:
X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(),
y_pred_mean.squeeze() - y_pred_lower.squeeze(),
y_pred_upper.squeeze() - y_pred_mean.squeeze())
# form matrix for a 'null' system (constant band)
X_null = deepcopy(X_for_ucc)
X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC)
# create an instance of ucc and fit data
from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
u = ucc()
u.fit([X_for_ucc, X_null], y_true.squeeze())
return u
def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs):
"""
Args:
task_type: (str) regression or classification.
metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for
classification and ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] for regression.
        greater_is_better: if False, the scores are negated before returning.
**kwargs: additional arguments specific to some metrics.
Returns:
sklearn compatible scorer function.
"""
from uq360.metrics.classification_metrics import compute_classification_metrics
from uq360.metrics.regression_metrics import compute_regression_metrics
def sklearn_compatible_score(model, X, y_true):
"""
Args:
model: The model being scored. Currently uq360 and sklearn models are supported.
X: Input features.
y_true: ground truth values for the target.
Returns:
Computed score of the model.
"""
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.algorithms.posthocuq import PostHocUQ
if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ):
# uq360 models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict(X).y_prob,
option=metric,
**kwargs
)[metric]
elif task_type == "regression":
y_mean, y_lower, y_upper = model.predict(X)
score = compute_regression_metrics(
y_true=y_true,
y_mean=y_mean,
y_lower=y_lower,
y_upper=y_upper,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError
else:
# sklearn models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict_proba(X),
option=metric,
**kwargs
)[metric]
else:
if metric in ["rmse", "r2"]:
score = compute_regression_metrics(
y_true=y_true,
y_mean=model.predict(X),
y_lower=None,
y_upper=None,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError("{} is not supported for sklearn regression models".format(metric))
if not greater_is_better:
score = -score
return score
return sklearn_compatible_score
class DummySklearnEstimator(ABC):
def __init__(self, num_classes, base_model_prediction_fn):
self.base_model_prediction_fn = base_model_prediction_fn
self.classes_ = [i for i in range(num_classes)]
def fit(self):
pass
def predict_proba(self, X):
return self.base_model_prediction_fn(X)
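# --- Usage sketch (illustrative addition): a hedged demo of the scorer factory
# above with a plain sklearn classifier. The metric choice and data are
# assumptions; see the metric lists in the factory's docstring for options.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X_demo, y_demo = make_classification(n_samples=200, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_demo, y_demo)
    scorer = make_sklearn_compatible_scorer('classification', 'accuracy')
    print('accuracy:', scorer(clf, X_demo, y_demo))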
|
optimizers.py | from builtins import range
import autograd.numpy as np
def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False):
"""Adapted from autograd.misc.optimizers"""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g, polyak)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size*mhat/(np.sqrt(vhat) + eps)
return x |
generate_1D_regression_data.py | import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch
def make_data_gap(seed, data_count=100):
import GPy
    npr.seed(seed)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \sigma^2). \sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
X_train = X[idx[:train_count], np.newaxis ]
X_test = X[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(X_train, 0)
std = np.std(X_train, 0)
X_train = (X_train - mu) / std
X_test = (X_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
# mu = 0
# std = 1
y_train = (y_train - mu) / std
y_test = (y_test -mu) / std
train_stats = dict()
train_stats['mu'] = torch.FloatTensor([mu])
train_stats['sigma'] = torch.FloatTensor([std])
return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\
train_stats |
dataTransformer.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
import numpy as np
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
import logging
class dataTransformer():
def __init__(self):
self.log = logging.getLogger('eion')
def startTransformer(self,df,features,target,transType):
scaler ='None'
if target in features:
features.remove(target)
transFeatures=features
transDfColumns=[]
dataframe=df[transFeatures]
#targetArray=np.array(df[target])
#targetArray.shape = (len(targetArray), 1)
self.log.info("Data Normalization has started")
if transType.lower() =='standardscaler':
scaler = StandardScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='minmax':
scaler=MinMaxScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='lognormal':
            self.log.info('Applying lognormal (Yeo-Johnson power) transformation')
scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe)
transDf = scaler.transform(dataframe)
        else:
            self.log.info("Transformation type '{}' is not supported; returning data unscaled".format(transType))
            transDf = dataframe.values
#features.append(target)
#scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features)
return transDf,features,scaler |
preprocess.py | import pandas as pd
tab = ' '
VALID_AGGREGATION_METHODS = ['mean','sum']
VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year']
VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}}
VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys())
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def get_source_delta( data: pd.DataFrame):
MAX_SAMPLE_TRY = 20
if len( data) <= 1:
return None
time_delta = data.index[-1] - data.index[-2]
count = {}
for i in range(len(data)):
        if i == MAX_SAMPLE_TRY or i == len(data) - 1:
break
delta = data.index[i+1] - data.index[i]
if delta not in count.keys():
count[delta] = 1
else:
count[delta] += 1
if count:
return max(count, key=count.get)
else:
return None
class timeSeries():
def __init__( self, config, datetime, log=None):
self.datetime = datetime
self.validate_config(config)
self.log = log
def validate_config( self, config):
if not self.datetime or self.datetime.lower() == 'na':
raise ValueError('date time feature is not provided')
self.config = {}
method = get_one_true_option(config.get('interpolation',None))
self.config['interpolate'] = {}
self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS
self.config['interpolate']['method'] = method
self.config['rolling'] = {}
self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False))
self.config['rolling']['size'] = int( config.get('rollingWindowSize',1))
if self.config['rolling']['size'] < 1:
raise ValueError('Rolling window size should be greater than 0.')
        aggregation = config.get('aggregation',{})
        agg_method = get_one_true_option(aggregation.get('type', {}))
        self.config['aggregation'] = {}
self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS
self.config['aggregation']['method'] = agg_method
granularity = aggregation.get('granularity',{})
granularity_unit = get_one_true_option( granularity.get('unit',None))
if granularity_unit in VALID_GRANULARITY_UNITS:
granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'}
size = int(granularity.get('size',10))
granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit)
self.config['aggregation']['granularity'] = {}
self.config['aggregation']['granularity']['unit'] = granularity_unit
self.config['aggregation']['granularity']['size'] = size
def log_info(self, msg, type='info'):
if self.log:
if type == 'error':
self.log.error( msg)
else:
self.log.info( msg)
else:
print( msg)
def is_down_sampling(self, data, size, granularity_unit):
down_sampling = False
if granularity_unit in ['M', 'Y']:
return True
else:
target_delta = pd.Timedelta(size , granularity_unit)
source_delta = get_source_delta(data)
if not source_delta:
raise ValueError('Could not find the data frame time frequency')
return source_delta < target_delta
def run( self, data):
if self.datetime not in data.columns:
raise ValueError(f"Date time feature '{self.datetime}' is not present in data")
try:
# data[self.datetime] = pd.to_datetime( data[self.datetime])
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime])
except:
#for utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True)
data.set_index( self.datetime, inplace=True)
except:
raise ValueError(f"can not convert '{self.datetime}' to dateTime")
if self.config.get('interpolate',{}).get('enabled',False):
method = self.config['interpolate']['method']
self.log_info(f"Applying Interpolation using {method}")
methods_mapping = {'timebased': 'time'}
self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method)
data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method])
if self.config.get('rolling',{}).get('enabled',False):
if self.config['rolling']['size'] > len( data):
raise ValueError('Rolling window size is greater than dataset size')
self.log_info(f"Applying rolling window( moving avg) with size {self.config['rolling']['size']}")
data = data.rolling( self.config['rolling']['size']).mean()
data = data.iloc[self.config['rolling']['size'] - 1:]
aggregation = self.config.get('aggregation',{})
if aggregation.get('enabled',False):
method = aggregation.get('method','mean')
self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit']
if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']):
self.log_info(f"Applying down sampling( {self.rule})")
if method == 'mean':
data = data.resample( self.rule).mean()
elif method == 'sum':
data = data.resample( self.rule).sum()
else:
self.log_info(f"Applying up sampling using forward fill method( {self.rule})")
data = data.resample( self.rule).ffill()
data.reset_index( inplace=True, names=self.datetime)
return data
def get_code(self, indent=0):
tab = ' '
code = ''
code += f"""
def preprocess( data):
try:
#for non utc timestamp
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'])
except:
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True)
data.set_index( '{self.datetime}', inplace=True)
"""
if self.config.get('interpolate',{}).get('enabled',False):
code += tab + f"data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\n"
if self.config.get('rolling',{}).get('enabled',False):
code += tab + f"data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\n"
if self.config.get('aggregation',{}).get('enabled',False):
code += tab + f"data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\n"
code += tab + f"data.reset_index( inplace=True, names='{self.datetime}')\n"
code += tab + "return data\n"
return code
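# --- Usage sketch (illustrative addition): a hedged demo of the timeSeries
# preprocessor above. The nested config mirrors the keys consumed by
# validate_config; the exact values and the column name are assumptions.
if __name__ == '__main__':
    idx = pd.date_range('2022-01-01', periods=48, freq='H')
    demo = pd.DataFrame({'timestamp': idx, 'value': [float(i) for i in range(48)]})
    demo.loc[5, 'value'] = None  # gap for the interpolation step to fill
    conf = {'interpolation': {'linear': 'True'},
            'rollingWindow': 'False',
            'aggregation': {'type': {'mean': 'True'},
                            'granularity': {'unit': {'day': 'True'}, 'size': 1}}}
    ts = timeSeries(conf, 'timestamp')
    print(ts.run(demo))  # interpolated, then down-sampled to daily means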
|
textDataProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import os
import sys
import string
import spacy
#import en_core_web_sm
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
try:
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
except:
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.base import TransformerMixin
from nltk.stem import WordNetLemmatizer
import re
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
class textDataProfiler():
def __init__(self):
self.data=None
#self.nlp=en_core_web_sm.load()
self.punctuations = string.punctuation
self.stopwords = list(STOP_WORDS)
def startTextProfiler(self,df,target):
try:
dataColumns = list(df.columns)
print(' \n No of rows and columns in dataFrame',df.shape)
print('\n features in dataFrame',dataColumns)
dataFDtypes=self.dataFramecolType(df)
print('\n feature types in dataFrame',dataFDtypes)
trainX=df['text']
trainY=df[target]
return trainX,trainY
except Exception as inst:
print('startTextProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except Exception as e:
print("error in dataFramecolyType",e)
return dataFDtypes
def textTokenizer(self,text):
try:
parser = English()
tokens = parser(text)
tokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in tokens ]
tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ]
return tokens
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def cleanText(self,text):
try:
text=str(text).strip().lower()
for punctuation in string.punctuation:
text = text.replace(punctuation, '')
return text
except Exception as inst:
print('cleanText code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def textTokenization(self,text):
try:
tokenizedText=word_tokenize(text)
return tokenizedText
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def textLemmitizer(self,text):
try:
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
Final_words = []
word_Lemmatized = WordNetLemmatizer()
for word, tag in pos_tag(text):
if word not in stopwords.words('english') and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
return str(Final_words)
except Exception as inst:
print('textLemmitizer code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
class TextCleaner(TransformerMixin):
def clean_text(self,text):
try:
text=str(text).strip().lower()
text = text.replace("isn't", "is not")
text = text.replace("aren't", "are not")
text = text.replace("ain't", "am not")
text = text.replace("won't", "will not")
text = text.replace("didn't", "did not")
text = text.replace("shan't", "shall not")
text = text.replace("haven't", "have not")
text = text.replace("hadn't", "had not")
text = text.replace("hasn't", "has not")
text = text.replace("don't", "do not")
text = text.replace("wasn't", "was not")
text = text.replace("weren't", "were not")
text = text.replace("doesn't", "does not")
text = text.replace("'s", " is")
text = text.replace("'re", " are")
text = text.replace("'m", " am")
text = text.replace("'d", " would")
text = text.replace("'ll", " will")
text = re.sub(r'^https?:\/\/.*[\r\n]*', ' ', text, flags=re.MULTILINE)
text = re.sub(r'[\w\.-]+@[\w\.-]+', ' ', text, flags=re.MULTILINE)
for punctuation in string.punctuation:
text = text.replace(punctuation,' ')
text = re.sub(r'[^A-Za-z0-9\s]',r' ',text)
text = re.sub(r'\n',r' ',text)
text = re.sub(r'[0-9]',r' ',text)
wordnet_lemmatizer = WordNetLemmatizer()
text = " ".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()])
return text
except Exception as inst:
print('TextCleaner clean_text code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def text_cleaner(self,text):
text = self.clean_text(text)
stop_words = set(stopwords.words('english'))
text_tokens = word_tokenize(text)
out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1))
return(out)
def transform(self, X, **transform_params):
# Cleaning Text
return [self.clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {} |
imageAug.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import random
from matplotlib import pyplot as plt
import cv2
import albumentations as A
import os
import pandas as pd
from pathlib import Path
class ImageAugmentation():
def __init__(self, dataLocation, csvFile):
self.AugmentationOptions = {"Flip": {"operation": A.HorizontalFlip, "suffix":"_flip"},
"Rotate": {"operation": A.Rotate, "suffix":"_rotate"},
"Shift": {"operation": A.RGBShift, "suffix":"_shift"},
"Crop": {"operation": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], "suffix":"_crop"},
"Contrast": {"operation": A.RandomContrast, "suffix":"_cont"},
"Brightness": {"operation": A.RandomBrightness, "suffix":"_bright"},
"Blur": {"operation": A.GaussianBlur, "suffix":"_blur"}
}
self.dataLocation = dataLocation
self.csvFile = csvFile
def __applyAugmentationClass(self, image, augmentation,limit):
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transform = self.AugmentationOptions[augmentation]["operation"][0](height=int(height*crop_percentage), width=int(width*crop_percentage) )
elif augmentation == "Blur":
transform = self.AugmentationOptions[augmentation]["operation"](blur_limit = limit)
elif augmentation in ["Contrast","Brightness"]:
transform = self.AugmentationOptions[augmentation]["operation"](limit = limit)
else:
transform = self.AugmentationOptions[augmentation]["operation"]()
return transform(image=image)
def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7):
transformOptions = []
if bboxes:
bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids'])
else:
bbox_params = None
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transformOptions.append(self.AugmentationOptions[augmentation]["operation"][1](height=int(height*crop_percentage), width=int(width*crop_percentage) ))
elif augmentation == "Blur":
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](blur_limit = limit))
elif augmentation in ["Contrast","Brightness"]:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](limit = limit))
else:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"]())
transform = A.Compose(
transformOptions,
bbox_params=bbox_params,
)
random.seed(seed)
return transform(image=image, bboxes=bboxes, category_ids=category_ids)
else:
return None
def getBBox(self, df, imageLoc, category_name_to_id):
subDf = df[df['loc']==imageLoc]
boxes = []
category = []
for index, row in subDf.iterrows():
boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']])
category.append(category_name_to_id[row['Label']])
return boxes, category
def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op):
for x in range(numberofImages):
bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id)
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
count = 1
row = df[df['loc']==imageLoc].iloc[0]
filename = (Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
newImage = str(Path(imageLoc).parent/filename)
for index,bbox in enumerate(transformed['bboxes']):
data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True}
count += 1
                df = pd.concat([df, pd.DataFrame([data])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
cv2.imwrite(newImage, transformed['image'])
return df
def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}):
category_id_to_name = {v+1:k for v,k in enumerate(classes_names)}
category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)}
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
                        limit = ast.literal_eval(augConf[key].get('limit','0.2'))  # literal_eval is safer than eval for numeric/tuple limits
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key)
return df
def __augClassificationImage(self, imageLoc, df,limit,imageindex,op):
data = {}
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentationClass(image, op,limit)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
location = Path(imageLoc).parent
        filename = (Path(imageLoc).stem + '_' + str(imageindex) + self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image'])
data['File'] = filename
data['Label'] = df[df['File']==Path(imageLoc).name]["Label"].iloc[0]
data['AugmentedImage'] = True
data['loc'] = str(location/filename)
return data
def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None):
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
                        limit = ast.literal_eval(augConf[key].get('limit','0.2'))  # literal_eval is safer than eval for numeric/tuple limits
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
for x in range(numberofImages):
rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key)
                        df = pd.concat([df, pd.DataFrame([rows])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
return df
def removeAugmentedImages(self, df):
removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist()
#df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink())
for file in removeDf:
if file:
Path(file).unlink()
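    # Hedged usage sketch (comment-only; the paths and options below are
    # hypothetical examples, not part of the original file):
    #   aug = ImageAugmentation('data/images', 'data/labels.csv')
    #   csv_path = aug.augment(modelType='imageclassification',
    #                          params={'Flip': True, 'Blur': True},
    #                          augConf={'Blur': {'limit': '3', 'noOfImages': '2'}})
    # augment() rewrites the label csv in place and returns its path.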
def augment(self, modelType="imageclassification",params=None,csvSavePath = None,augConf={}):
if isinstance(params, dict) and any(params.values()):
df = pd.read_csv(self.csvFile)
if not self.dataLocation.endswith('/'):
images = self.dataLocation+'/'
else:
images = self.dataLocation
            images = images + df['File']  # prefix each file name with the data folder (same for both model types)
df['loc'] = images
images = set(images.tolist())
option = {}
for key in list(self.AugmentationOptions.keys()):
option[key] = params.get(key, False)
            optionDf = pd.DataFrame([option] * len(images))  # one identical row of options per image (DataFrame.append was removed in pandas 2.0)
if modelType == "imageclassification":
df = self.__classification(images, df, optionDf,augConf)
else:
classes_names = sorted(df['Label'].unique().tolist())
df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf)
df.to_csv(self.csvFile, index=False)
return self.csvFile |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
textProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import logging
from distutils.util import strtobool
import pandas as pd
from text import TextProcessing
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def textProfiler(self, textCorpus, conf_json, pipeList, max_features):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', [])
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
if cleaning_kwargs['fExpandContractions']:
cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
textCorpus = textProcessor.transform(textCorpus)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib)
textCorpus = posTagger.transform(textCorpus)
pipeList.append(("posTagger",posTagger))
ngram_min = 1
ngram_max = 1
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
        conversion_method = get_one_true_option(textConversionMethod, None)
        if conversion_method and conversion_method.lower() == "countvectors":
X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
            df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())  # get_feature_names() was renamed in scikit-learn 1.0 and removed in 1.2
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["word2vec","fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method)
wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel()
X = wordEmbeddingVecotrizer.transform(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",wordEmbeddingVecotrizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer":
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2')
X = model.encode(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",model))
self.log.info('----------> Conversion Method: SentenceTransformer')
elif conversion_method.lower() == 'tf_idf':
X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
            df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())  # get_feature_names() was renamed in scikit-learn 1.0 and removed in 1.2
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: TF_IDF')
else:
df1 = pd.DataFrame()
df1['tokenize'] = textCorpus
self.log.info('----------> Conversion Method: NA')
return df1, pipeList,conversion_method
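# --- Added illustration (not part of the original module): the config
# convention used throughout is a dict of options where exactly one value is
# 'True'; get_one_true_option returns that option's key, else the default.
if __name__ == '__main__':
    print(get_one_true_option({'lemmatization': 'True', 'stemming': 'False'}, 'lemmatization'))  # -> lemmatization
    print(get_one_true_option({'stemming': 'False'}, 'default'))  # no true option -> default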
|
generate_tfrecord.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import glob
import pandas as pd
import io
import xml.etree.ElementTree as ET
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util, label_map_util
from collections import namedtuple
from pathlib import Path
def class_text_to_int(row_label, label_map_dict):
return label_map_dict[row_label]
def split(df, group):
data = namedtuple('data', ['File', 'object'])
gb = df.groupby(group)
return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path, label_map_dict):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
File = group.File.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmin_n = min(row['xmin'], row['xmax'])
xmax_n = max(row['xmin'], row['xmax'])
ymin_n = min(row['ymin'], row['ymax'])
ymax_n = max(row['ymin'], row['ymax'])
xmin_new = min(xmin_n, width)
xmax_new = min(xmax_n, width)
ymin_new = min(ymin_n, height)
ymax_new = min(ymax_n, height)
xmn = xmin_new / width
xmins.append(xmn)
xmx = xmax_new / width
xmaxs.append(xmx)
ymn = ymin_new / height
ymins.append(ymn)
ymx = ymax_new / height
ymaxs.append(ymx)
classes_text.append(row['Label'].encode('utf8'))
classes.append(class_text_to_int(row['Label'], label_map_dict))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(File),
'image/source_id': dataset_util.bytes_feature(File),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def labelFile(classes_names, label_map_path):
pbtxt_content = ""
for i, class_name in enumerate(classes_names):
pbtxt_content = (
pbtxt_content
+ "item {{\n id: {0}\n name: '{1}'\n}}\n\n".format(i + 1, class_name)
)
pbtxt_content = pbtxt_content.strip()
with open(label_map_path, "w") as f:
f.write(pbtxt_content)
def createLabelFile(train_df, save_path):
labelmap_path = str(Path(save_path)/ 'label_map.pbtxt')
classes_names = sorted(train_df['Label'].unique().tolist())
labelFile(classes_names, labelmap_path)
return labelmap_path, len(classes_names)
def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path):
outputPath = str(Path(output_dir)/ 'train.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(train_df, 'File')
label_map = label_map_util.load_labelmap(labelmap_path )
label_map_dict = label_map_util.get_label_map_dict(label_map)
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
if len(test_df):
outputPath = str(Path(output_dir)/ 'test.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(test_df, 'File')
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
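# --- Hedged usage sketch (added; not part of the original file). The paths
# and the annotation frame are hypothetical, and running it requires the
# TF Object Detection API plus the referenced image on disk.
if __name__ == '__main__':
    annotations = pd.DataFrame([{'File': 'img1.jpg', 'Label': 'cat',
                                 'xmin': 10, 'ymin': 12, 'xmax': 50, 'ymax': 60}])
    labelmap_path, num_classes = createLabelFile(annotations, '.')  # writes label_map.pbtxt
    generate_TF_record('images', '.', annotations, pd.DataFrame(), labelmap_path)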
|
dataProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
try:
import transformations.data_profiler_functions as cs
except:
import data_profiler_functions as cs
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = False
log_suffix = f'[{Path(__file__).stem}] '
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None):
if not isinstance(xtrain, pd.DataFrame):
            raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
        elif ytrain is not None:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.data_path = data_path
self.encode_target = encode_target
self.label_encoder = None
self.data = xtrain
self.keep_unprocessed = keep_unprocessed
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
self.force_numeric_conv = []
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
self.log_input_feat_info()
def log_input_feat_info(self):
if self.featureDict:
feature_df = pd.DataFrame(self.featureDict)
log_text = '\nPreprocessing options:'
log_text += '\n\t'+str(feature_df.head( len(self.featureDict))).replace('\n','\n\t')
self.log.info(log_text)
def log_dataframe(self, msg=None):
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\n\t'+str(self.data.head(2)).replace('\n','\n\t')
log_text += ('\n\t' + buffer.getvalue().replace('\n','\n\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
try:
if self.text_feature:
from text.textProfiler import set_pretrained_model
set_pretrained_model(pipe)
conversion_method = self.get_conversion_method()
process_data = pipe.fit_transform(self.data, y=self.target)
# save for testing
if DEBUG_ENABLED:
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data)
df.to_csv('debug_preprocessed.csv', index=False)
if self.text_feature and conversion_method == 'latentsemanticanalysis':
n_size = self.get_tf_idf_output_size( pipe)
dimensions = self.get_tf_idf_dimensions()
                if n_size != dimensions:
                    dimensions = n_size
from sklearn.decomposition import TruncatedSVD
reducer = TruncatedSVD( n_components = dimensions)
reduced_data = reducer.fit_transform( process_data[:,-n_size:])
text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer))
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1)
last_step = self.feature_expender.pop()
self.feature_expender.append({'feature_reducer':list(last_step.values())[0]})
except EOFError as e:
if "Compressed file ended before the end-of-stream marker was reached" in str(e):
raise EOFError('Pretrained model is not downloaded properly')
self.update_output_features_names(pipe)
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if self.keep_unprocessed:
df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.dropna()
if self.config['removeDuplicate']:
self.drop_duplicate()
#self.check_categorical_features()
#self.string_to_numeric()
self.process_target()
self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)}
self.parse_process_step_config()
self.process_drop_fillna()
self.log_type_conversion()
self.update_num_fill_dict()
if DEBUG_ENABLED:
print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
if DEBUG_ENABLED:
self.log.info(self.process_method)
self.log.info(self.pipeline)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.is_target_available():
# drop null values
self.__update_index( self.target.notna(), 'target')
if self.encode_target:
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{}))
normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none')
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{}))
cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{}))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def get_tf_idf_dimensions(self):
dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default')
return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim]
def get_tf_idf_output_size(self, pipe):
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
return len(v)
return 0
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
index_shifter = 0
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
for key,value in start_index.items():
for k,v in value.items():
index_shifter += len(v)
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
#print(start_index)
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
self.output_columns[key:key] = v
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
from text.textProfiler import textProfiler
from text.textProfiler import textCombine
pipeList = []
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.cleaner(conf_json, pipeList, self.data_path)
pipeList = obj.embedding(conf_json, pipeList)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in cs.supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
self.num_fill_method_dict[f][en].append(col)
if not self.num_fill_method_dict[f][en] :
del self.num_fill_method_dict[f][en]
if not self.num_fill_method_dict[f]:
del self.num_fill_method_dict[f]
def update_cat_fill_dict(self):
self.cat_fill_method_dict = {}
if 'catFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['categorical']:
self.cat_fill_method_dict[f] = {}
for en in cs.supported_method['categoryEncoding']:
self.cat_fill_method_dict[f][en] = []
for col in self.cat_feature:
catFillDict = self.process_method.get('catFill',{})
catEncoderDict = self.process_method.get('catEncoder',{})
if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
self.cat_fill_method_dict[f][en].append(col)
if not self.cat_fill_method_dict[f][en] :
del self.cat_fill_method_dict[f][en]
if not self.cat_fill_method_dict[f]:
del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed))
self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed))
self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed))
self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed))
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text']
skipped_types = ['date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items() if v in allowed_types}
skipped_features = [k for k,v in data_types.items() if v in skipped_types]
if skipped_features:
self.keep_unprocessed.extend( skipped_features)
self.keep_unprocessed = list(set(self.keep_unprocessed))
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
            return list(self.colm_type.values())
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
            valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
else:
valid_feat_type = data_types
for k,v in valid_feat_type.items():
if v != self.colm_type[k].name:
try:
self.data.astype({k:v})
self.colm_type.update({k:self.data[k].dtype})
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
except:
self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason)
if v == np.dtype('float64') and self.colm_type[k].name == 'object':
if self.check_numeric( k):
self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce')
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
self.force_numeric_conv.append( k)
else:
raise ValueError(f"Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%")
self.data = self.data.astype(valid_feat_type)
self.__update_type()
def check_numeric(self, feature):
col_values = self.data[feature].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
return True
return False
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
            self.data[columns] = self.data[columns].applymap(to_number)  # element-wise conversion; the previous row-wise apply passed whole rows to w2n
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
def check_categorical_features(self):
num_data = self.data.select_dtypes(include='number')
num_data_unique = num_data.nunique()
num_to_cat_col = {}
for i, value in enumerate(num_data_unique):
if value < self.config['categoryMaxLabel']:
num_to_cat_col[num_data_unique.index[i]] = 'category'
if num_to_cat_col:
self.update_type(num_to_cat_col, 'numerical to categorical')
str_to_cat_col = {}
str_data = self.data.select_dtypes(include='object')
str_data_unique = str_data.nunique()
for i, value in enumerate(str_data_unique):
if value < self.config['categoryMaxLabel']:
str_to_cat_col[str_data_unique.index[i]] = 'category'
for colm in str_data.columns:
if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']:
str_to_cat_col[colm] = 'category'
if str_to_cat_col:
self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
if feat_to_remove:
self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def __update_index(self, indices, reason=''):
        if isinstance(indices, pd.Series) and len(indices) == len(self.data):
if not indices.all():
self.data = self.data[indices]
if self.is_target_available():
self.target = self.target[indices]
                self.log_update_index((~indices).sum(), reason)
def dropna(self):
self.data.dropna(how='all',inplace=True)
if self.is_target_available():
self.target = self.target[self.data.index]
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
self.__update_index( ~index, reason='duplicate')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_update_index(self,count, reason):
if count:
if reason == 'target':
self.log.info('-------> Null Target Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'duplicate':
self.log.info('-------> Duplicate Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'outlier':
self.log.info(f'-------> Dropped rows count: {count}')
self.log.info('Status:- |... Outlier treatment done')
self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\nStatus:- !... Normalization treatment done')
for method in cs.supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\nStatus:- !... Fillna for numeric feature done')
for method in cs.supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\nStatus:- !... FillNa for categorical feature done')
for method in cs.supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
#if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
# index feature can be time based
count = (self.data[feat] - self.data[feat].shift() == 1).sum()
if len(self.data) - count == 1:
index_feature.append(feat)
self.drop_features(index_feature, "index")
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in cs.supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
        if not self.is_target_available() and method.lower() in cs.target_encoding_method_change:
            method = cs.target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in cs.supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}")
def apply_outlier(self):
inlier_indice = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = cs.findiqrOutlier(self.data[k])
elif v == 'zscore':
index = cs.findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
if k in self.process_method['outlierOperation'].keys():
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indice = np.logical_and(inlier_indice, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
if self.numeric_feature:
index = cs.findiforestOutlier(self.data[self.numeric_feature])
inlier_indice = np.logical_and(inlier_indice, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indice.sum() != len(self.data):
self.__update_index(inlier_indice, 'outlier')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method not in ['Disable', 'na']:
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlierOperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder()
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="ignore")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder()
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
def recommenderStartProfiler(self,modelFeatures):
return cs.recommenderStartProfiler(self,modelFeatures)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation)
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2)
    def get_conversion_method(self):
        return cs.get_one_true_option(self.config.get('textConversionMethod',{}), '').lower()  # default '' avoids calling .lower() on None
def set_features(features,profiler=None):
return cs.set_features(features,profiler)
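# --- Hedged usage sketch (added illustration; not part of the original file).
# Shows the intended entry point under assumed minimal config keys; the column
# names and config values below are hypothetical.
if __name__ == '__main__':
    raw = pd.DataFrame({'age': [25.0, None, 40.0, 31.0],
                        'city': pd.Series(['NY', 'LA', None, 'NY'], dtype='category'),
                        'y': [0, 1, 0, 1]})
    prof = profiler(raw, target='y', encode_target=True,
                    config={'numericalFillMethod': {'median': 'True'},
                            'categoricalFillMethod': {'mode': 'True'},
                            'categoryEncoding': {'labelencoding': 'True'}})
    processed_df, fitted_pipe, label_enc = prof.transform()
    print(processed_df.head())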
|
data_profiler_functions.py | import os
import sys
import numpy as np
import scipy.stats
import pandas as pd
from pathlib import Path
default_config = {
'misValueRatio': '1.0',
'numericFeatureRatio': '1.0',
'categoryMaxLabel': '20',
'str_to_cat_len_max': 10
}
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable', 'na'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
from sklearn.ensemble import IsolationForest
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
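# --- Added self-check for the outlier helpers above (not part of the
# original module); run this file directly to see the inlier masks.
if __name__ == '__main__':
    s = pd.Series([1, 2, 3, 4, 5, 100])        # 100 is an obvious outlier
    print(findiqrOutlier(s).tolist())          # IQR fence: False at the outlier
    print(findzscoreOutlier(s).tolist())       # |z| < 3; tiny samples may keep it
    print(findiforestOutlier(s.to_frame()))    # IsolationForest wants a 2-D frame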
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def recommenderStartProfiler(self,modelFeatures):
try:
self.log.info('----------> FillNA:0')
self.data = self.data.fillna(value=0)
self.log.info('Status:- !... Missing value treatment done')
self.log.info('----------> Remove Empty Row')
self.data = self.data.dropna(axis=0,how='all')
self.log.info('Status:- !... Empty feature treatment done')
userId,itemId,rating = modelFeatures.split(',')
self.data[itemId] = self.data[itemId].astype(np.int32)
self.data[userId] = self.data[userId].astype(np.int32)
self.data[rating] = self.data[rating].astype(np.float32)
return self.data
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
return(self.data)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
try:
dataset_directory = Path(folderlocation)
dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name']
tfrecord_directory = Path(deployLocation)/'Video_TFRecord'
from savp import PreprocessSAVP
import csv
csvfile = open(dataset_csv_file, newline='')
csv_reader = csv.DictReader(csvfile)
PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory)
dataColumns = list(self.data.columns)
VideoProcessing = True
return dataColumns,VideoProcessing,tfrecord_directory
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
import os
try:
features = [doc_col_1, doc_col_2]
pipe = None
dataColumns = list(self.data.columns)
self.numofCols = self.data.shape[1]
self.numOfRows = self.data.shape[0]
from transformations.textProfiler import textProfiler
self.log.info('-------> Execute Fill NA With Empty String')
self.data = self.data.fillna(value=" ")
self.log.info('Status:- |... Missing value treatment done')
self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
from tensorflow.keras.preprocessing.text import Tokenizer
pipe = Tokenizer()
pipe.fit_on_texts(self.data['text'].values)
self.log.info('-------> Tokenizer: Fit on Concatenate Field')
self.log.info('Status:- |... Tokenizer the text')
self.data[doc_col_1] = self.data[doc_col_1].astype(str)
        self.data[doc_col_2] = self.data[doc_col_2].astype(str)
return (self.data, pipe, self.target_name, features)
except Exception as inst:
self.log.info("StartProfiler failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
def set_features(features,profiler=None):
if profiler:
features = [x for x in features if x not in profiler.added_features]
return features + profiler.text_feature
return features |
dataReader.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import sys
import os
import warnings
import logging
from pathlib import Path
import random
from sklearn.model_selection import train_test_split
import operator
import re
import pdfplumber
class dataReader():
def __init__(self):
self.dataDf =None
self.log = logging.getLogger('eion')
def readCsv(self,dataPath,featureList,targetColumn):
data=pd.read_csv(dataPath)
dataDf=data[featureList]
predictDf=data[targetColumn]
return dataDf,predictDf
def rowsfilter(self,filters,dataframe):
self.log.info('\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479
filterexpression=''
firstexpressiondone = False
for x in filters:
if firstexpressiondone:
filterexpression += ' '
if x['combineOperator'].lower() == 'and':
filterexpression += '&'
elif x['combineOperator'].lower() == 'or':
filterexpression += '|'
filterexpression += ' '
firstexpressiondone = True
filterexpression += x['feature']
filterexpression += ' '
if x['condition'].lower() == 'equals':
filterexpression += '=='
elif x['condition'].lower() == 'notequals':
filterexpression += '!='
elif x['condition'].lower() == 'lessthan':
filterexpression += '<'
elif x['condition'].lower() == 'lessthanequalto':
filterexpression += '<='
elif x['condition'].lower() == 'greaterthan':
filterexpression += '>'
elif x['condition'].lower() == 'greaterthanequalto':
filterexpression += '>='
filterexpression += ' '
if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
filterexpression += x['value']
else:
filterexpression += '\''+x['value']+'\''
dataframe = dataframe.query(filterexpression)
self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479
self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0]))
return dataframe,filterexpression
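    # Illustrative filter spec (comment-only example, added): rowsfilter()
    # consumes a list of dicts with the keys read above, e.g.
    #   filters = [
    #       {'feature': 'age',  'condition': 'greaterthan', 'value': '30', 'combineOperator': 'and'},
    #       {'feature': 'city', 'condition': 'equals',      'value': 'NY', 'combineOperator': 'and'},
    #   ]
    # which builds the pandas query string:  age > 30 & city == 'NY'
    # (the combineOperator of the first filter is skipped).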
def grouping(self,grouper,dataframe):
grouperbyjson= {}
groupbyfeatures = grouper['groupby']
dataframe = dataframe.reset_index()
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_sum'] = 'sum'
temp[feature+'_max'] = 'max'
temp[feature+'_min'] = 'min'
temp[feature+'_mean'] = 'mean'
aggjson[feature] = temp
else:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_unique'] = 'nunique'
aggjson[feature] = temp
groupbystring = 'groupby([\''+groupbyfeatures+'\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson)
dataframe.columns = dataframe.columns.droplevel(0)
dataframe = dataframe.reset_index()
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
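    # Illustrative grouper spec (comment-only example, added): grouping()
    # expects grouper = {'groupby': '<feature name>'} and aggregates every
    # other column -- size/sum/max/min/mean for numeric features,
    # size/nunique for the rest.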
def timeGrouping(self,timegrouper,dataframe):
grouperbyjson= {}
dateTime = timegrouper['dateTime']
frequency = timegrouper['freq']
groupbyfeatures = timegrouper['groupby']
grouperbyjson['datetime'] = dateTime
if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
dtlenth = dataframe[dateTime].iloc[0]
dtlenth = np.int64(dtlenth)
dtlenth = len(str(dtlenth))
if dtlenth == 13:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms')
grouperbyjson['unit'] = 'ms'
elif dtlenth == 10:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s')
grouperbyjson['unit'] = 's'
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
dataframe = dataframe.reset_index()
dataframe.set_index('date',inplace=True)
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == dateTime or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {'size','sum','max','min','mean'}
aggjson[feature] = temp
else:
temp = {'size','nunique'}
aggjson[feature] = temp
if groupbyfeatures == '':
groupbystring = 'groupby([pd.Grouper(freq=\''+frequency+'\')]).agg('+str(aggjson)+')'
else:
groupbystring = 'groupby([pd.Grouper(freq=\''+frequency+'\'),\''+groupbyfeatures+'\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
        self.log.info(grouperbyjson)
if groupbyfeatures == '':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson)
else:
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson)
dataframe.columns = ['_'.join(col) for col in dataframe.columns]
dataframe = dataframe.reset_index()
self.log.info(dataframe.head(10))
return dataframe,grouperbyjson
def readDf(self,dataF,featureList,targetColumn):
dataDf = dataF[featureList]
predictDf =dataF[targetColumn]
return dataDf,predictDf
def csvTodf(self,dataPath,delimiter,textqualifier):
'''
if os.path.splitext(dataPath)[1] == ".tsv":
dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\t')
else:
dataFrame=pd.read_csv(dataPath,encoding='latin1')
'''
if os.path.splitext(dataPath)[1] == ".py":
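            # a .py data source is exec'd and is expected to define a DataFrame named 'dfpy'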
f = open(dataPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
dataFrame = ldict['dfpy']
else:
dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
dataFrame.rename(columns=lambda x: x.strip(), inplace=True)
return dataFrame
def read_file(self, fileName):
fileName = Path(fileName)
if fileName.suffix == '.pdf':
pdf = pdfplumber.open(fileName)
text = ''
for index, page in enumerate(pdf.pages):
if index:
text += ' '
text += page.extract_text()
else:
with open(fileName, "r",encoding="utf-8") as f:
text = f.read()
return text
def documentsTodf(self,folderlocation,labelFilePath):
dataDf = pd.DataFrame()
error_message = ""
dataset_csv_file = os.path.join(folderlocation,labelFilePath)
labels = pd.read_csv(dataset_csv_file)
dataDict = {}
keys = ["File","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(labels)):
filename = os.path.join(folderlocation,labels.loc[i,"File"])
dataDict["File"].append(self.read_file(filename))
dataDict["Label"].append(labels.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
error_message = ""
return dataDf, error_message
def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature):
self.log.info("\n---------- Prepare Features ----------")
if(str(datetimeFeature).lower() != 'na'):
datetimeFeature = datetimeFeature.split(",")
datetimeFeature = list(map(str.strip, datetimeFeature))
for dtfeature in datetimeFeature:
if dtfeature in df.columns:
self.log.info("-------> Remove Date Time Feature: "+dtfeature)
df = df.drop(columns=dtfeature)
if(str(indexFeature).lower() != 'na'):
indexFeature = indexFeature.split(",")
indexFeature = list(map(str.strip, indexFeature))
for ifeature in indexFeature:
if ifeature in df.columns:
self.log.info("-------> Remove Index Feature: "+ifeature)
df = df.drop(columns=ifeature)
if(str(modelFeatures).lower() != 'na'):
self.log.info("-------> Model Features: "+str(modelFeatures))
modelFeatures = modelFeatures.split(",")
modelFeatures = list(map(str.strip, modelFeatures))
if(targetFeature != '' and str(targetFeature).lower() != 'na'):
targetFeature = targetFeature.split(",")
targetFeature = list(map(str.strip, targetFeature))
for ifeature in targetFeature:
if ifeature not in modelFeatures:
modelFeatures.append(ifeature)
if(str(indexFeature).lower() != 'na'):
for ifeature in indexFeature:
if ifeature in modelFeatures:
modelFeatures.remove(ifeature)
if(str(datetimeFeature).lower() != 'na'):
for dtfeature in datetimeFeature:
if dtfeature in modelFeatures:
modelFeatures.remove(dtfeature)
df = df[modelFeatures]
self.log.info("---------- Prepare Features End ----------")
return(df)
def splitImageDataset(self, df, ratio, modelType):
if modelType.lower() == "objectdetection":
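            # split at the image level so every annotation of an image lands in the same partition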
images = df['File'].unique().tolist()
trainImages = random.sample(images, int(len(images) * ratio))
mask = [0] * len(df)
for i in range(len(df)):
mask[i] = df.iloc[i]['File'] in trainImages
trainDf = df.iloc[mask]
testDf = df.iloc[[not elem for elem in mask]]
return trainDf, testDf
else:
return train_test_split(df, test_size=(1 - ratio))
def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}):
from transformations import generate_tfrecord
from transformations.imageAug import ImageAugmentation
if isinstance(csv_file, pd.DataFrame):
df = csv_file
else:
df = pd.read_csv(os.path.join(train_image_dir,csv_file))
labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir)
train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType)
if AugEnabled:
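            # augment only the training split; the test images are left untouched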
augFile = os.path.join(output_dir,"tempTrainDf.csv")
train_df.to_csv(augFile)
ia = ImageAugmentation(train_image_dir, augFile)
augFile = ia.augment(modelType, operations,None,augConf)
train_df = pd.read_csv(augFile)
generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path)
if AugEnabled and not keepAugImages:
ia.removeAugmentedImages(train_df)
return train_df, num_classes
|
pretrainedModels.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
from pathlib import Path
import urllib.request
import tarfile
import json
import subprocess
import os
from os.path import expanduser
import platform
class ODpretrainedModels():
def __init__(self, location=None):
if location:
if isinstance(location, Path):
self.pretrained_models_location = location.as_posix()
else:
self.pretrained_models_location = location
else:
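            # fall back to the AION package install path reported by 'pip show AION'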
            self.pretrained_models_location = None
            p = subprocess.run([sys.executable, "-m", "pip", "show", "AION"], capture_output=True, text=True)
            if p.returncode == 0:
                Output = p.stdout.split('\n')
                for x in Output:
                    y = x.split(':', 1)
                    if(y[0] == 'Location'):
                        self.pretrained_models_location = y[1].strip() + "/AION/pretrained_models/object_detection"
                        break
            if not self.pretrained_models_location:
                raise ValueError("Unable to locate the AION package, cannot resolve the pretrained models location")
if Path(self.pretrained_models_location).is_dir():
self.config_file_location = self.pretrained_models_location+'/supported_models.json'
with open(self.config_file_location) as json_data:
self.supportedModels = json.load(json_data)
home = expanduser("~")
if platform.system() == 'Windows':
self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection')
else:
self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
if os.path.isdir(self.modelsPath) == False:
os.makedirs(self.modelsPath)
def __save_config(self):
with open(self.config_file_location, 'w') as json_file:
json.dump(self.supportedModels, json_file)
def __download(self, modelName):
try:
url = self.supportedModels[modelName]["url"]
file = self.supportedModels[modelName]["file"]
local_file_path = Path(self.modelsPath)/(file+".tar.gz")
urllib.request.urlretrieve(url, local_file_path)
        except Exception:
            raise ValueError("{} model download error, check your internet connection".format(modelName))
return local_file_path
def __extract(self, modelName, file_location, extract_dir):
try:
tarFile = tarfile.open(file_location)
tarFile.extractall(extract_dir)
tarFile.close()
Path.unlink(file_location)
return True
        except Exception:
            return False
def download(self, modelName):
if modelName in list(self.supportedModels.keys()):
p = Path(self.modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.is_dir()]
if self.supportedModels[modelName]['file'] not in modelsDownloaded:
file = self.__download(modelName)
self.supportedModels[modelName]["downloaded"] = True
if self.__extract(modelName, file, self.modelsPath):
self.supportedModels[modelName]["extracted"] = True
self.__save_config()
else:
self.__save_config()
raise ValueError("{} model downloaded but extraction failed,please try again".format(modelName))
else:
raise ValueError("{} is not supported for object detection".format(modelName))
return self.supportedModels[modelName]
def get_info(self,modeltype):
models_info = {}
p = Path(self.pretrained_models_location)
downloaded_models = [x.name for x in p.iterdir() if x.is_dir()]
for model in list(self.supportedModels.keys()):
if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''):
models_info[model] = self.supportedModels[model]['extracted']
return models_info
def is_model_exist(self, model_name):
models = self.get_info('')
status = "NOT_SUPPORTED"
if model_name in models:
if self.supportedModels[model_name]['extracted']:
status = "READY"
else:
status = "NOT_READY"
return status
def clear_config(self, model_name):
self.supportedModels[model_name]['extracted'] = False
self.supportedModels[model_name]['downloaded'] = False
self.__save_config()
|
summarize.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import logging
import json
import joblib
from pathlib import Path
import platform
from datetime import datetime as dt
import time
from pathlib import Path
import argparse
from operator import itemgetter
import re
import fitz
from io import StringIO
from nltk.tokenize import sent_tokenize
import pandas as pd
from scipy import spatial
import urllib.request
import zipfile
import shutil
requirements = """
scipy
pandas
pathlib
"""
def pdf2txtInternal(pdffile):
    keyword = ['Discussion','4. Discussion','DISCUSSION','Results','RESULTS','Introduction','introduction','methods',
               'method','result','results','limitation','Conclusions','conclusion','Acknowledgements',
               'Acknowledgement','ACKNOWLEDGMENT','ACKNOWLEDGMENTS','References','REFERENCES']
print(pdffile)
filename1 = Path(pdffile)
csvInpClassFileName = filename1.stem
csvOutpClassFileName = "ClassResult" + filename1.stem +".csv"
styles = {}
font_counts = {}
granularity=False
doc = fitz.open(pdffile)
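    # build a usage histogram of font sizes; the most frequent size is assumed to be body text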
    for page in doc:
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
if b['type'] == 0: # block contains text
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if granularity:
identifier = "{0}_{1}_{2}_{3}".format(s['size'], s['flags'], s['font'], s['color'])
styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'],
'color': s['color']}
else:
identifier = "{0}".format(s['size'])
styles[identifier] = {'size': s['size'], 'font': s['font']}
font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage
font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)
doc.close()
if len(font_counts) < 1:
raise ValueError("Zero discriminating fonts found!")
p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)
p_size = p_style['size']
results = [] # list of tuples that store the information as (text, font size, font name)
total_data =[]
para_data =[]
search_data =[]
only_text =[]
v={}
pdf = fitz.open(pdffile) # filePath is a string that contains the path to the pdf
for page in pdf:
dict = page.get_text("dict")
blocks = dict["blocks"]
for block in blocks:
if "lines" in block.keys():
spans = block['lines']
for span in spans:
data = span['spans']
for lines in data:
if lines['size']>=p_size:
total_data.append([[lines['text']], [lines['size'], lines['font']]])
search_data.append([[lines['text']], [str(int(lines['size']))]])
para_data.append([lines['text']]) #, [lines['size']]])
for keywords in keyword:
if keywords == lines['text']: # only store font information of a specific keyword
results.append([[lines['text']], [lines['size'], lines['font']]])
only_text.append([lines['text']])
pdf.close()
headers=['']
intros =['Abstract','abstract']
header = ['']
headers_info =[]
for line in total_data:
if results[-1][1] == line[1]:
headers_info.append(line)
headers.extend(line[0])
if str(results[-1][0]).isupper():
headers =([item for item in headers if re.findall(r"(?<![^\s,])[A-Z]+(?![^\s,])", item)])
headers.insert(0,'')
        if 'Abstract' in headers:
            headers.pop(0)
elif str(results[-1][0][0][0]).isdigit():
headers = ([item for item in headers if re.findall(r"([0-9])" , item)])
headers.insert(0,'')
else:
        if 'Abstract' in headers:
            headers.pop(0)
header_size=(headers_info[0][1][0])
paragraph =[]
check =[]
str1 =' '
for data in (para_data):
paragraph.extend(data)
str2 = str1.join(paragraph)
    repl = [['- ', '-'], [r' +', ' '], [r' \.', '.']]
    for i in repl:
        str2 = re.sub(i[0], i[1], str2)
for al in search_data:
rec=(''.join(str(x) for x in al[1]))
if float(rec) >=(p_size) or float(rec)>= header_size:
check.extend(al[0])
    str3 = str1.join(check)
    repl = [['- ', '-'], [r' +', ' '], [r' \.', '.']]
    for i in repl:
        str3 = re.sub(i[0], i[1], str3)
dataTosend=[]
data = []
for cols in range(2,len(headers)+1):
start = headers[cols-2] #.replace(' ','') #'SUBJECTS AND METHODS'
end = headers[cols-1]
if start in ['Acknowledgements', 'Acknowledgement', 'ACKNOWLEDGMENT','ACKNOWLEDGMENTS', 'References', 'REFERENCES']:
break
if start=='': #.replace(' ','')
res=(str2[str2.find(start)+len(start):str2.rfind(end)])
data.append(['Abstract', res])
tmp='Abstract' + ':'+ ' ' + res
dataTosend.append(tmp)
else:
res=(str2[str2.rfind(start)+len(start):str2.rfind(end)])
data.append([start, res])
tmp=start + ':'+ ' ' + res
dataTosend.append(tmp)
tokens = [] # sent tokenization and csv file creation updated
for idx in range(len(data)):
head = data[idx][0]
para = data[idx][1]
exp = sent_tokenize(para)
for val in exp:
tokens.append([head, val])
sent_data = []
for head, sent in tokens:
        break_sent = r'\. [A-Z]|\.[A-Z]' # break a chunk that contains two or more sentences in one cell
match = re.findall(break_sent, sent)
if len(match) >= 1:
for i in range (len(match)):
idx, _ = re.search(break_sent, sent).span()
sent_data.append( sent[:int(idx)+1].strip())
sent = sent[int(idx)+1:].strip()
        if re.search('^[a-z]|^[,;]', sent): # glue an incomplete fragment back onto the previous sentence
            if sent_data:
                last_val = sent_data.pop()
                sent_data.append(last_val + ' ' + sent)  # last_val is a string; append the fragment to it
            else:
                sent_data.append(sent)
        else:
            sent_data.append(sent)
return sent_data
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def extract_data(location):
files = [x for x in Path(location).iterdir() if x.suffix == '.pdf']
if not files:
raise ValueError(f'no pdf file found in directory {location}')
sentences = []
for file in files:
data=pdf2txtInternal(file)
sentences.append(data)
return [item for sublist in sentences for item in sublist]
def keyWordGeneration( keywords,deploy_loc, pretrained_loc):
keywords = keywords.split()
noOfKeyword = len(keywords)
embeddings = {}
word = ''
print(pretrained_loc)
with open(pretrained_loc, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
readData=pd.DataFrame([],columns=['Keyword'])
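    # expand each seed keyword with its 5 nearest neighbours in the embedding space (euclidean distance)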
    for i in range(noOfKeyword):
        neighbours = (sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])))[1:6]
        # DataFrame.append was removed in pandas 2.x; build the rows and concat instead
        rows = [{'Keyword': keywords[i]}] + [{'Keyword': n} for n in neighbours]
        readData = pd.concat([readData, pd.DataFrame(rows)], ignore_index=True)
readData.to_csv( Path(deploy_loc)/"keywordDataBase.csv",encoding='utf-8',index=False)
return set( readData['Keyword'])
def dataClassifyWithKw(sentences, keywords):
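    # a sentence is labelled True when it contains any of the keywords (regex alternation)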
df = pd.DataFrame(sentences, columns=['File'])
pattern = '|'.join(keywords)
df['Label'] = df.File.str.contains(pattern)
return df
def to_dataframe(data_loc, keywords, pretrained_type, embedding_size=300, deploy_loc=None, train=True):
pretrained_loc = checkAndDownloadPretrainedModel(pretrained_type, embedding_size)
sentences = extract_data(data_loc)
if train:
keywords = keyWordGeneration( keywords,deploy_loc, pretrained_loc)
df = dataClassifyWithKw(sentences, keywords)
return df
def get_pretrained_model_path():
from AION.appfe.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300):
models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}}
supported_models = [x for y in models.values() for x in y.values()]
    embedding_sizes = {x: y.keys() for x, y in models.items()}
    if embedding_size not in embedding_sizes[preTrainedModel.lower()]:
        raise ValueError(f"Embedding size '{embedding_size}' not supported for {preTrainedModel}")
selected_model = models[preTrainedModel.lower()][embedding_size]
modelsPath = get_pretrained_model_path()
p = Path(modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
local_file_path = None
if selected_model not in modelsDownloaded:
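        # model not cached locally yet: fetch it from the AION pretrained-models bucket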
if preTrainedModel.lower() == "glove":
try:
location = Path(modelsPath)
local_file_path = location/f"glove.6B.{embedding_size}d.w2vformat.txt"
file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path)
except Exception as e:
raise ValueError("Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
elif preTrainedModel.lower() == "fasttext":
try:
location = Path(modelsPath)
local_file_path = location/"wiki-news-300d-1M.vec.zip"
url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip'
file_test, header_test = urllib.request.urlretrieve(url, local_file_path)
with zipfile.ZipFile(local_file_path) as zip_ref:
zip_ref.extractall(location)
Path(local_file_path).unlink()
except Exception as e:
raise ValueError("Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
return Path(modelsPath)/selected_model
def get_params(profiler):
    pretrained_model = get_true_option(profiler.get('textConversionMethod', {}), 'Glove')
    embedding_size = get_true_option(profiler['embeddingSize'][pretrained_model], 'default')
    pretrained_model = pretrained_model.lower()
    if pretrained_model == 'fasttext':
        embedding_size = 300
    elif pretrained_model == 'glove':
        sizes = {'default': 300, '50d': 50, '100d': 100, '200d': 200, '300d': 300}
        embedding_size = sizes[embedding_size]
    keywords = profiler['KeyWords']
    return keywords, pretrained_model, embedding_size
def deploy(deploy_path, pretrained_model, embedding_size, output_columns,model_file, bert_length):
from AION.mlac.ml.core.imports import importModule
def create_predict(pretrained_model, embedding_size):
importer = importModule()
common_importes = [
{'module': 'sys', 'mod_from': None, 'mod_as': None},
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
local_importes = [
{'module': 'selector', 'mod_from': 'script.selector', 'mod_as': None},
{'module': 'inputprofiler', 'mod_from': 'script.inputprofiler', 'mod_as': None},
{'module': 'trained_model', 'mod_from': 'script.trained_model', 'mod_as': None},
{'module': 'summarize', 'mod_from': None, 'mod_as': None}
]
for mod in local_importes:
importer.addLocalModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
def predict(data):
try:
dataLocation = Path(data)
if not dataLocation.is_dir():
raise ValueError('Input should be a valid directory')
keywords_file = Path(__file__).parent/'keywordDataBase.csv'
if not keywords_file.exists():
raise ValueError('keywordDataBase.csv is missing in trained model output')
keywords_df = pd.read_csv(keywords_file)
if 'Keyword' not in keywords_df.columns:
raise ValueError('keywordDataBase.csv file in output folder is corrupt')
pretrained_type = '{pretrained_model.lower()}'
embedding_sz = {embedding_size}
keywords = keywords_df['Keyword'].tolist()
df = summarize.to_dataframe(dataLocation, keywords, pretrained_type, embedding_sz, train=False)
df0 = df.copy()
profilerobj = inputprofiler()
df = profilerobj.apply_profiler(df)
selectobj = selector()
df = selectobj.apply_selector(df)
modelobj = trained_model()
output = modelobj.predict(df,df0)
outputjson = {{"status":"SUCCESS","data":output}}
print("predictions:",outputjson)
except KeyError as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
"""
code = importer.getCode()
code += text
return code
def create_profiler(output_columns):
importer = importModule()
common_importes = [
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
if self.model:
df = self.model.transform(df)
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns={output_columns})
else:
df = pd.DataFrame(df, columns={output_columns})
return(df)
"""
code = importer.getCode()
code += text
return code
def create_selector(output_columns):
importer = importModule()
common_importes = [
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class selector(object):
def apply_selector(self,df):
df = df[{output_columns}]
return(df)
"""
code = importer.getCode()
code += text
return code
def create_train(model_file, bert_length):
importer = importModule()
common_importes = [
{'module': 'os', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'Summarizer', 'mod_from': 'summarizer', 'mod_as': None }
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class trained_model(object):
def __init__(self):
self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{model_file}'))
def predict(self, X, df_org):
X = X.astype(np.float32)
df_org['predicted'] = pd.DataFrame(self.model.predict(X))
textToSum=""
for i in df_org.index:
if (df_org['predicted'][i] or df_org['Label'][i]) :
textToSum=textToSum + " " + df_org["File"][i]
bert_model = Summarizer()
bert_summary=bert_model(textToSum, min_length={bert_length})
return bert_summary
"""
code = importer.getCode()
code += text
return code
deploy_path = Path(deploy_path)
aion_prediction = deploy_path/'aion_predict.py'
profiler_file = deploy_path/'script'/'inputprofiler.py'
selector_file = deploy_path/'script'/'selector.py'
trainer_file = deploy_path/'script'/'trained_model.py'
with open(aion_prediction, 'w') as f:
f.write(create_predict(pretrained_model, embedding_size))
with open(profiler_file, 'w') as f:
f.write(create_profiler(output_columns))
with open(selector_file, 'w') as f:
f.write(create_selector(output_columns))
with open(trainer_file, 'w') as f:
f.write(create_train(model_file, bert_length))
cwf = Path(__file__)
shutil.copy(cwf, deploy_path/cwf.name)
# require dataLocation for reading files
# require deployLocation for saving keywords
# require pretrained model location
# require pretrained model type
# require keywwords
if __name__ == '__main__':
dataLocation = r'C:\Harish\aion\task\task\summarization\reference\pdfs'
deployLocation = r'C:\Users\vashistah\AppData\Local\HCLT\AION\uses'
pretrained_loc = r"C:\Users\vashistah\AppData\Local\HCLT\AION\PreTrainedModels\TextProcessing"
pretrained_type = 'glove'
keywords = 'delhi dialysis'
data = to_dataframe(dataLocation, keywords, pretrained_type,300, deployLocation, train=True)
print(data)
data.to_csv(Path(deployLocation)/'output.csv', index=False) |
item_rating.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import os
import datetime, time, timeit
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pickle
import logging
class recommendersystem():
def __init__(self,features,svd_params):
self.features = features
self.svd_input = svd_params
self.log = logging.getLogger('eion')
print ("recommendersystem starts \n")
#To extract dict key,values
    def extract_params(self, params):
        self.dict = params
        for k, v in self.dict.items():
            return k, v  # returns only the first key/value pair
def recommender_model(self,df,outputfile):
from sklearn.metrics.pairwise import cosine_similarity
from utils.file_ops import save_csv
USER_ITEM_MATRIX = 'user_item_matrix'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix'
selectedColumns = self.features.split(',')
data = pd.DataFrame()
for i in range(0,len(selectedColumns)):
data[selectedColumns[i]] = df[selectedColumns[i]]
dataset = data
self.log.info('-------> Top(5) Rows')
self.log.info(data.head(5))
start = time.time()
self.log.info('\n----------- Recommender System Training Starts -----------')
#--------------- Task 11190:recommender system changes Start ---Usnish------------------#
# selectedColumns = ['userId', 'movieId', 'rating']
df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns[2], 'count')).reset_index()
self.log.info('-------> Top 10 most rated Items:')
self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10))
matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2])
relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv')
matrix.to_csv(relative_file)
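        # item-item cosine similarity on the pivoted user-item matrix; missing ratings are treated as 0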
item_similarity_cosine = cosine_similarity(matrix.fillna(0))
item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'))
self.log.info('---------> Item-Item Similarity matrix created:')
self.log.info(item_similarity_cosine.head(5))
relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv')
save_csv(item_similarity_cosine,relative_file)
# --------------- recommender system changes End ---Usnish------------------#
executionTime=time.time() - start
self.log.info("------->Execution Time: "+str(executionTime))
self.log.info('----------- Recommender System Training End -----------\n')
return "filename",matrix,"NA","","" |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
text_similarity.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pickle
import pandas as pd
import sys
import time
import os
from os.path import expanduser
import platform
from sklearn.preprocessing import binarize
import logging
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import (Input, Embedding, LSTM, Lambda, Dense, Flatten, GlobalMaxPool2D,
                                     GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D)
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from sklearn.metrics.pairwise import cosine_similarity, cosine_distances
from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers
## Keras subclassing based siamese network
class siameseNetwork(Model):
    def __init__(self, activation, inputShape, num_iterations):
        super(siameseNetwork, self).__init__()  # initialize the Keras Model before assigning attributes
        self.activation = activation
        self.log = logging.getLogger('eion')
i1 = layers.Input(shape=inputShape)
i2 = layers.Input(shape=inputShape)
featureExtractor = self.build_feature_extractor(inputShape, num_iterations)
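        # both inputs go through the same feature extractor; this weight sharing is what makes the network siamese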
f1 = featureExtractor(i1)
f2 = featureExtractor(i2)
#distance vect
distance = layers.Concatenate()([f1, f2])
cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
c_loss=cosine_loss(f1, f2)
similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
self.model = Model(inputs=[i1, i2], outputs=outputs)
##Build dense sequential layers
def build_feature_extractor(self, inputShape, num_iterations):
layers_config = [layers.Input(inputShape)]
for i, n_units in enumerate(num_iterations):
layers_config.append(layers.Dense(n_units))
layers_config.append(layers.Dropout(0.2))
layers_config.append(layers.BatchNormalization())
layers_config.append(layers.Activation(self.activation))
model = Sequential(layers_config, name='feature_extractor')
return model
def call(self, x):
return self.model(x)
def euclidean_distance(vectors):
(f1, f2) = vectors
sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True)
return K.sqrt(K.maximum(sumSquared, K.epsilon()))
def cosine_similarity(vectors):
(f1, f2) = vectors
f1 = K.l2_normalize(f1, axis=-1)
f2 = K.l2_normalize(f2, axis=-1)
return K.mean(f1 * f2, axis=-1, keepdims=True)
def cos_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0],1)
class eion_similarity_siamese:
def __init__(self):
self.log = logging.getLogger('eion')
def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file):
try:
self.log.info('-------> Read Embedded File')
home = expanduser("~")
if platform.system() == 'Windows':
modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity')
else:
modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity')
if os.path.isdir(modelsPath) == False:
os.makedirs(modelsPath)
embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt')
if not os.path.exists(embedding_file_path):
from pathlib import Path
import urllib.request
import zipfile
location = modelsPath
local_file_path = os.path.join(location,"glove.6B.zip")
file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path)
with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
zip_ref.extractall(location)
os.unlink(os.path.join(location,"glove.6B.zip"))
if os.path.isfile(os.path.join(location,"glove.6B.50d.txt")):
os.unlink(os.path.join(location,"glove.6B.50d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.300d.txt")):
os.unlink(os.path.join(location,"glove.6B.300d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.200d.txt")):
os.unlink(os.path.join(location,"glove.6B.200d.txt"))
            X = df[[col1,col2]]
            Y = df[targetColumn]
            self.log.info('\n-------------- Test Train Split ----------------')
            if testPercentage == 0:
                X_train, y_train = X, Y
                X_test, y_test = X, Y
            else:
                testSize = testPercentage/100
                self.log.info('-------> Split Type: Random Split')
                self.log.info('-------> Test Percentage: '+str(testSize))
                X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize)
            self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
            self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->')
            self.log.info('-------------- Test Train Split End ----------------\n')
self.log.info('\n-------------- Train Validate Split ----------------')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->')
self.log.info('-------------- Train Validate Split End----------------\n')
self.log.info('Status:- |... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test')
train_sentence1 = pipe.texts_to_sequences(X_train[col1].values)
train_sentence2 = pipe.texts_to_sequences(X_train[col2].values)
val_sentence1 = pipe.texts_to_sequences(X_val[col1].values)
val_sentence2 = pipe.texts_to_sequences(X_val[col2].values)
len_vec = [len(sent_vec) for sent_vec in train_sentence1]
max_len = np.max(len_vec)
len_vec = [len(sent_vec) for sent_vec in train_sentence2]
if (max_len < np.max(len_vec)):
max_len = np.max(len_vec)
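            # pad every sentence pair to the longest training sequence so both branches receive equal-length input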
train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post')
train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post')
val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post')
val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post')
y_train = y_train.values
y_val = y_val.values
activation = str(conf['activation'])
model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10])
model.compile(
loss="binary_crossentropy",
optimizer=optimizers.Adam(learning_rate=0.0001),
metrics=["accuracy"])
es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
rlp = callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1
)
x_valid=X_val
y_valid=y_val
n_epoch = int(conf['num_epochs'])
batch_size = int(conf['batch_size'])
similarityIndex = conf['similarityIndex']
model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size,
validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp])
scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0)
self.log.info('-------> Model Score Matrix: Accuracy')
self.log.info('-------> Model Score (Validate Data) : '+str(scores[1]))
self.log.info('Status:- |... Algorithm applied: SIAMESE')
test_sentence1 = pipe.texts_to_sequences(X_test[col1].values)
test_sentence2 = pipe.texts_to_sequences(X_test[col2].values)
test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post')
test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post')
prediction = model.predict([test_sentence1, test_sentence2 ])
self.log.info('-------> similarityIndex : '+str(similarityIndex))
prediction = np.where(prediction > similarityIndex,1,0)
rocauc_sco = roc_auc_score(y_test,prediction)
acc_sco = accuracy_score(y_test, prediction)
predict_df = pd.DataFrame()
predict_df['actual'] = y_test
predict_df['predict'] = prediction
predict_df.to_csv(predicted_data_file)
self.log.info("predict_df: \n"+str(predict_df))
sco = acc_sco
self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco))
self.log.info('Status:- |... Testing Score: '+str(acc_sco))
self.log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco))
matrix = '"Accuracy":'+str(acc_sco)+',"ROC AUC":'+str(rocauc_sco)
prediction = model.predict([train_sentence1, train_sentence2])
prediction = np.where(prediction > similarityIndex,1,0)
train_rocauc_sco = roc_auc_score(y_train,prediction)
train_acc_sco = accuracy_score(y_train, prediction)
self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco))
self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco))
trainmatrix = '"Accuracy":'+str(train_acc_sco)+',"ROC AUC":'+str(train_rocauc_sco)
model_tried = '{"Model":"SIAMESE","Score":'+str(sco)+'}'
saved_model = 'textsimilarity_'+iterName+'_'+iterVersion
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.sav')
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.h5')
            ## Because the model uses the Keras subclassing API, save it as a SavedModel directory (as below) instead of a .h5 file.
filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion)
model.save(filename)
# model.save_weights(filename)
model_name = 'SIAMESE MODEL'
return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex)
except Exception as inst:
self.log.info("SIAMESE failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) |
performance.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
def get_metrics(request):
output = {}
output_path = Path(request.session['deploypath'])/"etc"/"output.json"
if not output_path.exists():
        raise ValueError('output json path does not exist, something unexpected happened')
with open(output_path) as file:
config = json.load(file)
output['problem_type'] = config.get('data',{}).get('ModelType')
output['best_model'] = config.get('data',{}).get('BestModel')
output['hyper_params'] = config.get('data',{}).get('params')
output['best_score'] = str(round(float(config.get('data',{}).get('BestScore')), 2))
output['scoring_method'] = config.get('data',{}).get('ScoreType')
if output['problem_type'] == 'classification':
output['mcc_score'] = str(round(float(config.get('data',{}).get('matrix',{}).get('MCC_SCORE', 0.0)), 2))
else:
output['mcc_score'] = 'NA'
return output
|
brier_score.py | import json
import os
def get_brier_score(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "output.json")
with open(displaypath) as file:
config = json.load(file)
problem_type = config["data"]["ModelType"]
brier_score = config["data"]["matrix"]["BRIER_SCORE"]
print(problem_type,brier_score)
except Exception as e:
#print(str(e))
raise ValueError(str(e))
return problem_type, brier_score
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
|
fairness_metrics.py |
import os
import sys
import re
import json
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpld3
from sklearn.preprocessing import LabelEncoder
from appbe.eda import ux_eda
from aif360.datasets import StandardDataset, BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def get_metrics(request):
dataFile = os.path.join(request.session['deploypath'], "data", "preprocesseddata.csv.gz")
predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
Target_feature = configSettings['targetFeature']
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
df = pd.read_csv(dataFile)
df_p = pd.DataFrame.from_dict(predict_dict['data'])
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
df_temp = request.GET.get('feature')
global metricvalue
metricvalue = request.GET.get('metricvalue')
Protected_feature = df_temp
df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability'])
df_p.rename(columns={'prediction': Target_feature}, inplace=True)
eda_obj = ux_eda(dataFile, optimize=1)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
features_to_Encode = features
categorical_names = {}
encoders = {}
for feature in features_to_Encode:
le = LabelEncoder()
le.fit(df[feature])
df[feature] = le.transform(df[feature])
le.fit(df_p[feature])
df_p[feature] = le.transform(df_p[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
    new_list = [item for item in categorical_names[Protected_feature] if not pd.isnull(item)]
    class_size = len(new_list)
    if class_size > 10:
        return 'HeavyFeature'
    metrics = fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p)
figure = plot_fair_metrics(metrics)
html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url)
return html_graph
def fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p):
cols = [metricvalue]
obj_fairness = [[0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
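    # compute the metric once per class of the protected feature, treating that class as the privileged group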
    for indx in range(class_size):
priv_group = categorical_names[Protected_feature][indx]
privileged_class = np.where(categorical_names[Protected_feature] == priv_group)[0]
data_orig = StandardDataset(df,
label_name=Target_feature,
favorable_classes=[1],
protected_attribute_names=[Protected_feature],
privileged_classes=[privileged_class])
attr = data_orig.protected_attribute_names[0]
idx = data_orig.protected_attribute_names.index(attr)
privileged_groups = [{attr:data_orig.privileged_protected_attributes[idx][0]}]
unprivileged_size = data_orig.unprivileged_protected_attributes[0].size
unprivileged_groups = []
for idx2 in range(unprivileged_size):
unprivileged_groups.extend([{attr:data_orig.unprivileged_protected_attributes[idx][idx2]}])
bld = BinaryLabelDataset(df=df, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
bld_p = BinaryLabelDataset(df=df_p, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
ClsMet = ClassificationMetric(bld, bld_p,unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
if metricvalue == "Theil Index":
row = pd.DataFrame([[ClsMet.theil_index()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Equal Opportunity Difference":
row = pd.DataFrame([[ClsMet.equal_opportunity_difference()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Disparate Impact":
row = pd.DataFrame([[ClsMet.disparate_impact()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Statistical Parity Difference":
row = pd.DataFrame([[ClsMet.statistical_parity_difference()]],
columns = cols ,
index = [priv_group])
#fair_metrics = fair_metrics.append(row)
fair_metrics = pd.concat([fair_metrics,row])
return fair_metrics
def plot_fair_metrics(fair_metrics):
import matplotlib.patches as patches
plt.style.use('default')
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1)
plt.subplots_adjust(
left = 0.125,
bottom = 0.1,
right = 0.9,
top = 0.9,
wspace = .5,
hspace = 1.1
)
y_title_margin = 1.2
plt.suptitle("Fairness metrics", y = 1.09, fontsize=20)
sns.set(style="dark")
cols = fair_metrics.columns.values
obj = fair_metrics.loc['objective']
if metricvalue == "Theil Index":
size_rect = [0.5]
rect = [-0.1]
bottom = [-0.1]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Equal Opportunity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
elif metricvalue == "Disparate Impact":
size_rect = [0.4]
rect = [0.8]
bottom = [0]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Statistical Parity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
for attr in fair_metrics.index[1:len(fair_metrics)].values:
check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)]
for i in range(0,1):
plt.subplot(1, 1, i+1)
xx = fair_metrics.index[1:len(fair_metrics)].values.tolist()
yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist()
palette = sns.color_palette('husl', len(xx))
ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx)
index = 0
for p in zip(ax.get_xticks(), yy):
if (p[1] > 2.0):
_color = palette.as_hex()[index]
_val = 'Outlier(' + str(round(p[1],3)) + ')'
ax.text(p[0]-0.5, 0.02, _val, color=_color)
else:
ax.text(p[0], p[1]+0.05, round(p[1],3), color='k')
index = index + 1
plt.ylim(bottom[i], top[i])
plt.setp(ax.patches, linewidth=0)
ax.get_xaxis().set_visible(False)
ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1)
ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor="green", linewidth=1, linestyle='solid'))
# plt.axhline(obj[i], color='black', alpha=0.3)
plt.title(cols[i], fontname="Times New Roman", size=20,fontweight="bold")
ax.set_ylabel('')
ax.set_xlabel('')
return fig |
sensitivity_analysis.py | import base64
import io
import json
import os
import urllib
import joblib
import numpy as np
import pandas as pd
from SALib.analyze import sobol
class sensitivityAnalysis():
def __init__(self, model, problemType, data, target, featureName):
self.model = model
        self.problemType = problemType
self.data = data
self.target = target
self.featureName = featureName
self.paramvales = []
self.X = []
self.Y = []
self.problem = {}
def preprocess(self):
self.X = self.data[self.featureName].values
self.Y = self.data[self.target].values
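        # SALib problem definition: one uniform sampling range per feature, bounded by the observed min/max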
bounds = [[np.min(self.X[:, i]), np.max(self.X[:, i])] for i in range(self.X.shape[1])]
self.problem = {
'num_vars': self.X.shape[1],
'names': self.featureName,
'bounds': bounds
}
def generate_samples(self,size):
from SALib.sample import sobol
self.param_values = sobol.sample(self.problem, size)
def calSiClass(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
S = np.zeros(self.X.shape[1])
for class_label in np.unique(self.Y):
if isML:
y_pred_poba = self.model.predict_proba(self.param_values)[:, class_label]
if isDL:
y_pred_poba = self.model.predict(self.param_values)[:,class_label]
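                # Sobol analysis expects N*(2D+2) model evaluations; trim any remainder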
if not y_pred_poba.size % (2 * D + 2) == 0:
lim = y_pred_poba.size - y_pred_poba.size % (2 * D + 2)
y_pred_poba = y_pred_poba[:lim]
Si = sobol.analyze(self.problem, y_pred_poba)
if satype.lower() == 'first':
S += Si['S1']
else:
S += Si['ST']
S /= len(np.unique(self.Y))
return S
except Exception as e:
print('Error in calculating Si for Classification: ', str(e))
raise ValueError(str(e))
def calSiReg(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
Y = np.array([self.model.predict(X_sample.reshape(1, -1)) for X_sample in self.param_values])
Y = Y.reshape(-1)
if not Y.size % (2 * D + 2) == 0:
lim = Y.size - Y.size % (2 * D + 2)
Y = Y[:lim]
Si = sobol.analyze(self.problem, Y)
if satype.lower() == 'first':
S = Si['S1']
else:
S = Si['ST']
return S
except Exception as e:
print('Error in calculating Si for Regression: ', str(e))
raise ValueError(str(e))
def plotSi(self, S, saType):
try:
import matplotlib.pyplot as plt
if saType.lower() == 'first':
title, label = 'Sensitivity Analysis', 'First order'
else:
title, label = 'Sensitivity Analysis', 'Total order'
x = np.arange(len(self.problem['names']))
width = 0.35
fig, ax = plt.subplots()
ax.bar(x - width / 2, S, width, label=label)
ax.set_xticks(x)
ax.set_xlabel('Features')
ax.set_ylabel('Sensitivity Indices')
ax.set_title(title)
ax.set_xticklabels(self.problem['names'], rotation=45, ha="right")
ax.legend()
plt.tight_layout()
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
SAimage = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as e:
print(e)
SAimage = ''
return SAimage
def checkModelType(modelName):
isML= False
isDL = False
if modelName in ["Neural Network", "Convolutional Neural Network (1D)", "Recurrent Neural Network","Recurrent Neural Network (GRU)",
"Recurrent Neural Network (LSTM)", "Neural Architecture Search", "Deep Q Network", "Dueling Deep Q Network"]:
isDL = True
elif modelName in ["Linear Regression","Lasso","Ridge","Logistic Regression", "Naive Bayes", "Decision Tree", "Random Forest", "Support Vector Machine", "K Nearest Neighbors", "Gradient Boosting",
"Extreme Gradient Boosting (XGBoost)", "Light Gradient Boosting (LightGBM)", "Categorical Boosting (CatBoost)","Bagging (Ensemble)"]:
isML = True
return isML,isDL
def startSA(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
if not os.path.exists(displaypath):
raise Exception('Config file not found.')
with open(displaypath) as file:
config = json.load(file)
            problemType = config['problemType']
            if problemType.lower() not in ['classification','regression']:
                raise Exception(f"Problem Type: {problemType} not supported")
isML,isDL = checkModelType(config['modelname'])
sample_size = 1024
if isML:
model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 2048
if isDL:
from tensorflow.keras.models import load_model
model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 512
target = config['targetFeature']
featureName = config['modelFeatures']
dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz')
if not os.path.exists(dataPath):
raise Exception('Data file not found.')
from utils.file_ops import read_df_compressed
read_status,dataFrame = read_df_compressed(dataPath)
            obj = sensitivityAnalysis(model, problemType, dataFrame, target, featureName)
obj.preprocess()
obj.generate_samples(sample_size)
submitType = str(request.GET.get('satype'))
saType = 'first' if submitType == 'first' else 'total'
            if problemType.lower() == 'classification':
SA_values = obj.calSiClass(saType,isML,isDL)
else:
SA_values = obj.calSiReg(saType,isML,isDL)
if SA_values.size and saType:
graph = obj.plotSi(SA_values, saType)
if graph:
outputJson = {'Status': "Success", "graph": graph}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in Plotting Graph'}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in calculating Si values'}
output_json = json.dumps(outputJson)
return output_json
except Exception as e:
print(str(e))
raise ValueError(str(e))
|
trustedai_uq.py | import numpy as np
import joblib
import pandas as pd
from appbe.eda import ux_eda
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# from pathlib import Path
import configparser
import json
import matplotlib.pyplot as plt
import os
def trustedai_uq(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
TargetFeature = configSettings['targetFeature']
problemType = configSettings['problemType']
raw_data_loc = configSettings['preprocessedData']
dataLocation = configSettings['postprocessedData']
selectedfeatures = request.GET.get('values')
if problemType.lower() == "classification":
model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model']))
df = pd.read_csv(dataLocation)
trainfea = df.columns.tolist()
feature = json.loads(selectedfeatures)
# feature = ",".join(featurs)
# features = ['PetalLengthCm','PetalWidthCm']
targ = TargetFeature
tar =[targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(model,dataLocation,feature,tar)
return outputStr
if problemType.lower() == "regression":
model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model']))
df = pd.read_csv(dataLocation)
trainfea = df.columns.tolist()
feature = json.loads(selectedfeatures)
# feature = ",".join(featurs)
# features = ['PetalLengthCm','PetalWidthCm']
targ = TargetFeature
tar =[targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(model,dataLocation,feature,tar)
print(outputStr)
return outputStr
    except Exception as e:
        print('error', e)
        return str(e) |
pipeline_config.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import logging
import os
import shutil
import time
import importlib
from sys import platform
from pathlib import Path
from distutils.util import strtobool
import config_manager.pipeline_config_reader as cs
# Base class for the EION configuration manager: reads the required params from eion.json, initialises the parameter list, stores them in variables and returns them to the caller or to external modules.
class AionConfigManager:
def getDebiasingDetail(self):
return cs.getDebiasingDetail(self)
    # eion configuration Constructor
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
self.summarize = False
    # Collect the features marked as type 'text' in the profiler feature dictionary
def get_text_feature(self):
self.text_features = []
feat_dict = self.advance['profiler']['featureDict']
for feat in feat_dict:
if feat.get('type') == 'text':
if feat.get('feature'):
self.text_features.append(feat['feature'])
return self.text_features
def validate_config(self):
status = True
error_id = ''
msg = ''
        conversion_method = self.__get_true_option(self.advance.get('profiler',{}).get('textConversionMethod',{}))
        is_text_feature = self.get_text_feature()
        if is_text_feature and conversion_method and conversion_method.lower() == 'fasttext':
            status = importlib.util.find_spec('fasttext') is not None
if not status:
error_id = 'fasttext'
msg = 'fastText is not installed. Please install fastText'
return status,error_id, msg
def getTextlocation(self):
text_data = self.basic["dataLocation"]
return text_data
    def getTextSummarize(self):
        algoname = None
        algo = self.basic['algorithms']['textSummarization']
        for key in algo:
            if algo[key] == 'True':
                algoname = key
        methodname = None
        method = self.advance['textSummarization']['summaryLength']
        for key in method:
            if method[key] == 'True':
                methodname = key
        return algoname, methodname
def getAssociationRuleFeatures(self):
if 'invoiceNoFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']:
invoiceNoFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature']
else:
invoiceNoFeature =''
if 'itemFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']:
itemFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']
else:
itemFeature =''
return invoiceNoFeature,itemFeature
def getFirstDocumentFeature(self):
return cs.getFirstDocumentFeature(self)
def getSecondDocumentFeature(self):
return cs.getSecondDocumentFeature(self)
def getEionTextSimilarityConfig(self):
return self.advance['textSimilarityConfig']
def getEionTextSummarizationConfig(self):
return self.basic['dataLocation'],self.basic['deployLocation'] ,self.basic['textSummarization']['KeyWords'],self.basic['textSummarization']['pathForKeywordFile']
def getEionInliers(self):
return cs.getEionInliers(self)
#To get the selected models for eion anomaly detection
def getEionanomalyModels(self):
self.anomalyModels = self.mlmodels
return (self.anomalyModels)
# To get parameter list of configuration module from json, this will be passed as dict{}
def getEionProfilerConfigurarion(self):
return cs.getEionProfilerConfigurarion(self)
def getAIONTestTrainPercentage(self):
return cs.getAIONTestTrainPercentage(self)
    def getModelEvaluationConfig(self):
        try:
            # NOTE: 'request' is not defined in this scope, so this call raises
            # NameError and the handler below returns an empty dict.
            return request.POST.get('mydata',{})
        except Exception:
            return({})
def getAIONDataBalancingMethod(self):
return cs.getAIONDataBalancingMethod(self)
def updateFeatureSelection(self, selectorConfig,codeConfigure,vectorizer=False):
if vectorizer:
selectorConfig['selectionMethod']['featureSelection'] = 'True'
selectorConfig['featureSelection']['allFeatures'] = 'True'
selectorConfig['featureSelection']['statisticalBased'] = 'False'
selectorConfig['featureSelection']['modelBased'] = 'False'
codeConfigure.update_config("feature_selector", ['allFeatures'])
# To get parameter list of selector module params
def getEionSelectorConfiguration(self):
return cs.getEionSelectorConfiguration(self)
def createDeploymentFolders(self,deployFolder,iterName,iterVersion):
usecase = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion)
folders = ['data','log','model','script','etc']
skip_delete = ['log']
deployLocation = Path(deployFolder)/iterName/iterVersion
deployLocation.mkdir(parents=True, exist_ok=True)
# delete previous failed/trained use case outputs except log folder
# as logging is already enabled for current usecase
for x in deployLocation.iterdir():
if x.is_file(): # bug 13315 delete existing files
x.unlink()
elif x.is_dir():
if x.stem not in skip_delete:
shutil.rmtree( x)
for folder in folders:
(deployLocation/folder).mkdir( parents=True, exist_ok=True)
(deployLocation/'log'/'img').mkdir( parents=True, exist_ok=True)
data_location = deployLocation/'data'
paths = {
'usecase': str(deployLocation.parent),
'deploy': str(deployLocation),
'data': str(deployLocation/'data'),
'image': str(deployLocation/'log'/'img'),
}
files = {
'original': str(data_location/'preprocesseddata.csv.gz'),
'profiled': str(data_location/'postprocesseddata.csv.gz'),
'reduction': str(data_location/'reductiondata.csv'),
'trained': str(data_location/'trainingdata.csv'),
'predicted': str(data_location/'predicteddata.csv.gz'),
'logs': str(deployLocation/'log'/'model_training_logs.log'),
'output': str(deployLocation/'etc'/'output.json'),
}
return( paths['usecase'],paths['deploy'],paths['data'],paths['image'],files['original'],files['profiled'],files['trained'],files['predicted'],files['logs'],files['output'],files['reduction'])
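    # Resulting layout (sketch), e.g. createDeploymentFolders('/aion', 'usecase1', '1'):
    #   /aion/usecase1/1/
    #       data/  log/img/  model/  script/  etc/
    # with the data files and the log/output paths returned in the order listed above.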
# To get parameter list of learner module params
def getEionLearnerConfiguration(self):
try:
if(self.advance['mllearner_config']):
mllearner_config = self.advance['mllearner_config']
if 'categoryBalancingMethod' not in mllearner_config:
mllearner_config['categoryBalancingMethod'] = 'oversample'
if 'testPercentage' not in mllearner_config:
mllearner_config['testPercentage'] = 20
if 'missingTargetCategory' not in mllearner_config:
mllearner_config['missingTargetCategory'] = ''
mllearner_config['modelParams']['classifierModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network']
mllearner_config['modelParams']['classifierModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search']
mllearner_config['modelParams']['classifierModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search']
return mllearner_config
else:
return('NA')
except KeyError:
return('NA')
except Exception as inst:
self.log.info( '\n-----> getEionLearnerConfiguration failed!!!.'+str(inst))
return('NA')
def getEionDeepLearnerConfiguration(self):
return cs.getEionDeepLearnerConfiguration(self)
def gettimegrouper(self):
return cs.gettimegrouper(self)
def getgrouper(self):
return cs.getgrouper(self)
def getfilter(self):
return cs.getfilter(self)
def getNumberofForecasts(self):
return cs.getNumberofForecasts(self)
##To get multivariate feature based anomaly detection status
def getMVFeaturebasedAD(self):
return cs.getMVFeaturebasedAD(self)
def getModulesDetails(self):
problem_type = self.problemType
visualizationstatus = self.getEionVisualizationStatus()
profiler_status = self.getEionProfilerStatus()
selector_status = self.getEionSelectorStatus()
learner_status = self.mllearner
deeplearner_status = self.dllearner
targetFeature = self.getTargetFeatures()
deploy_status = self.getEionDeploymentStatus()
VideoProcessing = False
similarityIdentificationStatus = False
contextualSearchStatus = False
anomalyDetectionStatus = False
if problem_type.lower() == 'survivalanalysis':
survival_analysis_status = True
selector_status = False
associationRuleStatus = 'disable'
timeseriesStatus = 'disable'
learner_status = False
deeplearner_status = False
else:
survival_analysis_status = False
if problem_type.lower() == 'textsimilarity':
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
inputDriftStatus = 'disable'
textSimilarityStatus = True
else:
textSimilarityStatus = False
if problem_type.lower() == 'inputdrift':
inputDriftStatus = True
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
deploy_status = False
visualizationstatus = False
else:
inputDriftStatus = False
if problem_type.lower() == 'outputdrift':
outputDriftStatus = True
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
deploy_status = False
visualizationstatus = False
else:
outputDriftStatus = False
if problem_type.lower() == 'recommendersystem':
recommenderStatus = True
#profiler_status = 'disable'
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
#Task 11190
visualizationstatus = False
else:
recommenderStatus = False
if learner_status:
if(problem_type == 'NA'):
learner_status = True
elif(problem_type.lower() in ['classification','regression','clustering','anomalydetection', 'topicmodelling', 'objectdetection', 'timeseriesanomalydetection']): #task 11997
learner_status = True
else:
learner_status = False
if problem_type.lower() == 'anomalydetection' or problem_type.lower() == 'timeseriesanomalydetection': #task 11997
anomalyDetectionStatus = True
if deeplearner_status:
if(problem_type.lower() == 'na'):
deeplearner_status = True
elif(problem_type.lower() in ['classification','regression']):
deeplearner_status = True
else:
deeplearner_status = False
if(targetFeature == ''):
deeplearner_status = False
if problem_type.lower() == 'timeseriesforecasting': #task 11997
timeseriesStatus = True
profiler_status = True #task 12627
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
timeseriesStatus = False
if problem_type.lower() == 'videoforecasting':
forecastingStatus = True
timeseriesStatus = False
profiler_status = True
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
forecastingStatus = False
if problem_type.lower() == 'imageclassification':
imageClassificationStatus = True
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
imageClassificationStatus = False
if problem_type.lower() == 'associationrules':
associationRuleStatus = True
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
visualizationstatus = False
else:
associationRuleStatus = False
if problem_type.lower() == 'statetransition':
stateTransitionStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
else:
stateTransitionStatus = False
if problem_type.lower() == 'objectdetection':
objectDetectionStatus = True
imageClassificationStatus = False
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
else:
objectDetectionStatus = False
if problem_type.lower() == 'similarityidentification':
similarityIdentificationStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
self.updateEmbeddingForDocSimilarity()
else:
similarityIdentificationStatus = False
if problem_type.lower() == 'contextualsearch':
contextualSearchStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
self.updateEmbeddingForContextualsearch()
else:
contextualSearchStatus = False
if problem_type.lower() == 'textsummarization':
textSummarization = True
profiler_status = False
selector_status = False
else:
textSummarization = False
return problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textSummarization,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus,objectDetectionStatus,stateTransitionStatus,similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
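    # e.g. __get_true_option({'TF_IDF': 'False', 'Glove': 'True'}) -> 'Glove';
    # returns default_value when no value is 'True' (keys here are illustrative).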
def updateEmbeddingForDocSimilarity(self):
method = self.__get_true_option(self.basic['algorithms']['similarityIdentification'])
textConversionMethods = self.advance['profiler']['textConversionMethod']
print("------------"+method+'---------------')
for key in textConversionMethods:
if key == method:
self.advance['profiler']['textConversionMethod'][key] = "True"
else:
self.advance['profiler']['textConversionMethod'][key] = "False"
        if method and method.lower() == 'bm25':
self.advance['profiler']['textConversionMethod']['bm25'] = "True"
def updateEmbeddingForContextualsearch(self):
method = self.__get_true_option(self.basic['algorithms']['contextualSearch'])
textConversionMethods = self.advance['profiler']['textConversionMethod']
print("------------"+method+'---------------')
for key in textConversionMethods:
if key == method:
self.advance['profiler']['textConversionMethod'][key] = "True"
else:
self.advance['profiler']['textConversionMethod'][key] = "False"
        if method and method.lower() == 'bm25':
self.advance['profiler']['textConversionMethod']['bm25'] = "True"
def get_conversion_method(self):
return self.__get_true_option( self.advance['profiler']['textConversionMethod'])
def getAlgoName(self, problem_type=None):
        if problem_type is None:
problem_type = self.__get_true_option(self.basic['algorithms'])
return self.__get_true_option(self.basic['algorithms'][problem_type])
def getScoringCreteria(self):
return self.scoringCreteria
def getVectorDBCosSearchStatus(self,problemType):
if self.basic['preprocessing'][problemType]['VectorDB'] == 'True':
return True
else:
return False
def getVectorDBFeatureDelimitInDoc(self):
return ' ~&~ '
def getEionDeployerConfiguration(self):
return cs.getEionDeployerConfiguration(self)
def getEionAssociationRuleConfiguration(self):
return cs.getEionAssociationRuleConfiguration(self)
def getEionAssociationRuleModelParams(self):
try:
associationConfg = self.advance['associationrule']
if 'modelParams' in associationConfg:
modelParams = associationConfg['modelParams']
                if not isinstance(modelParams, dict) or len(modelParams) == 0:
                    modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in associationConfg:
ml_algorithm_filename = associationConfg['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/AssciationRules_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
modelList = []
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getEionImageAugmentationConfiguration(self):
try:
enable = self.advance['ImageAugmentation'].get('Enable', "False")
keepAugImages = self.advance['ImageAugmentation'].get('KeepAugmentedImages', "False")
if enable == "True":
operations = {}
operations.update(self.advance['ImageAugmentation'].get('Noise', {}))
operations.update(self.advance['ImageAugmentation'].get('Transformation', {}))
if keepAugImages == 'True':
keepAugImages = True
if keepAugImages == 'False':
keepAugImages = False
return True,keepAugImages,{key: True if value.lower() == "true" else False for key, value in operations.items()},self.advance['ImageAugmentation'].get('configuration',{})
else:
return False,False, {},{}
except KeyError:
return False,False, {},{}
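    # Return shape (sketch): (augmentation_enabled, keep_augmented_images,
    # {operation_name: enabled_bool, ...}, augmentation_configuration_dict)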
def getAIONRemoteTraining(self):
try:
if(self.advance['remoteTraining']):
self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False'))
return self.advance['remoteTraining']
else:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
except KeyError:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
def getEionObjectDetectionConfiguration(self):
return cs.getEionObjectDetectionConfiguration(self)
def getEionTimeSeriesConfiguration(self):
return cs.getEionTimeSeriesConfiguration(self)
def getAIONAnomalyDetectionConfiguration(self):
return cs.getAIONAnomalyDetectionConfiguration(self)
def getAIONTSAnomalyDetectionConfiguration(self):
return cs.getAIONTSAnomalyDetectionConfiguration(self)
def getEionVisualizationStatus(self):
return(True)
def getEionVisualizationConfiguration(self):
return cs.getEionVisualizationConfiguration(self)
def getEionRecommenderConfiguration(self):
return cs.getEionRecommenderConfiguration(self)
def getAionNASConfiguration(self):
return cs.getAionNASConfiguration(self)
def getEionProblemType(self):
try:
analysis_type = self.basic['analysisType']
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType:
return self.problemType
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
return cs.getEionProfilerStatus(self)
def getEionSelectorStatus(self):
return cs.getEionSelectorStatus(self)
def getEionDeploymentStatus(self):
return cs.getEionDeploymentStatus(self)
def getEionTimeSeriesModelParams(self):
try:
selectedMLModel = self.mlmodels
tsconfig = self.advance['timeSeriesForecasting'] #task 11997
if 'modelParams' in tsconfig:
modelParams = tsconfig['modelParams']
                if not isinstance(modelParams, dict) or len(modelParams) == 0:
                    modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in tsconfig:
ml_algorithm_filename = tsconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/TS_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
#Modified getting modelParams as small letters
modelParams = {k.lower(): v for k, v in modelParams.items()}
#print("\n modelParams: type \n",modelParams,type(modelParams))
if selectedMLModel != '':
#if selectedMLModel.lower() != 'var':
if ('var' not in selectedMLModel.lower()):
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
#Modified getting modelList as small letters
modelList = [strMP.lower() for strMP in modelList]
                    for mod in modelList[:]:  # iterate over a copy; removing from the live list skips items
                        if mod not in modelParams:
                            self.log.info("'"+mod+"' Not Available for Particular Problem Type")
                            modelList.remove(mod)
else:
modelList = selectedMLModel.split(",")
#Modified
modelList = [strMP.lower() for strMP in modelList]
modelList = list(map(str.strip, modelList))
            else:
                # modelParams keys were already lowercased above, so use them directly
                modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
#NAS status
def getNASStatus(self):
return cs.getNASStatus(self)
def getEionImageLearnerModelParams(self):
try:
selectedDLModel = self.dlmodels
learnerconfig = self.advance['image_config']
modelList = selectedDLModel.split(",")
return(learnerconfig,modelList)
except KeyError:
learnerconfig = []
modelList=[]
return(learnerconfig,modelList)
def getAionObjectDetectionModelParams(self):
try:
selectedDLModel = self.dlmodels
modelList = selectedDLModel.split(",")
return(modelList)
except KeyError:
modelList=[]
return(modelList)
def getEionVideoLearnerModelParams(self):
try:
selectedDLModel = self.basic['selected_DL_Models']
learnerconfig = self.advance['video_config']
modelList = selectedDLModel.split(",")
return(learnerconfig,modelList)
except KeyError:
learnerconfig = []
modelList=[]
return(learnerconfig,modelList)
def getEionDeepLearnerModelParams(self,modelType):
try:
            numberofModels = 0
            dl_algorithm_filename = ''
            requiredalgo = ''  # default so the lookup below cannot raise NameError
            if(modelType == 'classification'):
                requiredalgo = 'classifierModelParams'
            elif(modelType == 'regression'):
                requiredalgo = 'regressorModelParams'
            elif(modelType == 'TextClassification'):
                requiredalgo = 'classifierModelParams'
            elif(modelType == 'clustering'):
                requiredalgo = 'clusteringModelParams'
learnerconfig = self.advance['dllearner_config']
selectedDLModel = self.dlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
                if not isinstance(modelParams, dict) or len(modelParams) == 0:
                    modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
dl_algorithm_filename = learnerconfig['modelparamsfile']
if(dl_algorithm_filename == ''):
dl_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/DL_Defaults.json'
modelParams = json.loads(open(dl_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
if selectedDLModel != '':
modelList = selectedDLModel.split(",")
modelList = list(map(str.strip, modelList))
                for mod in modelList[:]:  # iterate over a copy; removing from the live list skips items
                    if mod not in modelParams:
                        self.log.info("'"+mod+"' Not Available for Particular Problem Type")
                        modelList.remove(mod)
else:
modelList = list(modelParams.keys())
if(len(modelList) == 0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getEionLearnerModelParams(self,modelType):
try:
            numberofModels = 0
            ml_algorithm_filename = ''
            requiredalgo = ''  # default so the lookup below cannot raise NameError
            if(modelType == 'classification'):
                requiredalgo = 'classifierModelParams'
            elif(modelType == 'regression'):
                requiredalgo = 'regressorModelParams'
            elif(modelType == 'TextClassification'):
                requiredalgo = 'classifierModelParams'
            elif(modelType == 'clustering'):
                requiredalgo = 'clusteringModelParams'
            elif(modelType == 'topicmodelling'):
                requiredalgo = 'topicModellingParams'
learnerconfig = self.advance['mllearner_config']
selectedMLModel = self.mlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
                if not isinstance(modelParams, dict) or len(modelParams) == 0:
                    modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
ml_algorithm_filename = learnerconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
if selectedMLModel != '':
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
                for mod in modelList[:]:  # iterate over a copy; removing from the live list skips items
                    if mod not in modelParams:
                        self.log.info("'"+mod+"' Not Available for Particular Problem Type")
                        modelList.remove(mod)
else:
modelList = list(modelParams.keys())
if(len(modelList) ==0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getTargetFeatures(self):
return cs.getTargetFeatures(self)
def getModelFeatures(self):
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
return(modFeatures)
else:
return('NA')
except KeyError:
return('NA')
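    # e.g. basic['trainingFeatures'] == 'sepal_length, sepal_width , petal_length'
    # is normalised to 'sepal_length,sepal_width,petal_length' (names illustrative).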
def getFolderSettings(self):
return cs.getFolderSettings(self)
def getAIONLocationSettings(self):
self.iter_name = self.basic['modelName']
self.iteration_version = self.basic['modelVersion']
if(self.basic['dataLocation']):
dataLocation = self.basic['dataLocation']
else:
dataLocation = 'NA'
if(self.basic['deployLocation']):
deployLocation = self.basic['deployLocation']
else:
deployLocation = 'NA'
try:
if 'fileSettings' in self.basic:
csv_setting = self.basic['fileSettings']
if 'delimiters' in csv_setting:
delimiter = csv_setting['delimiters']
if delimiter.lower() == 'tab' or delimiter.lower() == '\t':
delimiter = '\t'
elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';':
delimiter = ';'
elif delimiter.lower() == 'comma' or delimiter.lower() == ',':
delimiter = ','
elif delimiter.lower() == 'space' or delimiter.lower() == ' ':
delimiter = ' '
elif delimiter.lower() == 'other':
if 'other' in csv_setting:
delimiter = csv_setting['other']
else:
delimiter = ','
elif delimiter == '':
delimiter = ','
else:
delimiter = ','
if 'textqualifier' in csv_setting:
textqualifier = csv_setting['textqualifier']
else:
textqualifier = '"'
else:
delimiter = ','
textqualifier = '"'
except KeyError:
delimiter = ','
textqualifier = '"'
return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier)
def getFeatures(self):
try:
if(self.basic['dateTimeFeature']):
dtFeatures = self.basic['dateTimeFeature']
dtFeatures = dtFeatures.split(",")
dtFeatures = list(map(str.strip, dtFeatures))
dtFeatures = ",".join([dtf for dtf in dtFeatures])
else:
dtFeatures = 'NA'
except KeyError:
dtFeatures = 'NA'
try:
if(self.basic['indexFeature']):
iFeatures = self.basic['indexFeature']
iFeatures = iFeatures.split(",")
iFeatures = list(map(str.strip, iFeatures))
iFeatures = ",".join([dif for dif in iFeatures])
else:
iFeatures = 'NA'
except KeyError:
iFeatures = 'NA'
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
else:
modFeatures = 'NA'
except KeyError:
modFeatures = 'NA'
return(dtFeatures,iFeatures,modFeatures)
def setModels(self):
return cs.setModels(self)
def readConfigurationFile(self,path):
return cs.readConfigurationFile(self, path)
def getFilterExpression(self):
return cs.getFilterExpression(self)
def getSurvivalEventColumn(self):
return cs.getSurvivalEventColumn(self)
def getSurvivalDurationColumn(self):
return cs.getSurvivalDurationColumn(self) |
pipeline_config_reader.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
def getDebiasingDetail(self):
try:
if(self.advance['profiler']['deBiasing']):
dlconfig = self.advance['profiler']['deBiasing']
return dlconfig
else:
return('NA')
except KeyError:
return('NA')
def getFirstDocumentFeature(self):
if 'firstDocFeature' in self.basic:
firstDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature']
else:
firstDocFeature = ''
return(firstDocFeature)
def getSecondDocumentFeature(self):
if 'secondDocFeature' in self.basic:
secondDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature']
else:
secondDocFeature = ''
return(secondDocFeature)
def getEionInliers(self):
if 'inlierLabels' in self.basic:
self.inlierLabels = self.basic['inlierLabels']
else:
self.inlierLabels = 'NA'
return (self.inlierLabels)
def getEionProfilerConfigurarion(self):
try:
if(self.advance['profiler']):
return self.advance['profiler']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTestTrainPercentage(self):
try:
return (int(self.advance.get('testPercentage',20)))
except KeyError:
return(20)
def getAIONDataBalancingMethod(self):
try:
if(self.advance['categoryBalancingMethod']):
return self.advance['categoryBalancingMethod']
else:
return("oversample")
except KeyError:
return("oversample")
def getEionSelectorConfiguration(self):
try:
if(self.advance['selector']):
return self.advance['selector']
else:
return('NA')
except KeyError:
return('NA')
def getEionDeepLearnerConfiguration(self):
try:
if(self.advance['dllearner_config']):
dlconfig = self.advance['dllearner_config']
if 'categoryBalancingMethod' not in dlconfig:
dlconfig['categoryBalancingMethod'] = ''
if 'testPercentage' not in dlconfig: #Unnati
dlconfig['testPercentage'] = 20 #Unnati
return dlconfig
else:
return('NA')
except KeyError:
return('NA')
def gettimegrouper(self):
try:
if(self.basic['timegrouper']):
return self.basic['timegrouper']
else:
return 'NA'
except:
return 'NA'
def getgrouper(self):
try:
if(self.basic['group']):
return self.basic['group']
else:
return 'NA'
except:
return 'NA'
def getfilter(self):
try:
if(self.basic['filter']):
return self.basic['filter']
else:
return 'NA'
except:
return 'NA'
def getNumberofForecasts(self):
try:
if(self.basic['noofforecasts']):
return int(self.basic['noofforecasts'])
else:
return (-1)
except:
return (-1)
##To get multivariate feature based anomaly detection status
def getMVFeaturebasedAD(self):
try:
dict_ae=self.basic['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder'] #task 11997
if(dict_ae):
return (dict_ae)
else:
return (-1)
except:
return (-1)
def getEionDeployerConfiguration(self):
try:
if(self.advance['deployer']):
return self.advance['deployer']
else:
return('NA')
except KeyError:
return('NA')
def getEionAssociationRuleConfiguration(self):
try:
if(self.advance['associationrule']):
return self.advance['associationrule']
else:
return('NA')
except KeyError:
return('NA')
def getEionObjectDetectionConfiguration(self):
try:
if(self.advance['objectDetection']):
return self.advance['objectDetection']
else:
return('NA')
except KeyError:
return('NA')
def getEionTimeSeriesConfiguration(self):
try:
if(self.advance['timeSeriesForecasting']): #task 11997
return self.advance['timeSeriesForecasting']
else:
return('NA')
except KeyError:
return('NA')
def getAIONAnomalyDetectionConfiguration(self):
try:
if(self.advance['anomalyDetection']):
return self.advance['anomalyDetection']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTSAnomalyDetectionConfiguration(self): #task 11997
try:
if(self.advance['timeSeriesAnomalyDetection']):
return self.advance['timeSeriesAnomalyDetection']
else:
return('NA')
except KeyError:
return('NA')
def getEionVisualizationConfiguration(self):
try:
if(self.advance['visualization_settings']):
return(self.advance['visualization_settings'])
else:
return('NA')
except KeyError:
return('NA')
def getEionRecommenderConfiguration(self):
try:
if(self.advance['recommenderparam']):
return self.advance['recommenderparam']
else:
return('NA')
except KeyError:
return('NA')
def getAionNASConfiguration(self):
try:
if(self.advance['neuralarchsearch']):
return self.advance['neuralarchsearch']
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
try:
if(self.basic['output']['profilerStage']):
return(self.basic['output']['profilerStage'])
else:
return('false')
except KeyError:
return('false')
def getEionSelectorStatus(self):
try:
if(self.basic['output']['selectorStage']):
return(self.basic['output']['selectorStage'])
else:
return('disable')
except KeyError:
return('disable')
def getEionDeploymentStatus(self):
try:
if(self.basic['output']['deploymentStage']):
return(self.basic['output']['deploymentStage'])
else:
return(False)
except KeyError:
return(False)
def __get_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def getNASStatus(self):
try:
if(self.dlmodels):
return(self.dlmodels)
else:
return('NA')
except KeyError:
return('NA')
def getTargetFeatures(self):
try:
if(self.basic['targetFeature']):
return(self.basic['targetFeature'])
else:
return('')
except KeyError:
return('')
def getFolderSettings(self):
try:
if(self.basic['folderSettings']):
return(self.basic['folderSettings'])
else:
return('NA')
except KeyError:
return('NA')
def getFilterExpression(self):
try:
if(self.basic['filterExpression']):
return (self.basic['filterExpression'])
else:
return None
except KeyError:
return None
def setModels(self):
try:
analysis_type = self.basic['analysisType']
#print(analysis_type)
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType == 'summarization':
self.problemType = 'classification'
self.summarize = True
if self.problemType not in ['inputDrift','outputDrift']:
conf_algorithm = self.basic['algorithms'][self.problemType]
else:
conf_algorithm = {}
self.mlmodels=''
self.dlmodels=''
self.scoringCreteria = 'NA'
if self.problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997
scorCre = self.basic['scoringCriteria'][self.problemType]
for key in scorCre.keys():
if scorCre[key] == 'True':
self.scoringCreteria = key
break
if self.problemType.lower() == 'timeseriesforecasting': #task 11997
self.mllearner=False #task 11997 removed initialising self.ml models as timeSeriesForecasting
if self.scoringCreteria == 'Mean Squared Error':
self.scoringCreteria = 'MSE'
if self.scoringCreteria == 'Root Mean Squared Error':
self.scoringCreteria = 'RMSE'
if self.scoringCreteria == 'Mean Absolute Error':
self.scoringCreteria = 'MAE'
if self.scoringCreteria == 'R-Squared':
self.scoringCreteria = 'R2'
if self.problemType in ['similarityIdentification','contextualSearch']:
self.scoringCreteria = __get_true_option(self.basic['scoringCriteria'][self.problemType], "Cosine Similarity")
if self.problemType in ['classification','regression']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if key not in ['Recurrent Neural Network','Convolutional Neural Network (1D)','Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','GoogleModelSearch_DNN']:
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
else:
if self.dlmodels != '':
self.dlmodels += ','
self.dlmodels += key
elif self.problemType in ['videoForecasting','imageClassification','objectDetection']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.dlmodels != '':
self.dlmodels += ','
self.dlmodels += key
elif self.problemType == 'recommenderSystem':
problem_model = ''
for key in conf_algorithm.keys():
if key not in ['itemRatingConfig','textSimilarityConfig']:
if conf_algorithm[key] == 'True':
problem_model = key
break
if problem_model == 'ItemRating':
self.mlmodels = 'SVD'
elif problem_model == 'AssociationRules-Apriori':
self.mlmodels = 'Apriori'
self.problemType = 'AssociationRules'
elif problem_model == 'TextSimilarity-Siamese':
self.mlmodels = 'Siamese'
self.problemType = 'TextSimilarity'
else:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
self.mllearner = False
self.dllearner = False
if self.mlmodels != '':
self.mllearner = True
if self.advance['mllearner_config']['Stacking (Ensemble)'] == 'True':
self.mlmodels += ','
self.mlmodels += 'Stacking (Ensemble)'
if self.advance['mllearner_config']['Voting (Ensemble)'] == 'True':
self.mlmodels += ','
self.mlmodels += 'Voting (Ensemble)'
if self.dlmodels != '':
self.dllearner = True
return('done')
except KeyError:
return('NA')
def readConfigurationFile(self, path):
if isinstance( path, dict):
self.data = path
else:
with open(path, 'r') as data_file:
self.data = json.load(data_file) #loading json object as python dictionary
self.basic = self.data['basic']
self.advance = self.data['advance']
problemType = self.setModels()
    if 'output' in self.basic:
        if(self.basic['output']['profilerStage']):
            if not isinstance(self.basic['output']['profilerStage'], str):
                msg = "JSON Validation Fail: profilerStage should be a string with value either true or false"
                self.log.info(msg)
                return False,msg
            if((self.basic['output']['profilerStage'].lower() == 'true') and ('profiler' not in self.advance)):
                msg = "JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"
                self.log.info(msg)
                return False,msg
            if not isinstance(self.advance['profiler'], dict):
                msg = "JSON Validation Fail: Error: Profiler Configuration Syntax"
                self.log.info(msg)
                return False,msg
            if(self.basic['output']['profilerStage'].lower() not in ('true','false')):
                msg = "JSON Validation Fail: profilerStage is not defined correctly, it should be either true or false"
                self.log.info(msg)
                return False,msg
        if(self.basic['output']['selectorStage']):
            if not isinstance(self.basic['output']['selectorStage'], str):
                msg = "JSON Validation Fail: selectorStage should be a string with value either true or false"
                self.log.info(msg)
                return False,msg
            if((self.basic['output']['selectorStage'].lower() == 'true') and ('selector' not in self.advance)):
                msg = "JSON Validation Fail: Selector Configuration Not Found"
                self.log.info(msg)
                return False,msg
            if(self.basic['output']['selectorStage'].lower() not in ('true','false')):
                msg = "JSON Validation Fail: selectorStage is not defined correctly, it should be either true or false"
                self.log.info(msg)
                return False,msg
            if not isinstance(self.advance['selector'], dict):
                msg = "JSON Validation Fail: Error: Syntax of Selector"
                self.log.info(msg)
                return False,msg
    if 'dataLocation' not in self.basic:
        msg = "JSON Validation Fail: Data Location Not Defined"
        self.log.info(msg)
        return False,msg
    if 'deployLocation' not in self.basic:
        msg = "JSON Validation Fail: Deploy Location Not Defined"
        self.log.info(msg)
        return False,msg
    if 'deployment' in self.basic:
        if not isinstance(self.basic['deployment'], str):
            msg = "JSON Validation Fail: deployment should be a string with value either enable or disable"
            self.log.info(msg)
            return False,msg
        if(self.basic['deployment'] == 'enable'):
            if 'deployer' in self.advance:
                if not isinstance(self.advance['deployer'], dict):
                    msg = "JSON Validation Fail: deployer configuration should be a nested json object"
                    self.log.info(msg)
                    return False,msg
            else:
                msg = "JSON Validation Fail: deployer configuration is missing"
                self.log.info(msg)
                return False,msg
    return True,'Good'
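# Expected top-level config shape (sketch inferred from the checks above):
#   {"basic":   {"output": {"profilerStage": "True", "selectorStage": "True", ...},
#                "dataLocation": "...", "deployLocation": "...", ...},
#    "advance": {"profiler": {...}, "selector": {...}, ...}}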
def getSurvivalEventColumn(self):
try:
if(self.advance['survival_config']):
survival_config = self.advance['survival_config']
if 'input' in survival_config:
inp = survival_config['input']
if not isinstance(inp, dict):
return None
elif 'event_col' in inp:
e = inp['event_col']
if not isinstance(e, str):
return None
return (e)
else:
return None
else:
return None
else:
return None
except KeyError:
return None
def getSurvivalDurationColumn(self):
try:
if(self.advance['survival_config']):
survival_config = self.advance['survival_config']
if 'input' in survival_config:
inp = survival_config['input']
if not isinstance(inp, dict):
return None
elif 'duration_col' in inp:
t = inp['duration_col']
if not isinstance(t, str):
return None
return (t)
else:
return None
else:
return None
else:
return None
except KeyError:
return None
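if __name__ == '__main__':
    # Minimal smoke test (illustrative sketch): these helpers are written to be
    # called with a configuration-manager instance; '_Cfg' is a hypothetical
    # stand-in exposing the 'basic'/'advance' attributes they read.
    class _Cfg:
        def __init__(self):
            self.basic = {'targetFeature': 'Species', 'inlierLabels': 'NA'}
            self.advance = {}
    cfg = _Cfg()
    print(getTargetFeatures(cfg))  # -> 'Species'
    print(getEionInliers(cfg))     # -> 'NA'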
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
online_pipeline_config.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import logging
import os
import shutil
import time
from sys import platform
from distutils.util import strtobool
# Base class for the EION online-training configuration manager: reads the required params from eion.json, initialises the parameter list, stores them in variables and returns them to the caller or to external modules.
class OTAionConfigManager:
    # eion configuration Constructor
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
# To get parameter list of configuration module from json, this will be passed as dict{}
def getEionProfilerConfigurarion(self):
try:
if(self.advance['profiler']):
return self.advance['profiler']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTestTrainPercentage(self):
try:
if(self.advance['testPercentage']):
return int(self.advance['testPercentage'])
else:
return(80)
except KeyError:
return(80)
def getAIONDataBalancingMethod(self):
try:
if(self.advance['categoryBalancingMethod']):
return self.advance['categoryBalancingMethod']
else:
return("oversample")
except KeyError:
return("oversample")
# To get parameter list of selector module params
def getEionSelectorConfiguration(self):
try:
if(self.advance['selector']):
return self.advance['selector']
else:
return('NA')
except KeyError:
return('NA')
def createDeploymentFolders(self,deployFolder,iterName,iterVersion):
usecaseFolderLocation = os.path.join(deployFolder,iterName)
os.makedirs(usecaseFolderLocation,exist_ok = True)
deployLocation = os.path.join(usecaseFolderLocation,str(iterVersion))
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
time.sleep(2)
os.makedirs(deployLocation)
dataFolderLocation = os.path.join(deployLocation,'data')
try:
os.makedirs(dataFolderLocation)
except OSError as e:
print("\nDeployment Data Folder Already Exists")
logFolderLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logFolderLocation)
except OSError as e:
print("\nLog Folder Already Exists")
etcFolderLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcFolderLocation)
except OSError as e:
print("\ETC Folder Already Exists")
prodFolderLocation = os.path.join(deployLocation,'production')
os.makedirs(prodFolderLocation)
profilerFolderLocation = os.path.join(prodFolderLocation, 'profiler')
os.makedirs(profilerFolderLocation)
modelFolderLocation = os.path.join(prodFolderLocation, 'model')
os.makedirs(modelFolderLocation)
original_data_file = os.path.join(dataFolderLocation,'preprocesseddata.csv')
profiled_data_file = os.path.join(dataFolderLocation,'postprocesseddata.csv')
trained_data_file = os.path.join(dataFolderLocation,'trainingdata.csv')
predicted_data_file = os.path.join(dataFolderLocation,'predicteddata.csv')
logFileName=os.path.join(logFolderLocation,'model_training_logs.log')
outputjsonFile=os.path.join(deployLocation,'etc','output.json')
return(deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile)
# To get parameter list of learner module params
def getEionLearnerConfiguration(self):
try:
if(self.advance['onlinelearner_config']):
                mllearner_config = self.advance['onlinelearner_config']
if 'categoryBalancingMethod' not in mllearner_config:
mllearner_config['categoryBalancingMethod'] = 'oversample'
if 'testPercentage' not in mllearner_config:
mllearner_config['testPercentage'] = 20
if 'missingTargetCategory' not in mllearner_config:
mllearner_config['missingTargetCategory'] = ''
return mllearner_config
else:
return('NA')
except KeyError:
return('NA')
except Exception as inst:
self.log.info( '\n-----> getEionLearnerConfiguration failed!!!.'+str(inst))
return('NA')
def gettimegrouper(self):
try:
if(self.basic['timegrouper']):
return self.basic['timegrouper']
else:
return 'NA'
except:
return 'NA'
def getgrouper(self):
try:
if(self.basic['group']):
return self.basic['group']
else:
return 'NA'
except:
return 'NA'
def getfilter(self):
try:
if(self.basic['filter']):
return self.basic['filter']
else:
return 'NA'
except:
return 'NA'
def getModulesDetails(self):
problem_type = self.problemType
visualizationstatus = self.getEionVisualizationStatus()
profiler_status = self.getEionProfilerStatus()
selector_status = self.getEionSelectorStatus()
learner_status = self.mllearner
targetFeature = self.getTargetFeatures()
deploy_status = self.getEionDeploymentStatus()
if learner_status:
if(problem_type == 'NA'):
learner_status = True
elif(problem_type.lower() in ['classification','regression']):
learner_status = True
else:
learner_status = False
return problem_type,targetFeature,profiler_status,selector_status,learner_status,visualizationstatus,deploy_status
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def getAlgoName(self, problem_type=None):
        if problem_type is None:
problem_type = self.__get_true_option(self.basic['algorithms'])
return self.__get_true_option(self.basic['algorithms'][problem_type])
def getScoringCreteria(self):
return (self.scoringCreteria)
def getEionDeployerConfiguration(self):
try:
if(self.advance['deployer']):
return self.advance['deployer']
else:
return('NA')
except KeyError:
return('NA')
def getAIONRemoteTraining(self):
try:
if(self.advance['remoteTraining']):
self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False'))
return self.advance['remoteTraining']
else:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
except KeyError:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
def getEionVisualizationStatus(self):
return(True)
def getEionVisualizationConfiguration(self):
try:
if(self.advance['visualization_settings']):
return(self.advance['visualization_settings'])
else:
return('NA')
except KeyError:
return('NA')
def getEionBatchLearningStatus(self):
try:
if(self.basic['output']['batchLearning']):
return(self.basic['output']['batchLearning'])
else:
return('disable')
except KeyError:
return('disable')
def getEionProblemType(self):
try:
analysis_type = self.basic['analysisType']
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType:
return self.problemType
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
try:
if(self.basic['output']['profilerStage']):
return(self.basic['output']['profilerStage'])
else:
return('false')
except KeyError:
return('false')
#To get eion selector module status (enable/disable/none)
def getEionSelectorStatus(self):
try:
if(self.basic['output']['selectorStage']):
return(self.basic['output']['selectorStage'])
else:
return('disable')
except KeyError:
return('disable')
def getEionDeploymentStatus(self):
try:
if(self.basic['output']['deploymentStage']):
return(self.basic['output']['deploymentStage'])
else:
return(False)
except KeyError:
return(False)
def getEionLearnerModelParams(self,modelType):
try:
            numberofModels = 0
            ml_algorithm_filename = ''
            requiredalgo = ''  # default so the lookup below cannot raise NameError
            if(modelType == 'classification'):
                requiredalgo = 'classifierModelParams'
            elif(modelType == 'regression'):
                requiredalgo = 'regressorModelParams'
learnerconfig = self.advance['onlinelearner_config']
selectedMLModel = self.mlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
                if not isinstance(modelParams, dict) or len(modelParams) == 0:
                    modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
ml_algorithm_filename = learnerconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
if selectedMLModel != '':
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
                for mod in modelList[:]:  # iterate over a copy; removing from the live list skips items
                    if mod not in modelParams:
                        self.log.info("'"+mod+"' Not Available for Particular Problem Type")
                        modelList.remove(mod)
else:
modelList = list(modelParams.keys())
if(len(modelList) ==0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getTargetFeatures(self):
try:
if(self.basic['targetFeature']):
return(self.basic['targetFeature'])
else:
return('')
except KeyError:
return('')
def getModelFeatures(self):
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
return(modFeatures)
else:
return('NA')
except KeyError:
return('NA')
def getFolderSettings(self):
try:
if(self.basic['folderSettings']):
return(self.basic['folderSettings'])
else:
return('NA')
except KeyError:
return('NA')
def getAIONLocationSettings(self):
self.iter_name = self.basic['modelName']
self.iteration_version = self.basic['modelVersion']
if(self.basic['dataLocation']):
dataLocation = self.basic['dataLocation']
else:
dataLocation = 'NA'
if(self.basic['deployLocation']):
deployLocation = self.basic['deployLocation']
else:
deployLocation = 'NA'
try:
if 'csv_settings' in self.basic:
csv_setting = self.basic['csv_settings']
if 'delimiters' in csv_setting:
delimiter = csv_setting['delimiters']
if delimiter.lower() == 'tab':
delimiter = '\t'
elif delimiter.lower() == 'semicolon':
delimiter = ';'
elif delimiter.lower() == 'comma':
delimiter = ','
elif delimiter.lower() == 'space':
delimiter = ' '
elif delimiter.lower() == 'other':
if 'other' in csv_setting:
delimiter = csv_setting['other']
else:
delimiter = ','
else:
delimiter = ','
else:
delimiter = ','
if 'textqualifier' in csv_setting:
textqualifier = csv_setting['textqualifier']
else:
textqualifier = '"'
else:
delimiter = ','
textqualifier = '"'
except KeyError:
delimiter = ','
textqualifier = '"'
return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier)
def getFeatures(self):
try:
if(self.basic['dateTimeFeature']):
dtFeatures = self.basic['dateTimeFeature']
dtFeatures = dtFeatures.split(",")
dtFeatures = list(map(str.strip, dtFeatures))
dtFeatures = ",".join([dtf for dtf in dtFeatures])
else:
dtFeatures = 'NA'
except KeyError:
dtFeatures = 'NA'
try:
if(self.basic['indexFeature']):
iFeatures = self.basic['indexFeature']
iFeatures = iFeatures.split(",")
iFeatures = list(map(str.strip, iFeatures))
iFeatures = ",".join([dif for dif in iFeatures])
else:
iFeatures = 'NA'
except KeyError:
iFeatures = 'NA'
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
else:
modFeatures = 'NA'
except KeyError:
modFeatures = 'NA'
return(dtFeatures,iFeatures,modFeatures)
def setModels(self):
try:
analysis_type = self.basic['analysisType']
#print(analysis_type)
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType not in ['inputDrift','outputDrift']:
conf_algorithm = self.basic['algorithms'][self.problemType]
else:
conf_algorithm = {}
self.mlmodels=''
self.dlmodels=''
self.scoringCreteria = 'NA'
if self.problemType in ['classification','regression']:
scorCre = self.basic['scoringCriteria'][self.problemType]
for key in scorCre.keys():
if scorCre[key] == 'True':
self.scoringCreteria = key
break
#print(self.problemType)
#print(self.scoringCreteria)
if self.scoringCreteria == 'Mean Squared Error':
self.scoringCreteria = 'MSE'
if self.scoringCreteria == 'Root Mean Squared Error':
self.scoringCreteria = 'RMSE'
if self.scoringCreteria == 'Mean Absolute Error':
self.scoringCreteria = 'MAE'
if self.scoringCreteria == 'R-Squared':
self.scoringCreteria = 'R2'
if self.problemType in ['classification','regression']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
else:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
self.mllearner = False
if self.mlmodels != '':
self.mllearner = True
return('done')
except KeyError:
return('NA')
def readConfigurationFile(self,path):
with open(path, 'rb') as data_file:
try:
self.data = json.load(data_file) #loading json object as python dictionary
#print(self.data)
self.basic = self.data['basic']
self.advance = self.data['advance']
problemType = self.setModels()
if(self.basic['output']['profilerStage']):
    if not isinstance(self.basic['output']['profilerStage'], str):
        msg = "JSON Validation Fail: Profiling should be a string with value either true or false"
        self.log.info(msg)
        return False,msg
    if (self.basic['output']['profilerStage'].lower() == 'true') and ('profiler' not in self.advance):
        msg = "JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"
        self.log.info(msg)
        return False,msg
    if self.basic['output']['profilerStage'].lower() not in ('true', 'false'):
        msg = "JSON Validation Fail: Profiling is not defined correctly, it should be either true or false"
        self.log.info(msg)
        return False,msg
    if 'profiler' in self.advance and not isinstance(self.advance['profiler'], dict):
        msg = "JSON Validation Fail: Error: Profiler Configuration Syntax"
        self.log.info(msg)
        return False,msg
if(self.basic['output']['selectorStage']):
    if not isinstance(self.basic['output']['selectorStage'], str):
        msg = "JSON Validation Fail: Selection should be a string with value either true or false"
        self.log.info(msg)
        return False,msg
    if (self.basic['output']['selectorStage'].lower() == 'true') and ('selector' not in self.advance):
        msg = "JSON Validation Fail: Selector Configuration Not Found"
        self.log.info(msg)
        return False,msg
    if self.basic['output']['selectorStage'].lower() not in ('true', 'false'):
        msg = "JSON Validation Fail: Selection is not defined correctly, it should be either true or false"
        self.log.info(msg)
        return False,msg
    if 'selector' in self.advance and not isinstance(self.advance['selector'], dict):
        msg = "JSON Validation Fail: Error: Syntax of Selector"
        self.log.info(msg)
        return False,msg
if 'dataLocation' not in self.basic:
msg = "JSON Validation Fail: Data Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployLocation' not in self.basic:
msg = "JSON Validation Fail: Deploy Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployment' in self.basic:
if not isinstance(self.basic['deployment'], str):
    msg = "JSON Validation Fail: deployment should be a string with value either enable or disable"
    self.log.info(msg)
    return False,msg
if(self.basic['deployment'] == 'enable'):
    if 'deployer' in self.advance:
        if not isinstance(self.advance['deployer'], dict):
            msg = "JSON Validation Fail: deployer configuration should be a nested JSON object"
            self.log.info(msg)
            return False,msg
else:
msg = "JSON Validation Fail: deployer configuration is missing"
self.log.info(msg)
return False,msg
except ValueError as e:
    print("Error: " + str(e))
    return False, str(e)
return True,'Good'
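# Usage sketch (illustrative; the enclosing configuration-reader class is
# defined earlier in this file, so the name below is hypothetical):
#   reader = AionConfigReader()
#   ok, msg = reader.readConfigurationFile('aion_config.json')
#   if ok:
#       dtFeatures, idxFeatures, trainFeatures = reader.getFeatures()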
|
config_gen.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
from distutils.util import strtobool
class code_configure():
def __init__(self):
self.code_config = {}
self.unsupported_algo = []
self.supported_model = {"classification":{"Logistic Regression": "LogisticRegression", "Naive Bayes": "GaussianNB", "Decision Tree": "DecisionTreeClassifier", "Random Forest": "RandomForestClassifier", "Support Vector Machine": "SVC", "K Nearest Neighbors": "KNeighborsClassifier", "Gradient Boosting": "GradientBoostingClassifier", "Extreme Gradient Boosting (XGBoost)":"XGBClassifier", "Light Gradient Boosting (LightGBM)": "LGBMClassifier","Categorical Boosting (CatBoost)": "CatBoostClassifier"},
"regression":{"Linear Regression": "LinearRegression", "Lasso": "Lasso", "Ridge": "Ridge", "Decision Tree": "DecisionTreeRegressor", "Random Forest": "RandomForestRegressor", "Extreme Gradient Boosting (XGBoost)": "XGBRegressor", "Light Gradient Boosting (LightGBM)": "LGBMRegressor","Categorical Boosting (CatBoost)": "CatBoostRegressor"},"timeSeriesForecasting":{"MLP": "MLP","LSTM":"LSTM"}} #task 11997
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def __get_true_options(self, d):
options = []
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
options.append(k)
return options
def __get_scoring_criteria(self, criteria):
mapping = {'Mean Squared Error':'MSE', 'Root Mean Squared Error':'RMSE','Mean Absolute Error':'MAE','R-Squared':'R2'}
if criteria in mapping.keys():
return mapping[criteria]
return criteria
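# Example (illustrative): __get_scoring_criteria('Root Mean Squared Error') -> 'RMSE',
# while a name outside the mapping such as 'Accuracy' is returned unchanged.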
def __get_feature_selector(self, selector_config):
feature_selector = []
if self.__get_true_option(selector_config['selectionMethod']) == 'featureSelection':
feature_selector = self.__get_true_options(selector_config['featureSelection'])
return feature_selector
def __get_feature_reducer(self, selector_config):
feature_reducer = ""
if self.__get_true_option(selector_config['selectionMethod']) == 'featureEngineering':
feature_reducer = self.__get_true_option(selector_config['featureEngineering'],'pca').lower()
return feature_reducer
def __getOptimizationParam(self, param_config):
param_dict = {}
param_dict['iterations'] = int(param_config['iterations'])
param_dict['trainTestCVSplit'] = int(param_config['trainTestCVSplit'])
param_dict['geneticparams'] = param_config['geneticparams']
return param_dict
def add_model(self, model_name, config):
if not self.unsupported_algo:
self.code_config["algorithms"][model_name] = config.copy()
def update_config(self, key, value):
self.code_config[key] = value
def save_config(self, file_path):
if Path(file_path).is_dir():
file_path = Path(file_path)/'etc/code_config.json'
with open(file_path, "w") as f:
if not self.unsupported_algo:
json.dump(self.code_config, f, indent=4)
else:
if 'ensemble' in self.unsupported_algo:
json.dump({"Status":"Failure","msg":"Ensemble is not supported","error":"Ensemble is not supported"}, f) # keep error key
elif 'text_features' in self.unsupported_algo:
json.dump({"Status":"Failure","msg":"Text feature processing is not supported","error":"Text feature processing is not supported"}, f) # keep error key
else:
json.dump({"Status":"Failure","msg":f"Unsupported model {self.unsupported_algo}","error":f"Unsupported model {self.unsupported_algo}"}, f) # keep error key
def __is_algo_supported(self, config):
problem_type = self.__get_true_option(config['basic']['analysisType'])
if problem_type not in self.supported_model.keys():
self.unsupported_algo = [problem_type]
return False
algos = config['basic']['algorithms'][problem_type]
algos = self.__get_true_options(algos)
self.unsupported_algo = [x for x in algos if x not in self.supported_model[problem_type].keys()]
if self.unsupported_algo:
return False
return True
def create_config(self, config):
if isinstance(config, str):
with open(config,'r') as f:
config = json.load(f)
problem_type = self.__get_true_option(config['basic']['analysisType'])
self.code_config["problem_type"] = problem_type.lower()
if not self.__is_algo_supported(config):
return
if 'ensemble' in config['advance']['mllearner_config']:
if config['advance']['mllearner_config']['ensemble'] == 'enable':
self.unsupported_algo = ['ensemble']
return
self.code_config["modelName"] = config['basic']['modelName']
self.code_config["modelVersion"] = config['basic']['modelVersion']
if config['basic']['folderSettings']['fileType'].lower() == 'url':
self.code_config["dataLocation"] = config['basic']['folderSettings']['labelDataFile']
else:
self.code_config["dataLocation"] = config['basic']['dataLocation']
self.code_config["target_feature"] = config['basic']['targetFeature']
trainingfeatures = config['basic']['trainingFeatures'].split(',')
datetimeFeature = list(map(str.strip, config['basic']['dateTimeFeature'].split(',')))
for dtfeature in datetimeFeature:
if dtfeature in trainingfeatures:
trainingfeatures.remove(dtfeature)
indexFeature = list(map(str.strip, config['basic']['indexFeature'].split(',')))
for dtfeature in indexFeature:
if dtfeature in trainingfeatures:
trainingfeatures.remove(dtfeature)
self.code_config["selected_features"] = trainingfeatures
self.code_config["dateTimeFeature"] = datetimeFeature
self.code_config["profiler"] = config['advance']['profiler']
self.code_config["feature_selector"]= self.__get_feature_selector(config['advance']['selector'])
self.code_config["feature_reducer"]= self.__get_feature_reducer(config['advance']['selector'])
self.code_config["corr_threshold"]= float(config['advance']['selector']['statisticalConfig'].get('correlationThresholdTarget',0.85))
self.code_config["var_threshold"]= float(config['advance']['selector']['statisticalConfig'].get('varianceThreshold',0.01))
self.code_config["pValueThreshold"]= float(config['advance']['selector']['statisticalConfig'].get('pValueThresholdTarget',0.04))
self.code_config["n_components"]= int(config['advance']['selector']['featureEngineering']['numberofComponents'])
self.code_config["balancingMethod"] = config['advance']['categoryBalancingMethod']
self.code_config["test_ratio"] = int(config['advance']['testPercentage'])/100
#self.code_config["scoring_criteria"] = "accuracy"
if self.code_config["problem_type"] in ['classification','regression']:
self.code_config["algorithms"] = {}
else:
algo = self.__get_true_option(config['basic']['algorithms'][problem_type])
self.code_config["algorithms"] = {algo: config['advance'][problem_type]['modelParams'][algo]} #task 11997
self.code_config["scoring_criteria"] = self.__get_scoring_criteria(self.__get_true_option(config['basic']["scoringCriteria"][problem_type]))
if problem_type.lower() == 'timeseriesforecasting': #task 11997
self.code_config["lag_order"] = self.code_config["algorithms"][algo]["lag_order"]
self.code_config["noofforecasts"] = config["basic"]["noofforecasts"]
self.code_config["target_feature"] = config['basic']['targetFeature'].split(',')
self.code_config["optimization"] = config['advance']['mllearner_config']['optimizationMethod']
self.code_config["optimization_param"] = self.__getOptimizationParam(config['advance']['mllearner_config']['optimizationHyperParameter'])
if __name__ == '__main__':
codeConfigure = code_configure()
codeConfigure.create_config("C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\config\\AION_1668151242.json")
codeConfigure.save_config(r"C:\Users\vashistah\AppData\Local\HCLT\AION\target\AION_57_ts_1") |
check_config.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_true_options(d):
options = []
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
options.append(k)
return options
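# Illustrative examples: the UI emits option dicts keyed by option name with
# string booleans as values, so
#   get_true_option({'classification': 'False', 'regression': 'True'})  -> 'regression'
#   get_true_options({'ARIMA': 'True', 'VAR': 'True', 'MLP': 'False'})  -> ['ARIMA', 'VAR']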
def check_datetime(config):
dateTime = config['basic']['dateTimeFeature']
if dateTime == '' or dateTime.lower()=='na':
return False
return True
def check_dtype(d):
flag= 1
for item in d:
if item["type"].lower() != "text" and item["type"].lower() != "index":
flag = 0
break
return flag
def check_text(d): #task 12627
flag= 0
for item in d:
if item["type"].lower() == "text":
flag = 1
break
return flag
def check_labelencoding(ftr_dict_list, target_ftr):
for ftr_dict in ftr_dict_list:
if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding'].lower()!='labelencoding':
return False
return True
class timeseries():
def __init__(self,config):
self.config=config
#task 11997
if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true':
self.problemType = 'timeSeriesForecasting'
elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true':
self.problemType = 'timeSeriesAnomalyDetection'
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
text_status = check_text(self.config['advance']['profiler']['featureDict'])
if not date_time_status and text_status:
msg = 'For time series problem,\\n* One feature should be in datetime format\\n* Text feature not supported '
return 'error', msg
elif not date_time_status:
msg = 'For time series problem, one feature should be in datetime format'
return 'error', msg
elif text_status:
msg = 'For time series problem, text feature not supported '
return 'error', msg
selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997
if isinstance(self.config['basic']['targetFeature'],str):
targetFeature = list(self.config['basic']['targetFeature'].split(','))
if self.problemType=='timeSeriesForecasting': #task 11997
if len(targetFeature) > 1:
if 'ARIMA' in selected_algos:
status = 'error'
msg = "ARIMA is not supported for multilabel (target) feature"
return status, msg
if "FBPROPHET" in selected_algos:
status = 'error'
msg = "FBPROPHET is not supported for multiLabel (target) feature"
return status, msg
if 'MLP' in selected_algos:
status = 'error'
msg = "MLP is not supported for multiLabel (target) feature"
return status, msg
if len(targetFeature) == 1 and 'VAR' in selected_algos:
status = 'error'
msg = "VAR is not supported for singleLabel (target) feature"
return status, msg
elif self.problemType=='timeSeriesAnomalyDetection':
    anomChecker = anomaly(self.config)
    status, msg = anomChecker.validate_basic_config()
    return status, msg
return status, msg  # fall-through: all checks passed
class anomaly():
def __init__(self,config):
self.config = config
if self.config['basic']['analysisType']['anomalyDetection'].lower()=='true':
    self.problemType = 'anomalyDetection'
elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': #task 11997
    self.problemType = 'timeSeriesAnomalyDetection'
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
targetFeature = self.config['basic']['targetFeature']
if self.problemType=='anomalyDetection' and date_time_status:
status = 'error'
msg = 'Date feature detected. For anomaly detection on time series change problem type to Time Series Anomaly Detection or drop Date feature'
return status, msg
if targetFeature.lower()!= 'na' and targetFeature!= "" and self.config['basic']['inlierLabels'] == '':
    status = 'error'
    msg = 'Please provide inlier label in case of supervised anomaly detection'
    return status, msg
return status, msg  # fall-through: all checks passed
class survival():
def __init__(self,config):
self.config = config
self.problemType= 'survivalAnalysis'
def validate_basic_config(self):
dateTimeStatus = check_datetime(self.config)
labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature'])
if not dateTimeStatus and not labelencoding_status:
msg = 'For survival analysis problem,\\n* One feature should be in datetime format\\n* Encoding of categorical features should be of label encoding '
return 'error', msg
elif not dateTimeStatus:
msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature'
return 'error', msg
elif not labelencoding_status:
msg = 'Categorical features are expected to be label encoded for survival analysis problem. Please select it from feature encoding'
return 'error', msg
else:
return 'pass', " "
class associationrule():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
    assoc_config = self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']
    invoice_feature = assoc_config['invoiceNoFeature']
    item_feature = assoc_config['itemFeature']
    if invoice_feature.lower() in ('', 'na') or item_feature.lower() in ('', 'na'):
        return "error","Make sure to configure both the invoice feature and the item feature"
    elif invoice_feature == item_feature:
        return "error","Invoice feature and item feature must be configured as different features"
    else:
        return "pass", " "
class documentsimilarity():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
flag = check_dtype(self.config['advance']['profiler']['featureDict'])
if flag == 1:
return "pass", " "
else:
msg="Make sure to change the feature type from Catgeory to Text and drop numerical features for document Similarity"
return "error", msg
def config_validate(path):
with open(path, 'rb') as data_file:
    config = json.load(data_file)
try:
problem_type = get_true_option(config['basic']['analysisType'], '')  # default avoids calling .lower() on None below
status = 'pass'
msg = ''
if 'timeseries' in problem_type.lower(): #task 11997
obj = timeseries(config)
elif problem_type.lower() == 'survivalanalysis':
obj = survival(config)
elif problem_type.lower() == 'anomalydetection':
obj = anomaly(config)
elif problem_type.lower() in ['similarityidentification','contextualsearch']:
obj = documentsimilarity(config)
elif problem_type.lower() == 'recommendersystem':
if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true':
obj = associationrule(config)
else:
return 'pass',""
else:
return 'pass',""
status,msg= obj.validate_basic_config()
return(status,msg)
except Exception as e:
    print(e)
    return 'error', str(e)
def start_check(config):
return config_validate(config)
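# Usage sketch (path is illustrative):
#   status, msg = start_check(r'C:\path\to\AION_config.json')
#   print(status, msg)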
|
TextProcessing.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import logging
import numpy as np
import sys
from pathlib import Path
import nltk
from nltk.tokenize import sent_tokenize
from nltk import pos_tag
from nltk import ngrams
from nltk.corpus import wordnet
from nltk import RegexpParser
from textblob import TextBlob
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
import urllib.request
import zipfile
import os
from os.path import expanduser
import platform
from text import TextCleaning as text_cleaner
from text.Embedding import extractFeatureUsingPreTrainedModel
logEnabled = False
spacy_nlp = None
def ExtractFeatureCountVectors(ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
binary=False):
vectorizer = CountVectorizer(ngram_range = ngram_range, max_df = max_df, \
min_df = min_df, max_features = max_features, binary = binary)
return vectorizer
def ExtractFeatureTfIdfVectors(ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
binary=False,
norm='l2',
use_idf=True,
smooth_idf=True,
sublinear_tf=False):
vectorizer = TfidfVectorizer(ngram_range = ngram_range, max_df = max_df, \
min_df = min_df, max_features = max_features, \
binary = binary, norm = norm, use_idf = use_idf, \
smooth_idf = smooth_idf, sublinear_tf = sublinear_tf)
return vectorizer
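# Usage sketch (toy corpus; both factories return unfitted scikit-learn
# vectorizers, so the usual fit/transform API applies):
#   vec = ExtractFeatureCountVectors(ngram_range=(1, 2), max_features=1000)
#   X = vec.fit_transform(["the cat sat", "the dog ran"])  # sparse matrix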
def GetPOSTags( inputText, getPOSTags_Lib='nltk'):
global spacy_nlp
tokens_postag_list = []
if (inputText == ""):
__Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
else:
if getPOSTags_Lib == 'spacy':
if spacy_nlp is None:
spacy_nlp = spacy.load('en_core_web_sm')
doc = spacy_nlp(inputText)
for token in doc:
tokens_postag_list.append((token.text, token.tag_))
elif getPOSTags_Lib == 'textblob':
doc = TextBlob(inputText)
tokens_postag_list = doc.tags
else:
tokensList = WordTokenize(inputText)
tokens_postag_list = pos_tag(tokensList)
return tokens_postag_list
def GetNGrams( inputText, ngramRange=(1,1)):
ngramslist = []
for n in range(ngramRange[0],ngramRange[1]+1):
nwordgrams = ngrams(inputText.split(), n)
ngramslist.extend([' '.join(grams) for grams in nwordgrams])
return ngramslist
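# Example (illustrative):
#   GetNGrams("the cat sat", (1, 2))
#   -> ['the', 'cat', 'sat', 'the cat', 'cat sat']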
def NamedEntityRecognition( inputText):
global spacy_nlp
neResultList = []
if (inputText == ""):
__Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
else:
if spacy_nlp is None:
spacy_nlp = spacy.load('en_core_web_sm')
doc = spacy_nlp(inputText)
neResultList = [(X.text, X.label_) for X in doc.ents]
return neResultList
def KeywordsExtraction( inputText, ratio=0.2, words = None, scores=False, pos_filter=('NN', 'JJ'), lemmatize=False):
    # `keywords` is gensim's extractive keyword function; the import is local
    # because gensim's summarization module is only available in gensim < 4.0.
    from gensim.summarization import keywords
    keywordsList = []
    if (inputText == ""):
        __Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
    else:
        keywordsList = keywords(inputText, ratio = ratio, words = words, split=True, scores=scores,
                                pos_filter=pos_filter, lemmatize=lemmatize)
    return keywordsList
return keywordsList
def __get_nodes(parent):
nounList = []
verbList = []
for node in parent:
if type(node) is nltk.Tree:
if node.label() == "NP":
subList = []
for item in node.leaves():
subList.append(item[0])
nounList.append((" ".join(subList)))
elif node.label() == "VP":
subList = []
for item in node.leaves():
subList.append(item[0])
verbList.append((" ".join(subList)))
#verbList.append(node.leaves()[0][0])
__get_nodes(node)
result = {'NP': nounList, 'VP': verbList}
return result
def ShallowParsing( inputText, lib='spacy'):
tags = GetPOSTags(inputText, getPOSTags_Lib=lib)
chunk_regex = r"""
NBAR:
{<DT>?<NN.*|JJ.*>*<NN.*>+} # Nouns and Adjectives, terminated with Nouns
VBAR:
{<RB.?>*<VB.?>*<TO>?<JJ>*<VB.?>+<VB>?} # Verbs and Verb Phrases
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
VP:
{<VBAR>}
{<VBAR><IN><VBAR>} # Above, connected with in/of/etc...
"""
rp = RegexpParser(chunk_regex)
t = rp.parse(tags)
return __get_nodes(t)
def SyntacticAndEntityParsing(inputCorpus,
featuresList=['POSTags','NGrams','NamedEntityRecognition','KeywordsExtraction','ShallowParsing'],
posTagsLib='nltk',
ngramRange=(1,1),
ke_ratio=0.2,
ke_words = None,
ke_scores=False,
ke_pos_filter=('NN', 'JJ'),
ke_lemmatize=False):
columnsList = ['Input']
columnsList.extend(featuresList)
df = pd.DataFrame(columns=columnsList)
df['Input'] = inputCorpus
for feature in featuresList:
if feature == 'POSTags':
df[feature] = inputCorpus.apply(lambda x: GetPOSTags(x, posTagsLib))
if feature == 'NGrams':
df[feature] = inputCorpus.apply(lambda x: GetNGrams(x, ngramRange))
if feature == 'NamedEntityRecognition':
df[feature] = inputCorpus.apply(lambda x: NamedEntityRecognition(x))
if feature == 'KeywordsExtraction':
df[feature] = inputCorpus.apply(lambda x: KeywordsExtraction(x,
ratio=ke_ratio, words=ke_words,
scores=ke_scores, pos_filter=ke_pos_filter,
lemmatize=ke_lemmatize))
if feature == 'ShallowParsing':
df[feature] = inputCorpus.apply(lambda x: ShallowParsing(x, lib=posTagsLib))
return df
def __Log( logType="info", text=None):
if logType.lower() == "exception":
logging.exception( text)
elif logEnabled:
if logType.lower() == "info":
logging.info( text)
elif logType.lower() == "debug":
logging.debug( text)
def SentenceTokenize( inputText):
    # Delegates to NLTK's sentence tokenizer; the original delegated to
    # WordTokenize, which looked like a copy-paste slip.
    return sent_tokenize(inputText)
def WordTokenize( inputText, tokenizationLib = 'nltk'):
return text_cleaner.WordTokenize(inputText, tokenizationLib)
def Lemmatize( inputTokensList, lemmatizationLib = 'nltk'):
return text_cleaner.Lemmatize(inputTokensList, lemmatizationLib)
def Stemmize( inputTokensList):
return text_cleaner.Stemmize(inputTokensList)
def ToLowercase( inputText):
resultText = ""
if inputText is not None and inputText != "":
resultText = inputText.lower()
return resultText
def ToUppercase( inputText):
resultText = ""
if inputText is not None and inputText != '':
resultText = inputText.upper()
return resultText
def RemoveNoise(
inputText,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True):
return text_cleaner.RemoveNoise(inputText, removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions,
removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii)
def RemoveStopwords( inputTokensList, stopwordsRemovalLib='nltk', stopwordsList = None, extend_or_replace='extend'):
return text_cleaner.RemoveStopwords(inputTokensList, stopwordsRemovalLib, stopwordsList, extend_or_replace)
def RemoveNumericTokens( inputText, removeNumeric_fIncludeSpecialCharacters=True):
return text_cleaner.RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters)
def RemovePunctuation( inputText, fRemovePuncWithinTokens=False):
return text_cleaner.RemovePunctuation(inputText, fRemovePuncWithinTokens)
def CorrectSpelling( inputTokensList):
return text_cleaner.CorrectSpelling(inputTokensList)
def ReplaceAcronym( inputTokensList, acrDict=None):
return text_cleaner.ReplaceAcronym(inputTokensList, acrDict)
def ExpandContractions( inputText, expandContractions_googleNewsWordVectorPath=None):
return text_cleaner.ExpandContractions(inputText, expandContractions_googleNewsWordVectorPath)
def get_pretrained_model_path():
try:
from appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300):
models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}}
supported_models = [x for y in models.values() for x in y.values()]
embedding_sizes = {x:y.keys() for x,y in models.items()}
preTrainedModel = preTrainedModel.lower()
if preTrainedModel not in models.keys():
raise ValueError(f'model not supported: {preTrainedModel}')
if embedding_size not in embedding_sizes[preTrainedModel]:
raise ValueError(f"Embedding size '{embedding_size}' not supported for {preTrainedModel}")
selected_model = models[preTrainedModel][embedding_size]
modelsPath = get_pretrained_model_path()
p = modelsPath.glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
if selected_model not in modelsDownloaded:
if preTrainedModel == "glove":
try:
local_file_path = modelsPath/f"glove.6B.{embedding_size}d.w2vformat.txt"
file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path)
except Exception as e:
raise ValueError("Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. ".format(modelsPath)+str(e))
elif preTrainedModel == "fasttext":
try:
local_file_path = modelsPath/"wiki-news-300d-1M.vec.zip"
url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip'
file_test, header_test = urllib.request.urlretrieve(url, local_file_path)
with zipfile.ZipFile(local_file_path) as zip_ref:
zip_ref.extractall(modelsPath)
Path(local_file_path).unlink()
except Exception as e:
raise ValueError("Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
return modelsPath/selected_model
def load_pretrained(path):
embeddings = {}
word = ''
with open(path, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
return embeddings
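# The file is expected in word2vec text format: a '<vocab_size> <embed_size>'
# header line followed by one '<word> <v1> ... <vN>' line per token, e.g.
#   400000 50
#   the 0.418 0.24968 -0.41242 ...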
class TextProcessing(BaseEstimator, TransformerMixin):
def __init__(self,
functionSequence = ['RemoveNoise','ExpandContractions','Normalize','ReplaceAcronym',
'CorrectSpelling','RemoveStopwords','RemovePunctuation','RemoveNumericTokens'],
fRemoveNoise = True,
fExpandContractions = False,
fNormalize = True,
fReplaceAcronym = False,
fCorrectSpelling = False,
fRemoveStopwords = True,
fRemovePunctuation = True,
fRemoveNumericTokens = True,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True,
tokenizationLib='nltk',
normalizationMethod = 'Lemmatization',
lemmatizationLib = 'nltk',
acronymDict = None,
stopwordsRemovalLib = 'nltk',
stopwordsList = None,
extend_or_replace_stopwordslist = 'extend',
removeNumeric_fIncludeSpecialCharacters = True,
fRemovePuncWithinTokens = False,
data_path = None
):
global logEnabled
#logEnabled = EnableLogging
self.functionSequence = functionSequence
self.fRemoveNoise = fRemoveNoise
self.fExpandContractions = fExpandContractions
self.fNormalize = fNormalize
self.fReplaceAcronym = fReplaceAcronym
self.fCorrectSpelling = fCorrectSpelling
self.fRemoveStopwords = fRemoveStopwords
self.fRemovePunctuation = fRemovePunctuation
self.fRemoveNumericTokens = fRemoveNumericTokens
self.removeNoise_fHtmlDecode = removeNoise_fHtmlDecode
self.removeNoise_fRemoveHyperLinks = removeNoise_fRemoveHyperLinks
self.removeNoise_fRemoveMentions = removeNoise_fRemoveMentions
self.removeNoise_fRemoveHashtags = removeNoise_fRemoveHashtags
self.removeNoise_RemoveOrReplaceEmoji = removeNoise_RemoveOrReplaceEmoji
self.removeNoise_fUnicodeToAscii = removeNoise_fUnicodeToAscii
self.removeNoise_fRemoveNonAscii = removeNoise_fRemoveNonAscii
self.tokenizationLib = tokenizationLib
self.normalizationMethod = normalizationMethod
self.lemmatizationLib = lemmatizationLib
self.acronymDict = acronymDict
self.stopwordsRemovalLib = stopwordsRemovalLib
self.stopwordsList = stopwordsList
self.extend_or_replace_stopwordslist = extend_or_replace_stopwordslist
self.removeNumeric_fIncludeSpecialCharacters = removeNumeric_fIncludeSpecialCharacters
self.fRemovePuncWithinTokens = fRemovePuncWithinTokens
self.data_path = data_path
self.fit_and_transformed_ = False
def fit(self, x, y=None):
return self
def transform(self, x):
x = map(lambda inputText: text_cleaner.cleanText(inputText, functionSequence = self.functionSequence, fRemoveNoise = self.fRemoveNoise, fExpandContractions = self.fExpandContractions, fNormalize = self.fNormalize, fReplaceAcronym = self.fReplaceAcronym, fCorrectSpelling = self.fCorrectSpelling, fRemoveStopwords = self.fRemoveStopwords, fRemovePunctuation = self.fRemovePunctuation, fRemoveNumericTokens = self.fRemoveNumericTokens, removeNoise_fHtmlDecode = self.removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks = self.removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions = self.removeNoise_fRemoveMentions , removeNoise_fRemoveHashtags = self.removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji = self.removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii = self.removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii = self.removeNoise_fRemoveNonAscii, tokenizationLib = self.tokenizationLib, normalizationMethod = self.normalizationMethod, lemmatizationLib = self.lemmatizationLib, acronymDict = self.acronymDict, stopwordsRemovalLib = self.stopwordsRemovalLib, stopwordsList = self.stopwordsList, extend_or_replace_stopwordslist = self.extend_or_replace_stopwordslist, removeNumeric_fIncludeSpecialCharacters = self.removeNumeric_fIncludeSpecialCharacters, fRemovePuncWithinTokens = self.fRemovePuncWithinTokens), x)
x = pd.Series(list(x))
if hasattr(self, 'fit_and_transformed_') and not self.fit_and_transformed_:
self.fit_and_transformed_ = True
if self.data_path and Path(self.data_path).exists():
x.to_csv(Path(self.data_path)/'text_cleaned.csv', index=False)
return x
def get_feature_names_out(self):
return ['tokenize']
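# Usage sketch (toy data; the argument shown is one of the defaults above,
# passed explicitly only for illustration):
#   tp = TextProcessing(fCorrectSpelling=False)
#   cleaned = tp.fit_transform(pd.Series(["Hello <b>world</b>!!", "Check https://example.com @user"]))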
class wordEmbedding(BaseEstimator, TransformerMixin):
def __init__(self, preTrainedModel, embeddingSize=300,external_model=None,external_model_type='binary'):
self.number_of_features = 0
self.embeddingSize = embeddingSize
self.preTrainedModel = preTrainedModel.lower()
self.external_model=external_model
self.external_model_type = external_model_type
if self.preTrainedModel == "glove":
self.preTrainedModelpath = f'glove.6B.{self.embeddingSize}d.w2vformat.txt'
self.binary = False
elif self.preTrainedModel == "fasttext":
self.preTrainedModelpath = 'wiki-news-300d-1M.vec'
self.binary = False
else:
raise ValueError(f'Model ({self.preTrainedModel}) not supported')
def fit(self, x, y=None):
return self
def transform(self, x):
if ((isinstance(self.external_model, pd.DataFrame) and not self.external_model.empty) or (not isinstance(self.external_model, pd.DataFrame) and self.external_model)):
if self.preTrainedModel == "fasttext" and self.external_model_type == 'binary':
print('Transforming using external binary')
extracted = np.vstack([self.external_model.get_sentence_vector(sentence) for sentence in x])
else:
print('Transforming using external vector')
extracted = extractFeatureUsingPreTrainedModel(x, pretrainedModelPath=None, loaded_model=self.external_model, embed_size=300)
else:
print('Transforming using Vector')
models_path = checkAndDownloadPretrainedModel(self.preTrainedModel, self.embeddingSize)
extracted = extractFeatureUsingPreTrainedModel(x, models_path)
self.number_of_features = extracted.shape[1]
return extracted
def get_feature_names_out(self):
return [str(x) for x in range(self.number_of_features)]
def get_feature_names(self):
return self.get_feature_names_out()
def getProcessedPOSTaggedData(pos_tagged_data):
def get_wordnet_post(tag):
if tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def process_pos_tagged_data(text):
processed_text = [f"{t[0]}_{get_wordnet_post(t[1])}" for t in text]
processed_text = " ".join(processed_text)
return processed_text
processed_pos_tagged_data = pos_tagged_data.apply(process_pos_tagged_data)
return processed_pos_tagged_data
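# Example (illustrative): a row holding [('dog', 'NN'), ('runs', 'VBZ')]
# becomes the string 'dog_n runs_v' (WordNet POS suffixes).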
class PosTagging(BaseEstimator, TransformerMixin):
def __init__(self, posTagsLib, data_path):
self.posTagsLib = posTagsLib
self.fit_and_transformed_ = False
self.data_path = data_path
def fit(self, x, y=None):
return self
def transform(self, x):
parsing_output = SyntacticAndEntityParsing(x, featuresList=['POSTags'], posTagsLib=self.posTagsLib)
output = getProcessedPOSTaggedData(parsing_output['POSTags'])
if not self.fit_and_transformed_:
self.fit_and_transformed_ = True
if self.data_path and Path(self.data_path).exists():
output.to_csv(Path(self.data_path)/'pos_tagged.csv', index=False)
return output
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
#from .eda import ExploreTextData |
textProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import logging
from distutils.util import strtobool
import numpy as np
import pandas as pd
from text import TextProcessing
from sklearn.preprocessing import FunctionTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from pathlib import Path
external_model = None
external_model_type = None
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
self.embedder = None
self.bert_embedder_size = 0
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def sentence_encode(self, item):
    return self.model.encode(item, show_progress_bar=False)
def get_embedding_size(self, model, config):
if model in config.keys():
config = config[model]
else:
config = {}
model = model.lower()
if model == 'glove':
size_map = {'default': 100, '50d': 50, '100d':100, '200d': 200, '300d':300}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model == 'fasttext':
size_map = {'default': 300}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model == 'latentsemanticanalysis':
size_map = {'default': 100, '50d': 50, '100d':100, '200d': 200, '300d':300,'500d':500,'700d':700,'1000d':1000}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model in ['tf_idf', 'countvectors']:
return int(config.get('maxFeatures', 2000))
else: # for word2vec
return 300
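# Examples (illustrative):
#   get_embedding_size('glove', {'glove': {'200d': 'True'}})            -> 200
#   get_embedding_size('tf_idf', {'tf_idf': {'maxFeatures': '500'}})    -> 500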
def cleaner(self, conf_json, pipeList, data_path=None):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', '[]')
if isinstance(cleaning_kwargs['stopwordsList'], str):
if cleaning_kwargs['stopwordsList'] != '[]':
cleaning_kwargs['stopwordsList'] = cleaning_kwargs['stopwordsList'][1:-1].split(',')
else:
cleaning_kwargs['stopwordsList'] = []
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'replace' if strtobool(stopWordsConfig.get('replace', 'True')) else 'extend'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
if data_path:
cleaning_kwargs['data_path'] = data_path
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib, data_path)
pipeList.append(("posTagger",posTagger))
return pipeList
def embedding(self, conf_json, pipeList):
ngram_min = 1
ngram_max = 1
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
conversion_method = get_one_true_option(textConversionMethod, 'none')  # string default keeps the .lower() checks below safe
embedding_size_config = conf_json.get('embeddingSize', {})
embedding_size = self.get_embedding_size(conversion_method, embedding_size_config)
if conversion_method.lower() == "countvectors":
vectorizer = TextProcessing.ExtractFeatureCountVectors( ngram_range=ngram_range_tuple,max_features=embedding_size)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVectorizer = TextProcessing.wordEmbedding(embedding_method, embedding_size)
pipeList.append(("vectorizer",wordEmbeddingVectorizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "openai":
from text.openai_embedding import embedding as openai_embedder
vectorizer = openai_embedder()
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer_distilroberta":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/msmarco-distilroberta-base-v2','size': 768}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentence_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using msmarco_distilroberta')
elif conversion_method.lower() == "sentencetransformer_minilm":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/all-MiniLM-L6-v2','size': 384}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentence_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using MiniLM-L6-v2')
elif conversion_method.lower() == "sentencetransformer_mpnet":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/all-mpnet-base-v2','size': 768}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentence_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using mpnet-base-v2')
elif conversion_method.lower() == 'latentsemanticanalysis':
vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: latentsemanticanalysis')
elif conversion_method.lower() == 'tf_idf':
vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple,max_features=embedding_size)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: TF_IDF')
else:
    # No recognized conversion method configured; log it and leave the pipeline unchanged.
    self.log.info('----------> Conversion Method: '+str(conversion_method))
return pipeList
def sentence_transformer_output(self, transformer, names=None):
return [str(x) for x in range(self.bert_embedder_size)]
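# Usage sketch (illustrative config fragments; keys mirror those read above):
#   from sklearn.pipeline import Pipeline
#   prof = textProfiler()
#   steps = prof.cleaner({'textCleaning': {}, 'textFeatureExtraction': {}}, [])
#   steps = prof.embedding({'textFeatureExtraction': {},
#                           'textConversionMethod': {'tf_idf': 'True'}}, steps)
#   pipe = Pipeline(steps)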
class textCombine(TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
if X.shape[1] > 1:
return np.array([" ".join(i) for i in X])
else:
if isinstance(X, np.ndarray):
return np.ndarray.flatten(X)
else:
return X
def get_pretrained_model_path():
try:
from appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def set_pretrained_model(pipe):
from text.Embedding import load_pretrained
import importlib.util
global external_model
global external_model_type
params = pipe.get_params()
model_name = params.get('text_process__vectorizer__preTrainedModel', None)
if model_name and model_name.lower() in ['fasttext','glove'] and not external_model:
if model_name == 'fasttext' and importlib.util.find_spec('fasttext'):
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
external_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
external_model_type = 'binary'
print('loaded fasttext binary')
else:
model_path = TextProcessing.checkAndDownloadPretrainedModel(model_name)
embed_size, external_model = load_pretrained(model_path)
external_model_type = 'vector'
print(f'loaded {model_name} vector')
pipe.set_params(text_process__vectorizer__external_model = external_model)
pipe.set_params(text_process__vectorizer__external_model_type = external_model_type)
def reset_pretrained_model(pipe, clear_mem=True):
global external_model
global external_model_type
params = pipe.get_params()
is_external_model = params.get('text_process__vectorizer__external_model', None)
if (isinstance(is_external_model, pd.DataFrame) and not is_external_model.empty) or is_external_model:
pipe.set_params(text_process__vectorizer__external_model = None)
pipe.set_params(text_process__vectorizer__external_model_type = None)
if clear_mem:
external_model = None
def release_pretrained_model():
global external_model
global external_model_type
external_model = None
external_model_type = None
|
Embedding.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
# Private function
def unitvec(vec):
return vec / np.linalg.norm(vec)
def __word_average(vectors, sent, vector_size,key_to_index):
"""
Compute average word vector for a single doc/sentence.
"""
try:
mean = []
for word in sent:
index = key_to_index.get(word, None)
if index is not None:
mean.append( vectors[index] )
if len(mean):
return unitvec(np.array(mean).mean(axis=0))
return np.zeros(vector_size)
except:
raise
# Private function
def __word_average_list(vectors, docs, embed_size,key_to_index):
"""
Compute average word vector for multiple docs, where docs had been tokenized.
"""
try:
return np.vstack([__word_average(vectors, sent, embed_size,key_to_index) for sent in docs])
except:
raise
def load_pretrained(path):
df = pd.read_csv(path, index_col=0,sep=' ',quotechar = ' ' , header=None, skiprows=1,encoding_errors= 'replace')
return len(df.columns), df
def get_model( df:pd.DataFrame):
index_to_key = {k:v for k,v in enumerate(df.index)}
key_to_index = {v:k for k,v in enumerate(df.index)}
df = df.to_numpy()
return df, index_to_key, key_to_index
def extractFeatureUsingPreTrainedModel(inputCorpus, pretrainedModelPath=None, loaded_model=False,key_to_index={}, embed_size=300):
"""
Extract feature vector from input Corpus using pretrained Vector model(word2vec,fasttext, glove(converted to word2vec format)
"""
try:
if inputCorpus is None:
return None
else:
if not pretrainedModelPath and ((isinstance(loaded_model, pd.DataFrame) and loaded_model.empty) or (not isinstance(loaded_model, pd.DataFrame) and not loaded_model)):
inputCorpusWordVectors = None
else:
if (isinstance(loaded_model, pd.DataFrame) and not loaded_model.empty) or loaded_model:
pretrainedModel = loaded_model
else:
embed_size, pretrainedModel = load_pretrained(pretrainedModelPath)
pretrainedModel, index_to_key,key_to_index = get_model( pretrainedModel)
if len(pretrainedModel):
input_docs_tokens_list = [word_tokenize(inputDoc) for inputDoc in inputCorpus]
inputCorpusWordVectors = __word_average_list(pretrainedModel, input_docs_tokens_list,embed_size,key_to_index)
else:
inputCorpusWordVectors = None
return inputCorpusWordVectors
except:
raise
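# Usage sketch (illustrative; the model path is hypothetical):
#   size, model_df = load_pretrained('glove.6B.50d.w2vformat.txt')
#   vecs = extractFeatureUsingPreTrainedModel(['a red car', 'the blue sky'],
#                                             loaded_model=model_df, embed_size=size)
#   vecs.shape  # -> (2, 50)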
|