computeinfrastructure = compute.readComputeConfig()
context['computeinfrastructure'] = computeinfrastructure
context['version'] = AION_VERSION
return render(request, 'advancedconfig.html', context)
def updateRunConfig(_trainingTime, _filesize, _features, _modelname, _problem_type):
returnVal = 'Success'
try:
import psutil
memInGB = round(psutil.virtual_memory().total / (1024 * 1024 * 1024))
_resource = str(memInGB) + " GB"
_time = str(_trainingTime) + " Mins"
new_record = {
"sampleSize": _filesize,
"features": _features,
"algorithm": _modelname,
"machineResource": _resource,
"trainingTime": _time,
"problemtype": _problem_type
}
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json')
if(os.path.isfile(configfilepath)):
with open(configfilepath,'r+') as file:
# load existing data into a dict.
file_data = json.load(file)
# join new_record with file_data inside runs
file_data["runs"].append(new_record)
# sets file's current position at offset.
file.seek(0)
# convert back to json.
json.dump(file_data, file, indent = 4)
except Exception:
returnVal = 'Fail'
return returnVal
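# --- Illustration (hedged sketch, not part of the views) ----------------------
# The read-modify-write pattern used by updateRunConfig() above, standalone.
# Appending a record always grows the payload here, but the file.truncate()
# call makes the pattern safe even when a rewrite shrinks the file. The path
# argument below is hypothetical.
import json
def append_run_record(path, record):
    with open(path, 'r+') as file:
        file_data = json.load(file)           # existing {"runs": [...]} dict
        file_data["runs"].append(record)      # add the new run entry
        file.seek(0)                          # rewind before rewriting in place
        json.dump(file_data, file, indent=4)
        file.truncate()                       # drop leftover bytes, if any
# ------------------------------------------------------------------------------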
def objectlabeldone(request):
computeinfrastructure = compute.readComputeConfig()
try:
request.session['datatype'] = 'Object'
request.session['csvfullpath'] = request.session['objectLabelFileName']
df = pd.read_csv(request.session['csvfullpath'])
df1 = df.groupby(['Label']).agg({"File":{"count","nunique"}})
df1.columns = df1.columns.droplevel(0)
df1 = df1.reset_index()
class_count = []
for i in range(len(df1)):
dct = {}
dct['Label'] = df1.loc[i, "Label"]
dct['TotalAnnotations'] = df1.loc[i, "count"]
dct['Images'] = df1.loc[i, "nunique"]
class_count.append(dct)
#for xml_file in glob.glob(request.session['datalocation'] + '/*.xml'):
status_msg = 'Successfully Done'
wordcloudpic = ''
bargraph = ''
firstFile = pd.DataFrame()
#print(class_count)
context = {'tab': 'upload','firstFile':firstFile,'dataa': class_count,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure}
return render(request, 'upload.html', context)
except:
context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,"usecaseerror":"Error in labeling object!"}
return render(request, 'upload.html', context)
def ObjLabelDiscard(request):
return redirect(reverse('objectlabelling'))
def ObjLabelAdd(request,id):
angle = request.GET.get("angle")
gid = request.GET.get("gid")
xMin = min(int(request.GET.get("xMin")),int(request.GET.get("xMax")))
xMax =max(int(request.GET.get("xMin")),int(request.GET.get("xMax")))
yMin = min(int(request.GET.get("yMin")),int(request.GET.get("yMax")))
yMax = max(int(request.GET.get("yMin")),int(request.GET.get("yMax")))
height = request.GET.get("height")
width = request.GET.get("width")
#print("=====> "+str(angle) +" "+ str(gid) +" "+ str(xMin) + " " + str(xMax) + " " +str(yMin) +" "+ str(yMax)+" "+str(width))
# with open("out.csv", 'w') as f:
# # writer = csv.writer(f)
# # writer.writerow([angle, id, gid, xMin, xMax, yMin, yMax])
# f.write(angle +" "+ gid +" "+ xMin + " " + xMax + " " +yMin +" "+ yMax)
labels = request.session['labels']
labels.append({"id":id, "name":"", "xMin":xMin, "xMax":xMax, "yMin":yMin, "yMax":yMax, "height":height,"width":width, "angle":angle})
request.session['labels'] = labels
return redirect(reverse('objectlabelling'))
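# --- Illustration (hedged sketch) ---------------------------------------------
# ObjLabelAdd() normalises the box with min()/max() so a rectangle dragged
# right-to-left or bottom-to-top still satisfies xMin <= xMax and yMin <= yMax:
def normalise_box(x1, x2, y1, y2):
    """Return (xMin, xMax, yMin, yMax) regardless of drag direction."""
    return min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2)
# e.g. a box dragged from (200, 150) back up to (40, 30):
assert normalise_box(200, 40, 150, 30) == (40, 200, 30, 150)
# ------------------------------------------------------------------------------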
def imageeda(request):
try:
computeinfrastructure = compute.readComputeConfig()
request.session['datatype'] = 'Image'
filename = request.session['csvfullpath']
os.remove(filename)
request.session['csvfullpath'] = request.session['LabelFileName']
df = pd.read_csv(request.session['csvfullpath'])
eda_result = ''
duplicate_img = ''
color_plt = ''
df2 = df.groupby('Label', as_index=False)['File'].count().reset_index()
df_json = df2.to_json(orient="records")
df_json = json.loads(df_json)
cfig = go.Figure()
xaxis_data = df2['Label'].tolist()
yaxis_data = df2['File'].tolist()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
firstFile = df.groupby('Label').first().reset_index()
#firstFile['FilePath'] = firstFile['File'].apply(lambda x: os.path.join(request.session['datalocation'], x))
images = []
qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
for i in range(len(firstFile)):
filename = firstFile.loc[i, "File"]
filePath = os.path.join(request.session['datalocation'], filename)
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
firstFile.loc[i, "Image"] = image_64
firstFile.loc[i, "Quality"] = qualityscore[filename]
status_msg = 'Successfully Done'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile,
'status_msg': status_msg,'computeinfrastructure':computeinfrastructure}
return(context)
except:
context={'error':'Failed to load EDA results'}
return (context)
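# --- Illustration (hedged sketch) ---------------------------------------------
# imageeda() inlines each thumbnail as a base64 data URI so the template can
# render it without a separate static route. Standalone equivalent (the PNG
# MIME type is assumed, matching the view above):
import base64, urllib.parse
def to_data_uri(file_path):
    with open(file_path, "rb") as fh:
        encoded = base64.b64encode(fh.read())
    return 'data:image/png;base64,' + urllib.parse.quote(encoded)
# ------------------------------------------------------------------------------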
def imagelabelling(request):
if (request.session['currentIndex']) == (request.session['endIndex']+1):
try:
context = imageeda(request)
return render(request, 'upload.html', context)
except:
context = {'error': 'Image labeling error'}
return render(request, 'upload.html', context)
else:
try:
df = pd.read_csv(request.session['csvfullpath'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df)}
return render(request, 'imagelabelling.html', context)
except:
context = {'error': 'Image labeling error'}
return render(request, 'upload.html', context)
def objecteda(request):
request.session['datatype'] = 'Object'
filename = request.session['csvfullpath']
try:
os.remove(filename)
except:
pass
try:
request.session['csvfullpath'] = request.session['LabelFileName']
df = pd.read_csv(request.session['csvfullpath'])
df1 = df.groupby(['Label']).agg({"File":{"count","nunique"}})
df1.columns = df1.columns.droplevel(0)
df1 = df1.reset_index()
class_count = []
for i in range(len(df1)):
dct = {}
dct['Label'] = df1.loc[i, "Label"]
dct['TotalAnnotations'] = df1.loc[i, "count"]
dct['Images'] = df1.loc[i, "nunique"]
class_count.append(dct)
#for xml_file in glob.glob(request.session['datalocation'] + '/*.xml'):
status_msg = 'Successfully Done'
wordcloudpic = ''
bargraph = ''
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'dataa': class_count,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True}
return(context)
except:
context={'tab': 'upload','error':'Failed to load EDA results'}
return(context)
def objectlabelling(request):
if (request.session['currentIndex']) == (request.session['endIndex']+1):
try:
context = objecteda(request)
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except:
return render(request, 'upload.html', {'error':'Object labelling error','version':AION_VERSION})
else:
try:
df = pd.read_csv(request.session['csvfullpath'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
bounds = []
context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'selectedfile':df["File"].iloc[request.session['currentIndex']]}
context['version'] = AION_VERSION
return render(request, 'objectlabelling.html',context)
except:
return render(request, 'objectlabelling.html',{'tab': 'upload','error':'Object labelling error','version':AION_VERSION})
def imagelabel(request,id):
request.session['labels'] = request.GET.get("name")
return redirect(reverse('imagelabelling'))
def objectlabel(request,id):
name = request.GET.get("name")
labels = request.session['labels']
labels[int(id) - 1]["name"] = name
request.session['labels'] = labels
return redirect(reverse('objectlabelling'))
def ObjLabelRemove(request,id):
index = int(id) - 1
labels = request.session['labels']
del labels[index]
for label in labels[index:]:
label["id"] = str(int(label["id"]) - 1)
request.session['labels'] = labels
return redirect(reverse('objectlabelling'))
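# --- Illustration (hedged sketch) ---------------------------------------------
# ObjLabelRemove() keeps label ids 1-based and contiguous: after deleting the
# entry at `index`, every later label has its id shifted down by one.
def _demo_reindex():
    labels = [{"id": "1"}, {"id": "2"}, {"id": "3"}]
    index = 0                                # remove the first label
    del labels[index]
    for label in labels[index:]:
        label["id"] = str(int(label["id"]) - 1)
    assert labels == [{"id": "1"}, {"id": "2"}]
# ------------------------------------------------------------------------------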
def ImgLabelNext(request):
df = pd.read_csv(request.session['csvfullpath'])
filePath = df["File"].iloc[request.session['currentIndex']]
if request.session['labels'] != '':
dataFile = request.session['LabelFileName']
#print(dataFile)
with open(dataFile,'a') as f:
f.write(filePath + "," + request.session['labels'] + "\n")
request.session['currentIndex'] = request.session['currentIndex']+1
request.session['labels'] = ''
return redirect(reverse('imagelabelling'))
def ObjLabelPrev(request):
df = pd.read_csv(request.session['csvfullpath'])
imagePath = df["File"].iloc[request.session['currentIndex']]
request.session['currentIndex'] = request.session['currentIndex'] - 1
process_marked_area_on_image(imagePath,request)
return redirect(reverse('objectlabelling'))
def remove_labelling_from_csv(imagePath,request):
dataFile = request.session['LabelFileName']
df = pd.read_csv(dataFile)
if not df.empty:
if imagePath in df.values:
df = df.set_index("File")
df = df.drop(imagePath, axis=0)
df.to_csv(dataFile, index=True)
def process_marked_area_on_image(imagePath,request):
df = pd.read_csv(request.session['csvfullpath'])
dataFile = request.session['LabelFileName']
remove_labelling_from_csv(imagePath,request)
write_coordinates_and_label_to_csv(imagePath,request)
if request.session['currentIndex'] < len(df):
image = df["File"].iloc[request.session['currentIndex']]
request.session['labels'] = []
with open(dataFile, 'r') as file:
reader = csv.reader(file)
for row in reader:
if row[0] == image:
labels = request.session['labels']
labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
request.session['labels'] = labels
labels = request.session['labels']
return True
def write_coordinates_and_label_to_csv(imagePath,request):
dataFile = request.session['LabelFileName']
with open(dataFile, 'a') as f:
for label in request.session['labels']:
f.write(imagePath + "," +
str(round(float(label["id"]))) + "," +
str(label["angle"]) + "," +
str(round(float(label["xMin"]))) + "," +
str(round(float(label["xMax"]))) + "," +
str(round(float(label["yMin"]))) + "," +
str(round(float(label["yMax"]))) + "," +
str(round(float(label["height"]))) + "," +
str(round(float(label["width"]))) + "," +
label["name"] + "\n")
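# --- Illustration (hedged sketch) ---------------------------------------------
# Column layout of the label CSV written above, as read back by
# process_marked_area_on_image(), ObjLabelSelect() and ObjLabelNext():
#   row[0]=File, row[1]=id, row[2]=angle, row[3]=xMin, row[4]=xMax,
#   row[5]=yMin, row[6]=yMax, row[7]=height, row[8]=width, row[9]=name
# A helper (hypothetical, not used by the views) mapping one row back to the
# session label dict:
def parse_label_row(row):
    return {"id": row[1], "name": row[9], "xMin": row[3], "xMax": row[4],
            "yMin": row[5], "yMax": row[6], "height": row[7], "width": row[8],
            "angle": row[2]}
# ------------------------------------------------------------------------------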
def ObjLabelSelect(request):
selectedimage=request.GET.get('file')
df = pd.read_csv(request.session['csvfullpath'])
filePath = df["File"].iloc[request.session['currentIndex']]
remove_labelling_from_csv(filePath,request)
dataFile = request.session['LabelFileName']
with open(dataFile,'a') as f:
for label in request.session['labels']:
f.write(filePath + "," +
str(round(float(label["id"]))) + "," +
str(label["angle"]) + "," +
str(round(float(label["xMin"]))) + "," +
str(round(float(label["xMax"]))) + "," +
str(round(float(label["yMin"]))) + "," +
str(round(float(label["yMax"]))) + "," +
str(round(float(label["height"]))) + "," +
str(round(float(label["width"]))) + "," +
label["name"] + "\\n")
f.close()
currentIndex = 0
for index,row in df.iterrows():
#print(row['File'])
if row['File'] == selectedimage:
break
else:
currentIndex = currentIndex+1
request.session['currentIndex'] = currentIndex
if request.session['currentIndex'] < len(df):
image = df["File"].iloc[request.session['currentIndex']]
request.session['labels'] = []
with open(dataFile, 'r') as file:
reader = csv.reader(file)
for row in reader:
if row[0] == image:
labels = request.session['labels']
labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
request.session['labels'] = labels
labels = request.session['labels']
return redirect(reverse('objectlabelling'))
def ObjLabelNext(request):
df = pd.read_csv(request.session['csvfullpath'])
filePath = df["File"].iloc[request.session['currentIndex']]
remove_labelling_from_csv(filePath,request)
dataFile = request.session['LabelFileName']
with open(dataFile,'a') as f:
for label in request.session['labels']:
f.write(filePath + "," +
str(round(float(label["id"]))) + "," +
str(label["angle"]) + "," +
str(round(float(label["xMin"]))) + "," +
str(round(float(label["xMax"]))) + "," +
str(round(float(label["yMin"]))) + "," +
str(round(float(label["yMax"]))) + "," +
str(round(float(label["height"]))) + "," +
str(round(float(label["width"]))) + "," +
label["name"] + "\\n")
f.close()
request.session['currentIndex'] = request.session['currentIndex']+1
if request.session['currentIndex'] < len(df):
image = df["File"].iloc[request.session['currentIndex']]
request.session['labels'] = []
with open(dataFile, 'r') as file:
reader = csv.reader(file)
for row in reader:
if row[0] == image:
labels = request.session['labels']
labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
request.session['labels'] = labels
labels = request.session['labels']
return redirect(reverse('objectlabelling'))
def encryptedpackage(request):
from appbe.encryptedPackage import encrptpackage_command
from appbe.encryptedPackage import download_sclient
context = encrptpackage_command(request,Existusecases,usecasedetails)
context['version'] = AION_VERSION
try:
return download_sclient(request,context) #Task 9981
except Exception as e:
print(e)
return render(request, 'usecases.html', context)
def StationarySeasonalityTest(request):
from appbe.stationarity_seasonality_check import StationarySeasonalityTest as sst
datapath = request.GET.get('datapath')
datetimefeature = request.GET.get('datefeature')
featurename = request.GET.get('targetfeature')
seasonality_status = request.GET.get('seasonality_status')
stationarity_status = request.GET.get('stationarity_status')
df=pd.read_csv(datapath)
ss_obj=sst(df,featurename,datetimefeature)
result_dict=ss_obj.analysis(seasonality_status,stationarity_status)
return HttpResponse(json.dumps(result_dict), content_type="application/json")
def dataoverframe(df):
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
gfsg = GenericFeatureStatisticsGenerator()
proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': df}])
protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8")
return protostr
def getimpfeatures(dataFile, numberoffeatures):
imp_features = []
if numberoffeatures > 20:
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile, optimize=1)
pca_map = eda_obj.getPCATop10Features()
imp_features = pca_map.index.values.tolist()
return imp_features
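# --- Illustration (hedged note) -------------------------------------------------
# getimpfeatures() only ranks features for wide datasets: with more than 20
# columns it asks ux_eda for the PCA top-10 features, otherwise it returns an
# empty list and the UI simply shows every feature. Hypothetical usage:
#   imp = getimpfeatures('/path/to/data.csv', numberoffeatures=35)  # up to 10 names
# ------------------------------------------------------------------------------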
def uploaddata(request):
from appbe import exploratory_Analysis as ea
from appbe.aion_config import eda_setting
# context={'test':'test'}
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
try:
if selected_use_case == 'Not Defined':
context = {'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'tab': 'tabconfigure',
'usecaseerror': 'Please create a new use case for training the model or select an existing use case for retraining','computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage()
,'usecasetab':usecasetab,'version':AION_VERSION}
return render(request, 'upload.html', context)
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelStatus' in request.session:
ModelStatus = request.session['ModelStatus']
else:
ModelStatus = 'Not Trained'
if request.session['finalstate'] > 0:
if request.session['datatype'] in ['Video', 'Image','Document','Object']:
folderLocation = str(request.session['datalocation'])
dataFile = os.path.join(folderLocation, request.session['csvfullpath'])
df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
if df['Label'].isnull().sum() > 0:
if request.session['datatype'] == 'Document':
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDf = pd.DataFrame.from_dict(dataDict)
tcolumns=['text']
wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns)
status_msg = 'Successfully Done'
request.session['currentstate'] = 0
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'validcsv': True,'singletextdetails':wordcloudpic,'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage()
,'usecasetab':usecasetab,'version':AION_VERSION}
return render(request, 'upload.html', context)
eda_result = ''
duplicate_img = ''
color_plt = ''
df2 = df.groupby('Label', as_index=False)['File'].count().reset_index()
df_json = df2.to_json(orient="records")
df_json = json.loads(df_json)
cfig = go.Figure()
xaxis_data = df2['Label'].tolist()
yaxis_data = df2['File'].tolist()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
firstFile = df.groupby('Label').first().reset_index()
images = []
if request.session['datatype'] == 'Image':
qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
for i in range(len(firstFile)):
filename = firstFile.loc[i, "File"]
filePath = os.path.join(request.session['datalocation'], filename)
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
firstFile.loc[i, "Image"] = image_64
firstFile.loc[i, "Quality"] = qualityscore[filename]
elif request.session['datatype'] == 'Document':
dataDrift = ''
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDict["Label"].append(df.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
wordcloudpic = ea.getCategoryWordCloud(dataDf)
status_msg = 'Successfully Done'
context = {'tab': 'upload','dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket()
,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request, 'upload.html', context)
status_msg = 'Successfully Done'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
context = {'tab': 'upload', 'featuregraph': bargraph, 'validcsv': True, 'firstFile': firstFile,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt,'azurestorage':get_azureStorage(),
'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,
'usecasetab':usecasetab,'version':AION_VERSION
}
return render(request, 'upload.html', context)
elif request.session['datatype'].lower() in ['llm_document', 'llm_code']:
request.session['currentstate'] = 0
dataFile = request.session['csvfullpath']
df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
filesCount = 0
filesSize = 0
files = []
for index, row in df.iterrows():
filename = row['File']
files.append(filename)
filesCount = filesCount + 1
get_size = os.path.getsize(filename)
filesSize = round(filesSize + get_size, 1)
if filesSize > 1048576:
size = round((filesSize / (1024 * 1024)), 1)
filesSize = str(size) + ' M'
elif filesSize > 1024:
size = round((filesSize /1024), 1)
filesSize = str(size) + ' K'
else:
filesSize = str(filesSize) + ' B'
files = pd.DataFrame(files, columns=['File'])
files.index = range(1, len(files) + 1)
files.reset_index(level=0, inplace=True)
files = files.to_json(orient="records")
files = json.loads(files)
from appbe.prediction import get_instance
hypervisor, instanceid,region,image = get_instance(selected_use_case + '_' + str(ModelVersion))
if hypervisor != '':
computeinfrastructure['computeInfrastructure'] = hypervisor
else:
computeinfrastructure['computeInfrastructure'] = 'AWS'
context = {'tab': 'upload',"selected_use_case":selected_use_case,"selectedPath":request.session['datalocation'],"selectedfile":request.session['fileExtension'],'csvgenerated': True,'filesCount':filesCount,'filesSize':filesSize,'files':files,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'azurestorage':get_azureStorage(),
'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'datatype':request.session['datatype'],
'usecasetab':usecasetab,'version':AION_VERSION,"selectedfile":request.session['fileExtension'],"selectedPath":request.session['datalocation']
}
return render(request, 'upload.html', context)
else:
dataFile = str(request.session['datalocation'])
check_df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
check_df.rename(columns=lambda x: x.strip(), inplace=True)
featuresList = check_df.columns.tolist()
numberoffeatures = len(featuresList)
imp_features = getimpfeatures(dataFile, numberoffeatures)
# check_df = pd.read_csv(dataFile)
# check_df.rename(columns=lambda x: x.strip(), inplace=True)
# ----------------------------
# EDA Performance change
# ----------------------------
sample_size = int(eda_setting())
samplePercentage = 100
samplePercentval = 0
showRecommended = False
#dflength = len(eda_obj.getdata())
dflength = len(check_df)
if dflength > sample_size:
samplePercentage = round(float((sample_size/dflength) * 100),2)
samplePercentval = samplePercentage / 100
showRecommended = True
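# The recommendation maths above, worked through: with sample_size=10000 and
# dflength=40000, samplePercentage = round((10000/40000)*100, 2) = 25.0 and
# samplePercentval = 0.25, so the UI suggests sampling a quarter of the rows.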
# ----------------------------
# df_top = df.head(10)
df_top = check_df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = ''
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
# EDA Subsampling changes
context = {'range':range(1,101),'samplePercentage':samplePercentage,'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'selected_use_case': selected_use_case,'data': df_json,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'imp_features':imp_features,'numberoffeatures':numberoffeatures,
'version':AION_VERSION,
'selected': 'modeltraning','exploratory':False,'computeinfrastructure':computeinfrastructure}
else:
request.session['uploaddone'] = False
request.session['currentstate'] = 0
request.session['finalstate'] = 0
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage(),'clusteringModels':clusteringModels,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
'selected': 'modeltraning','computeinfrastructure':computeinfrastructure
}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
print(e)
return render(request, 'upload.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Failed to upload data','usecasetab':usecasetab,'version':AION_VERSION})
def mlflowtracking(request):
import requests
response = requests.get("http://localhost:5000/")
#response = requests.get(url)
statuscode = response.status_code
data = []
context = {'statuscode':statuscode}
context['version'] = AION_VERSION
return render(request, 'openmlflow.html', context)
def readlogfile(request):
file_path = request.session['logfilepath']
try:
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+")
configSettingsData = f.read()
configSettings = json.loads(configSettingsData)
f.close()
if os.path.exists(file_path):
my_file = open(file_path, 'r',encoding="utf-8")
file_content = my_file.read()
my_file.close()
matched_lines = [line.replace('Status:-', '') for line in file_content.split('\n') if "Status:-" in line]
matched_status_lines = matched_lines[::-1]
if len(matched_status_lines) > 0:
no_lines = len(matched_lines)
if 'noflines' not in request.session:
request.session['noflines'] = 0
request.session['noflines'] = request.session['noflines'] + 1
if request.session['ModelStatus'] != 'SUCCESS':
numberoflines = request.session['noflines']
if numberoflines > no_lines:
numberoflines = no_lines
request.session['noflines'] = no_lines
matched_lines = matched_lines[0:numberoflines]
matched_status_lines = matched_status_lines[0]
output = getStatusCount(matched_lines,request.session['total_steps'])
matched_status_lines = matched_status_lines.split('...')
matched_status_lines = matched_status_lines[1]
output2=[]
output2.append(matched_status_lines)
from appbe import leaderboard
import pandas
result = leaderboard.get_leaderboard(file_content)
if result.empty==False:
result = result.to_html(classes='table',col_space='100px', index=False)
else:
result = 'Leaderboard is not available'
data_details = {'status':output2,'logs':output,'log_file':file_content,'leaderboard': result,'trainingstatus':request.session['ModelStatus']}
return HttpResponse(json.dumps(data_details), content_type="application/json")
else:
matched_lines = []
matched_lines.append('Initializing Training Engine')
data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
return HttpResponse(json.dumps(data_details), content_type="application/json")
else:
stepsdone = 0
matched_lines = []
if request.session['ModelStatus'] == 'Running':
matched_lines.append('Initializing Training Engine')
else:
matched_lines.append('Not Trained')
data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
return HttpResponse(json.dumps(data_details), content_type="application/json")
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
matched_lines = []
if request.session['ModelStatus'] == 'Running':
stepsdone = 0
matched_lines.append('Initializing Training Engine')
data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines, 'leaderboard':matched_lines,'trainingstatus':matched_lines}
return HttpResponse(json.dumps(data_details), content_type="application/json")
else:
matched_lines.append('Not Trained')
data_details = {'status':matched_lines,'logs':matched_lines,'log_file':matched_lines,'leaderboard':matched_lines,'trainingstatus':matched_lines}
return HttpResponse(json.dumps(data_details), content_type="application/json")
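# --- Illustration (hedged sketch) ---------------------------------------------
# readlogfile() surfaces only the progress lines of the training log: any line
# containing "Status:-" is kept with the marker stripped, and reversing the
# list puts the newest status first. The log line format below is assumed.
def _demo_status_lines():
    file_content = "boot\nStatus:-step one...Loading Data\nnoise\nStatus:-step two...Training\n"
    matched_lines = [l.replace('Status:-', '') for l in file_content.split('\n') if "Status:-" in l]
    assert matched_lines[::-1][0] == 'step two...Training'
# ------------------------------------------------------------------------------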
# EDA Visualization changes
# ----------------------------
def getgraph(request):
from appbe import exploratory_Analysis as ea
output = ea.get_edaGraph(request)
return HttpResponse(output)
# ----------------------------
# --- 12686:Data Distribution related Changes S T A R T ---
def getDataDistribution(request):
from appbe import exploratory_Analysis as ea
output = ea.get_DataDistribution(request)
return HttpResponse(output)
# ---------------------- E N D ----------------------
def getDeepDiveData(request):
from appbe import exploratory_Analysis as ea
output = ea.get_DeepDiveData(request)
return HttpResponse(output)
# Fairness Metrics changes
# ----------------------------
def getmetrics(request):
from appbe import exploratory_Analysis as ea
output = ea.get_fairmetrics(request)
return HttpResponse(output)
# ----------------------------
def getdataimbalance(request):
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+", encoding="utf-8")
configSettingsData = f.read()
configSettingsJson = json.loads(configSettingsData)
df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8')
targetFeature = configSettingsJson['basic']['targetFeature']
df1 = df[targetFeature].value_counts().to_frame()
if (len(df1) < 1):
response = 'Data balance details are not available because no class was found in the target feature.'
elif (len(df1) > 30):
response = 'Data balance details are not available because the target feature has more than 30 classes.'
else:
dfStyler = df1.style.set_properties(**{'text-align': 'right'})
dfStyler.set_table_styles([dict(selector='th', props=[('text-align', 'right')])])
valueCount = dfStyler.to_html()
import matplotlib.pyplot as plt
import mpld3
fig, ax = plt.subplots(figsize=[6.5,6])
df2 = df[targetFeature].value_counts().sort_values()
_ncol = 1
_radius = 0.5
if (len(df1) > 10):
_radius = 0.4
_ncol = 1
else:
_radius = 0.6
_ncol = 1
ax = df2.plot(kind = 'pie', ylabel='', title=targetFeature, labeldistance=None, radius=_radius, autopct='%1.0f%%')
ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol = _ncol)
# ax.legend(bbox_to_anchor=(1,1), bbox_transform=plt.gcf().transFigure)
plt.subplots_adjust(left=0.02, bottom=0.05, right=0.9)
ax.get_yaxis().set_visible(False)
html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url)
response = valueCount + ' ' + html_graph
return HttpResponse(response)
def dotextSummarization(request):
from appbe.textSummarization import startSummarization
context = startSummarization(request,DEFAULT_FILE_PATH,CONFIG_FILE_PATH,DATA_FILE_PATH)
context['version'] = AION_VERSION
return render(request, 'summarization.html', context)
def openmodelevaluation(request,id):
deploypath = request.session['deploypath']
if id == 1:
contentFile= os.path.join(deploypath,'log','boosting_overfit.html')
if id == 2:
contentFile= os.path.join(deploypath,'log','boosting_overfit_condition.html')
if id == 3:
contentFile= os.path.join(deploypath,'log','smc.html')
if id == 4:
contentFile= os.path.join(deploypath,'log','smc_condition.html')
if id == 5:
contentFile= os.path.join(deploypath,'log','mi.html')
if id == 6:
contentFile= os.path.join(deploypath,'log','mi_con.html')
try:
my_file = open(contentFile, 'r', encoding="utf-8")
file_content = my_file.read()
my_file.close()
context = {'content': file_content,'status':request.session['ModelStatus']}
context['version'] = AION_VERSION
return render(request, 'deepcheck.html', context, content_type="text/html")
except:
context = {'content': 'Not available'}
context['version'] = AION_VERSION
return render(request, 'deepcheck.html', context, content_type="text/html")
def downloadlogfile(request,id,currentVersion):
import mimetypes
from django.http import FileResponse
p = usecasedetails.objects.get(id=id)
model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
if model[0].DeployPath != 'NA':
file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log')
else:
file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log')
try:
if os.path.exists(file_path):
my_file = open(file_path, 'r', encoding="utf-8")
file_content = my_file.read()
my_file.close()
mime_type, _ = mimetypes.guess_type(file_path)
response = HttpResponse(file_content, content_type=mime_type) #bugid 12513
# Set the HTTP header for sending to browser
filename = p.usecaseid+'.log'
response['Content-Disposition'] = "attachment; filename=%s" % filename
return response
else:
response = HttpResponse('File Not Found')#bugid 12513
# Set the HTTP header for sending to browser
filename = p.usecaseid+'.log'
response['Content-Disposition'] = "attachment; filename=%s" % filename
return response
except Exception as e:
response = HttpResponse('File Not Found')#bugid 12513
# Set the HTTP header for sending to browser
filename = p.usecaseid+'.log'
response['Content-Disposition'] = "attachment; filename=%s" % filename
return response
def opendetailedlogs(request,id,currentVersion):
p = usecasedetails.objects.get(id=id)
model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
if model[0].DeployPath != 'NA':
file_path = os.path.join(str(model[0].DeployPath),'log','model_training_logs.log')
else:
file_path = os.path.join(DEPLOY_LOCATION,model[0].ModelName.usecaseid,str(currentVersion),'log','model_training_logs.log')
try:
if os.path.exists(file_path):
my_file = open(file_path, 'r', encoding="utf-8")
file_content = my_file.read()
my_file.close()
context = {'content':file_content}
return HttpResponse(json.dumps(context),content_type="application/json")
else:
context = {'content':'Status not available'}
return HttpResponse(json.dumps(context),content_type="application/json")
except Exception as e:
print(e)
context = {'content':'Status not available'}
return HttpResponse(json.dumps(context),content_type="application/json")
def batchlearning(request):
from appbe.onlineLearning import startIncrementallearning
action,context = startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH)
context['version'] = AION_VERSION
return render(request,action,context)
def downlpredictreport(request):
predictionResults = request.POST.get('predictionResults')
predictionResults = pd.DataFrame.from_dict(eval(predictionResults))
usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
predictFileName = usename + '_prediction.xlsx'
from io import BytesIO as IO
excel_file = IO()
excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
predictionResults.to_excel(excel_writer, sheet_name='Predictions')
workbook = excel_writer.book
#excel_writer.save()
excel_writer.close()
excel_file.seek(0)
response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + predictFileName
return response
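# --- Illustration (hedged sketch) ---------------------------------------------
# The in-memory Excel pattern used above, standalone: write with xlsxwriter
# into a BytesIO buffer, rewind, then stream the bytes out as a download.
import pandas as pd
from io import BytesIO
def dataframe_to_xlsx_bytes(df, sheet_name='Predictions'):
    buffer = BytesIO()
    with pd.ExcelWriter(buffer, engine="xlsxwriter") as writer:
        df.to_excel(writer, sheet_name=sheet_name)
    buffer.seek(0)                 # rewind before reading the payload back
    return buffer.read()           # bytes suitable for an HttpResponse body
# ------------------------------------------------------------------------------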
# EDA Reports changes
# ----------------------------
def downloadxplainreport(request):
from appbe.xplain import global_explain
status,msg,ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps = global_explain(request)
if status == 'Success':
usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
predictFileName = usename + '_xplain.xlsx'
df = pd.DataFrame({'What kind of data does the system learn from?': ['This dataset is a dataset of measurements taken for '+str(numberofclasses)+' categories of '+str(targetFeature),'The '+str(numberofclasses)+' different categories of '+str(targetFeature)+' as per the data are:']})
i = 1
df1 = []
for x in target_classes:
df1.append({'What kind of data does the system learn from?':' '+str(i)+':'+str(x)})
i = i+1
df1.append({'What kind of data does the system learn from?':'The total number of data points is '+str(dataPoints)})
df = pd.concat([df, pd.DataFrame(df1)], ignore_index = True)
from io import BytesIO as IO
excel_file = IO()
excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
df.to_excel(excel_writer, sheet_name='Dashboard',index=False)
pd.DataFrame(df_proprocessed).to_excel(excel_writer, sheet_name='Top 5 Rows',index=False)
df = pd.DataFrame({'What are the various features of the data used for model training?': ['The various features of the data are:']})
i = 1
df1 = []
for x in modelfeatures:
df1.append({'What are the various features of the data used for model training?':' '+str(i)+': '+str(x)})
i = i+1
df = pd.concat( [df, pd.DataFrame( df1)], ignore_index = True)
df.to_excel(excel_writer, sheet_name='Features',index=False)
topFeaturesMsg = pd.DataFrame(topFeaturesMsg,columns=["Feature Importance"])
topFeaturesMsg.to_excel(excel_writer, sheet_name='Feature importance',index=False)
achors = pd.DataFrame(anchorjson)
achors.to_excel(excel_writer, sheet_name='Prediction',index=False)
workbook = excel_writer.book
#excel_writer.save()
excel_writer.close()
excel_file.seek(0)
response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + predictFileName
return response
else:
response = HttpResponse()
return response
def gotoreport(request):
report_button = request.POST.get('trainmodel')
usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
if report_button == 'download_edafile':
from appbe.reports import downloadtrainingfile
edaFileName,excel_file = downloadtrainingfile(request,Existusecases)
response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + edaFileName
return response
def LoadBasicConfiguration(request):
try:
from appbe import exploratory_Analysis as ea
configFile = DEFAULT_FILE_PATH + 'eion_config.json'
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
temp = {}
temp['ModelName'] = request.session['UseCaseName']
temp['Version'] = request.session['ModelVersion']
dataLocation = str(request.session['datalocation'])
df = pd.read_csv(dataLocation, encoding='latin1')
featuresList = df.columns.values.tolist()
datetimeFeatures = []
sequenceFeatures = []
unimportantFeatures = []
featuresRatio = {}
for i in featuresList:
check = ea.match_date_format(df[i])
if check == True:
datetimeFeatures.append(i)
unimportantFeatures.append(i)
continue
seq_check = ea.check_seq_feature(df[i])
if seq_check == True:
sequenceFeatures.append(i)
unimportantFeatures.append(i)
continue
ratio = ea.check_category(df[i])
if ratio != 0:
featuresRatio[i] = ratio
else:
unimportantFeatures.append(i)
targetFeature = min(featuresRatio, key=featuresRatio.get)
unimportantFeatures.append(targetFeature)
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = datetimeFeatures
config['sequenceFeatures'] = sequenceFeatures
config['FeaturesList'] = featuresList
config['unimportantFeatures'] = unimportantFeatures
config['targetFeature'] = targetFeature
context = {'tab': 'configure', 'temp': temp, 'config': config}
context['version'] = AION_VERSION
return render(request, 'modeltraning.html', context)
except:
return render(request, 'modeltraning.html', {'error':'Failed to load basic config file','version':AION_VERSION})
def LoadDataForSingleInstance(request):
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
try:
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemtypes = configSettingsJson['basic']['analysisType']
#print(problemtypes.keys())
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type == 'timeSeriesForecasting': #task 11997
inputFieldsDict = {'noofforecasts': 10}
elif problem_type == 'recommenderSystem':
inputFieldsDict = {"uid": 1, "iid": 31, "rating": 0}
elif problem_type == 'videoForecasting':
inputFieldsDict = {'VideoPath': 'person01_boxing_d1_uncomp.avi'}
else:
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
df = pd.read_csv(dataFilePath, encoding='latin1')
singleInstanceData = df.loc[0, inputFeaturesList]
inputFieldsDict = singleInstanceData.to_dict()
inputFields = []
inputFields.append(inputFieldsDict)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'predict', 'inputFields': inputFields, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'}
return render(request, 'prediction.html', context=context)
except:
return render(request, 'prediction.html', {'tab': 'predict', 'error': 'Failed to load input fields', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction'})
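# --- Illustration (hedged sketch) ---------------------------------------------
# The analysisType lookup above (repeated in several views below) scans for
# the single key whose value is the string 'True'. A compact equivalent:
def resolve_problem_type(analysis_types):
    return next((k for k, v in analysis_types.items() if v == 'True'), '')
# resolve_problem_type({'classification': 'False', 'regression': 'True'})
# -> 'regression'
# ------------------------------------------------------------------------------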
def uploadDatafromunsupervisedmodel(request):
computeinfrastructure = compute.readComputeConfig()
try:
modelid = request.POST.get('modelid')
p = Existusecases.objects.get(id=modelid)
dataFile = str(p.DataFilePath)
deploypath = str(p.DeployPath)
if(os.path.isfile(dataFile) == False):
context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure}
return render(request, 'prediction.html', context)
predictionScriptPath = os.path.join(deploypath,'aion_predict.py')
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
if (predict_dict['status'] == 'SUCCESS'):
predictionResults = predict_dict['data']
df2 = pd.json_normalize(predictionResults)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df2.to_csv(dataFile, index=False)
request.session['datalocation'] = str(dataFile)
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile)
featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
# ----------------------------
samplePercentage = 100
samplePercentval = 0
showRecommended = False
df = pd.read_csv(dataFile,nrows=100)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data file uploaded successfully'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
No_of_Permissible_Features_EDA = get_edafeatures()
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
context = {'tab': 'tabconfigure','range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage,'computeinfrastructure':computeinfrastructure, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
print(e)
return render(request, 'upload.html', {'error':'Failed to upload data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','version':AION_VERSION})
def qlearning(request):
return render(request, 'qlearning.html', {})
def RLpath(request):
return render(request, 'rl_path.html', {})
def stateTransitionSettings(request):
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
import requests
setting_url = service_url.read_service_url_params(request)
usecasename = request.session['usecaseid'].replace(" ", "_")
setting_url = setting_url+'pattern_anomaly_settings?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion'])
#print(setting_url)
inputFieldsDict = {}
inputFieldsDict['groupswitching'] = request.POST.get('groupswitching')
inputFieldsDict['transitionprobability'] = request.POST.get('transitionprobability')
inputFieldsDict['transitionsequence'] = request.POST.get('transitionsequence')
inputFieldsDict['sequencethreshold'] = request.POST.get('sequencethreshold')
# print(inputFieldsDict)
inputFieldsJson = json.dumps(inputFieldsDict)
#print(inputFieldsJson)
try:
response = requests.post(setting_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
if response.status_code != 200:
outputStr=response.content
context = {'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'}
return render(request, 'prediction.html', context)
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
context = {'tab': 'tabconfigure', 'error': 'AION Service needs to be started', 'selected': 'prediction'}
else:
context = {'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst), 'selected': 'prediction'}
return render(request, 'prediction.html', context)
try:
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
#print(outputStr)
predict_dict = json.loads(str(outputStr))
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
inputFields = []
inputFields.append(inputFieldsDict)
iterName = request.session['UseCaseName'].replace(" ", "_")
settings_url = ''
problemtypes = configSettingsJson['basic']['analysisType']
#print(problemtypes.keys())
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type == 'StateTransition':
ser_url = service_url.read_pattern_anomaly_url_params(request)
settings_url = service_url.read_pattern_anomaly_setting_url_params(request)
else:
ser_url = service_url.read_service_url_params(request)
ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion)
onnx_runtime = False
if str(configSettingsJson['advance']['deployer']['edge_deployment']) == 'True':
if str(configSettingsJson['advance']['deployer']['edge_format']['onnx']) == 'True':
onnx_runtime = True
analyticsTypes = problem_type
imagedf = ''
return render(request, 'prediction.html',
{'inputFields': inputFields,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url,'usecasetab':usecasetab,
'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction'})
except Exception as e:
print(e)
return render(request, 'prediction.html', {'error': 'Failed to apply state transition settings', 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'})
def flcommand(request):
try:
from appbe.flconfig import fl_command
context = fl_command(request,Existusecases,usecasedetails)
return render(request, 'usecases.html', context)
except Exception as e:
print(e)
return render(request, 'models.html',{'error': 'Failed to generate federated learning client code'})
def maaccommand(request):
from appbe.models import maac_command
try:
context,page = maac_command(request,Existusecases,usecasedetails)
context['version'] = AION_VERSION
return render(request,page,context)
except Exception as e:
print(e)
return render(request, 'usecases.html',{'errormlac': 'Failed to generate code: '+str(e),'version':AION_VERSION})
def onnxruntime(request):
try:
onnx_scriptPath = os.path.join(request.session['deploypath'],'edge','onnxvalidation.py')
outputStr = subprocess.check_output([sys.executable, onnx_scriptPath])
#print(outputStr)
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'predict', 'predictionResults': predict_dict, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction','onnx_edge':True,'version':AION_VERSION}
return render(request, 'prediction.html', context=context)
except Exception as inst:
print('-------------------->'+str(inst))
context = {'tab': 'tabconfigure', 'error': 'Failed to perform prediction', 'selected': 'prediction','version':AION_VERSION}
return render(request, 'prediction.html', context)
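# --- Illustration (hedged sketch) ---------------------------------------------
# onnxruntime() above, like several other views, shells out to a prediction
# script and scrapes the JSON payload that follows a "predictions:" marker on
# stdout. The parsing step, standalone:
import json, re
def parse_prediction_output(stdout_bytes):
    text = stdout_bytes.decode('utf-8')
    payload = re.search(r'predictions:(.*)', text, re.IGNORECASE).group(1)
    return json.loads(payload.strip())
# e.g. parse_prediction_output(b'... predictions:{"status": "SUCCESS"}')
# -> {'status': 'SUCCESS'}
# ------------------------------------------------------------------------------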
def instancepredict(request):
log = logging.getLogger('log_ux')
from appbe.train_output import get_train_model_details
modelType=''
trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request)
computeinfrastructure = compute.readComputeConfig()
selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
try:
t1 = time.time()
if request.FILES:
Datapath = request.FILES['DataFilePath']
from io import StringIO
ext = str(Datapath).split('.')[-1]
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt']:
content = StringIO(Datapath.read().decode('utf-8'))
reader = csv.reader(content)
df = pd.DataFrame(reader)
df.columns = df.iloc[0]
df = df[1:]
filetimestamp = str(int(time.time()))
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet','txt','pdf']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
destination.close()
dataPath = dataFile
if(os.path.isfile(dataFile) == False):
context = {'tab': 'tabconfigure', 'error': 'Data file does not exist','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist')
return render(request, 'prediction.html', context)
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
problemtypes = configSettingsJson['basic']['analysisType']
problem_type = ''
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
PredictionResultsOfTextSum = []
if (predict_dict['status'] == 'SUCCESS'):
predictionResults = predict_dict['data']
predictionResultsTextSum= predict_dict['data']
if problem_type in ['similarityIdentification','contextualSearch']:
for x in predictionResults:
msg=''
for y in x['prediction']:
msg += str(y)
msg += '\n'
msg += '\n'
msg += '\n'
msg += '\n'
msg += '\n'
x['prediction'] = msg
if problem_type == 'textSummarization':
Results = {}
Results['msg'] = predict_dict['msg']
PredictionResultsOfTextSum.append(Results)
Results['prediction'] = predict_dict['data']
PredictionResultsOfTextSum.append(Results)
t2 = time.time()
log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(
round(t2 - t1)) + ' sec' + ' : ' + 'Success')
else:
context = {'tab': 'tabconfigure', 'error': 'Failed to perform prediction','version':AION_VERSION}
log.info('Predict Batch : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to perform prediction')
return render(request, 'prediction.html', context)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
from appfe.modelTraining.train_views import getMLModels
problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
from appbe.prediction import createInstanceFeatures
ser_url = service_url.read_service_url_params(request)
inputFields,ser_url = createInstanceFeatures(configSettingsJson,problem_type,mlmodels,request.session['usecaseid'],request.session['ModelVersion'],ser_url)
from appfe.modelTraining.prediction_views import getTrainingStatus
result = getTrainingStatus(request)
context = {'tab': 'predict','ser_url':ser_url,'predictionResults': predictionResults, 'selected_use_case': selected_use_case,'problem_type':problem_type,'result':result,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'bestmodel':bestmodel,'usecasetab':usecasetab,'version':AION_VERSION,'modelType':modelType,'inputFields':inputFields,'configSettingsJson':configSettingsJson}
if problem_type == 'textSummarization':
context={'tab': 'predict','predictionResultsTextSum': predictionResultsTextSum, 'PredictionResultsOfTextSum': PredictionResultsOfTextSum,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction','problem_type':problem_type}
return render(request, 'prediction.html', context=context)
except Exception as inst:
print(inst)
context = {'tab': 'tabconfigure', 'error': 'Failed to perform prediction', 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
log.info('Predict Batch :' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to perform prediction, '+str(inst))
return render(request, 'prediction.html', context)
def LoadAdvanceConfiguration(request):
try:
if request.method == 'POST':
configFile = request.session['config_json']
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
context = {'tab': 'advconfig', 'advconfig': configSettingsJson}
context['version'] = AION_VERSION
context['usecasetab'] = usecasetab
return render(request, 'modeltraning.html', context)
except:
return render(request, 'modeltraning.html', {'error':'Fail to load advance config file','version':AION_VERSION,'usecasetab':usecasetab})
# advance
def Advance(request):
try:
from appbe import advance_Config as ac
request.session['defaultfilepath'] = DEFAULT_FILE_PATH
context = ac.save(request)
submittype = request.POST.get('AdvanceSubmit')
computeinfrastructure = compute.readComputeConfig()
if submittype != 'AdvanceDefault':
from appfe.modelTraining.train_views import trainmodel
return trainmodel(request)
else:
context['version'] = AION_VERSION
context['usecasetab'] = usecasetab
context['computeinfrastructure'] = computeinfrastructure
return render(request, 'advancedconfig.html', context)
except Exception as e:
print(e)
return render(request, 'advancedconfig.html', {'erroradvance':'Fail to save','version':AION_VERSION,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure})
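# Builds the shared context for the use-case landing pages: registered use cases, successfully
# trained models, service URLs, Kafka/running settings and the packaging how-to text.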
def templatepage(request):
computeinfrastructure = compute.readComputeConfig()
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
ser_url = service_url.read_service_url_params(request)
packagetip='''
Call From Command Line
1. Click AION Shell
2. python {packageAbsolutePath}/aion_prediction.py {json_data}
Call As a Package
1. Go To package_path\\WHEELfile
2. python -m pip install {packageName}-py3-none-any.whl
Call the predict function after wheel package installation
1. from {packageName} import aion_prediction as p1
2. p1.predict({json_data})
'''
usecase = usecasedetails.objects.all()
models = Existusecases.objects.filter(Status='SUCCESS')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
context = {'usecasedetail': usecase, 'nouc': nouc,'models': models, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip,'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab}
return (context)
except:
context = {'error':'Fail to load usecases details','usecasetab':usecasetab}
return (context)
def modelkafka(request):
try:
addKafkaModel(request,request.session['datalocation'])
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
ser_url = service_url.read_service_url_params(request)
packagetip='''
Call From Command Line
1. Click AION Shell
2. python {packageAbsolutePath}/aion_prediction.py {json_data}
Call As a Package
1. Go To package_path\\WHEELfile
2. python -m pip install {packageName}-py3-none-any.whl
Call the predict function after wheel package installation
1. from {packageName} import aion_prediction as p1
2. p1.predict({json_data})
'''
models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id')
usecase = usecasedetails.objects.all().order_by('-id')
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
return render(request, 'usecases.html',
{'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting})
except:
return render(request, 'usecases.html',{'selected': 'usecase', 'selected_use_case': selected_use_case,'error': 'Fail to load modelkafka'})
def startTracking(request):
from appbe.aion_config import aion_tracking
from appbe.aion_config import start_tracking
try:
status = aion_tracking()
if status.lower() == 'error':
start_tracking()
status = 'MLflowSuccess'
else:
status = 'MLflowSuccess'
context = {'selected':'DataOperations','usecasetab':usecasetab,'status':status}
context['version'] = AION_VERSION
return render(request, "dataoperations.html",context)
except:
context = {'selected':'DataOperations','usecasetab':usecasetab,'status':'Error'}
context['version'] = AION_VERSION
return render(request, "dataoperations.html",context)
def startService(request):
try:
status = aion_service()
if status == 'Running':
status = 'AION service already running'
elif status == 'Started':
status = 'AION service started successfully'
else:
status = 'Error in starting'
context = settings(request)
context['status'] = status
return render(request, 'settings_page.html', context)
except:
return render(request, 'settings_page.html', {'error':'Fail to start service'})
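# Entry point for the data-upload flow: resets the retraining flag, renders the use-case page
# and, unless the user opted out, triggers a telemetry check.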
def Dataupload(request):
from appbe.pages import usecases_page
checkModelUnderTraining(request,usecasedetails,Existusecases)
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
context['currentstate'] =0
from appbe.aion_config import get_telemetryoptout
telemetryoptout = get_telemetryoptout()
if telemetryoptout == 'No':
from appbe.telemetry import checkTelemtry
checkTelemtry()
return render(request,action,context)
def show(request):
try:
models = Existusecases.objects.all()
# print(models)
return render(request, "usecases.html", {'models': models, 'selected': 'usecase'})
except:
return render(request, "usecases.html", {'error': 'Error to show Usecases', 'selected': 'usecase'})
def edit(request, id):
try:
usecasedetail = usecasedetails.objects.get(id=id)
return render(request, 'edit.html', {'usecasedetail': usecasedetail, 'selected': 'usecase'})
except:
return render(request, "usecases.html", {'error': 'Error in editing usecase', 'selected': 'usecase'})
def opentraining(request, id,currentVersion):
from appbe.pages import usecases_page
try:
p = usecasedetails.objects.get(id=id)
model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
Version = model[0].Version
usecasename = p.UsecaseName
request.session['ModelName'] = p.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.usecaseid
request.session['ModelVersion'] = Version
request.session['ModelStatus'] = 'Not Trained'
request.session['finalstate'] = 0
usecase = usecasedetails.objects.all().order_by('-id')
configfile = str(model[0].ConfigPath)
dataFile = ''
if configfile != '':
request.session['finalstate'] = 2
f = open(configfile, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
dataFile = configSettings['basic']['dataLocation']
if configSettings['basic']['folderSettings']['fileType'] == 'Object':
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
return objectlabeldone(request)
elif configSettings['basic']['folderSettings']['fileType'] in ['LLM_Document','LLM_Code']:
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension']
request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
else:
request.session['datalocation'] = str(configSettings['basic']['dataLocation'])
request.session['datatype'] = 'Normal'
if 'fileSettings' in configSettings['basic'].keys():
fileSettings = configSettings['basic']['fileSettings']
if 'delimiters' in fileSettings.keys():
delimiters = configSettings['basic']['fileSettings']['delimiters']
textqualifier = configSettings['basic']['fileSettings']['textqualifier']
request.session['delimiter'] = delimiters
request.session['textqualifier'] = textqualifier
else:
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
if dataFile == '':
dataFile = str(model[0].DataFilePath)
if dataFile != '':
request.session['finalstate'] = 2
request.session['datalocation'] = dataFile
return uploaddata(request)
except Exception as e:
print(e)
checkModelUnderTraining(request,usecasedetails,Existusecases)
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
context['Status'] = 'Error'
context['Msg'] = 'Error in retraining usecase. Check log file for more details'
return render(request,action,context)
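# Stops a running model service for the selected use case, then rebuilds the model list
# (score, best model, MAAC/FL-server support) from each deployment's output.json.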
def stopmodelservice(request):
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
id = request.POST.get('modelid')
pid = request.POST.get('pid')
installPackage.stopService(pid)
time.sleep(5)
usecasedetail = usecasedetails.objects.get(id=id)
usecasename = usecasedetail.UsecaseName
runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
for model in models:
model.scoringCreteria = 'NA'
model.score = 'NA'
model.deploymodel = 'NA'
model.maacsupport = 'False'
model.flserversupport = 'False'
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath), 'output.json')
try:
with open(modelPath) as file:
outputconfig = json.load(file)
file.close()
if outputconfig['status'] == 'SUCCESS':
model.scoringCreteria = outputconfig['data']['ScoreType']
model.score = outputconfig['data']['BestScore']
model.deploymodel = outputconfig['data']['BestModel']
supportedmodels = ["Logistic Regression",
"Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge"]
if model.deploymodel in supportedmodels:
model.maacsupport = 'True'
else:
model.maacsupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
except Exception as e:
pass
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
nouc = 0
usecase = usecasedetails.objects.all()
return render(request, 'models.html',
{'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
except:
return render(request, 'models.html',{'error': 'Fail to stop model service'})
def startmodelservice(request):
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
installPackage.startService(request.POST.get('modelName'),request.POST.get('ip'),request.POST.get('portNo'))
time.sleep(5)
id = request.POST.get('modelid')
usecasedetail = usecasedetails.objects.get(id=id)
usecasename = usecasedetail.UsecaseName
runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
        installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
for model in models:
model.scoringCreteria = 'NA'
model.score = 'NA'
model.deploymodel = 'NA'
model.maacsupport = 'False'
model.flserversupport = 'False'
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json')
try:
with open(modelPath) as file:
outputconfig = json.load(file)
file.close()
if outputconfig['status'] == 'SUCCESS':
model.scoringCreteria = outputconfig['data']['ScoreType']
model.score = outputconfig['data']['BestScore']
model.deploymodel = outputconfig['data']['BestModel']
supportedmodels = ["Logistic Regression",
"Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge"]
if model.deploymodel in supportedmodels:
model.maacsupport = 'True'
else:
model.maacsupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
except Exception as e:
pass
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
nouc = 0
usecase = usecasedetails.objects.all()
return render(request, 'models.html',
{'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
except:
return render(request, 'models.html',{'error': 'Fail to start model service'})
def downloadpackage(request, id,version):
    return(installPackage.downloadPackage(request,id,version,usecasedetails,Existusecases))
def createpackagedocker(request, id,version):
try:
context = installPackage.createPackagePackage(request,id,version,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request, 'usecases.html',context)
except Exception as e:
return render(request, 'usecases.html',{'error': str(e)})
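# Renders the publish view for a use case: reads its publish/drift status and running-service
# state, and collects per-model deployment metadata from etc/output.json.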
def publish(request, id):
print("Inside Publish Tab")
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
usecasedetail = usecasedetails.objects.get(id=id)
usecasename = usecasedetail.UsecaseName
publish_version,publish_status,publish_drift_status =chech_publish_info(usecasename)
runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
for model in models:
model.scoringCreteria = 'NA'
model.score = 'NA'
model.deploymodel = 'NA'
model.maacsupport = 'False'
model.flserversupport = 'False'
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath),'etc', 'output.json')
try:
with open(modelPath) as file:
outputconfig = json.load(file)
file.close()
if outputconfig['status'] == 'SUCCESS':
model.scoringCreteria = outputconfig['data']['ScoreType']
model.score = outputconfig['data']['BestScore']
model.deploymodel = outputconfig['data']['BestModel']
model.featuresused = eval(outputconfig['data']['featuresused'])
model.targetFeature = outputconfig['data']['targetFeature']
if 'params' in outputconfig['data']:
model.modelParams = outputconfig['data']['params']
model.modelType = outputconfig['data']['ModelType']
model.dataPath = os.path.join(str(model.DeployPath),'data', 'postprocesseddata.csv')
supportedmodels = ["Logistic Regression",
"Naive Bayes","Decision Tree","Support Vector Machine","K Nearest Neighbors","Gradient Boosting","Random Forest","Linear Regression","Lasso","Ridge","Extreme Gradient Boosting (XGBoost)","Light Gradient Boosting (LightGBM)","Categorical Boosting (CatBoost)","LSTM"]
print(model.deploymodel)
if model.deploymodel in supportedmodels:
model.maacsupport = 'True'
else:
model.maacsupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
supportedmodels = ["Extreme Gradient Boosting (XGBoost)"]
if model.deploymodel in supportedmodels:
model.encryptionsupport = 'True'
else:
model.encryptionsupport = 'False'
except Exception as e:
print(e)
pass
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
nouc = 0
usecase = usecasedetails.objects.all()
print(models)
return render(request, 'models.html',
{'tab': 'upload','nouc':nouc,'usecasedetail': usecase, 'models': models, 'selected_use_case': selected_use_case,'usecasetab':usecasetab,'publish_version':publish_version,'publish_status':publish_status,'publish_drift_status':publish_drift_status,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'installationStatus':installationStatus,'modelName':modelName,'modelVersion':modelVersion,'usecasename':usecasename,'runningStatus':runningStatus,'pid':pid,'ip':ip,'port':port,'usecaseid':id})
except Exception as e:
print(e)
return render(request, 'models.html',{'error': 'Fail to publish model'})
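# Deletes a single trained version (including its deployment folder) and removes the parent
# use case once no versions remain.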
def remove_version(request, id):
from appbe.pages import get_usecase_page
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
if request.method == 'GET':
try:
model = Existusecases.objects.get(id=id)
usecaseid = model.ModelName.id
if os.path.isdir(str(model.DeployPath)):
import shutil
if DEPLOY_LOCATION != str(model.DeployPath):
shutil.rmtree(str(model.DeployPath))
else:
uname = model.ModelName.usecaseid.replace(" ", "_")
usecaseversion = model.Version
deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion))
if os.path.isdir(str(deployLocation)):
shutil.rmtree(str(deployLocation))
model.delete()
usecasedetail = usecasedetails.objects.get(id=model.ModelName.id)
models = Existusecases.objects.filter(ModelName=usecasedetail)
if len(models) == 0:
usecasedetail.delete()
Status = 'SUCCESS'
Msg = 'Version Deleted Successfully'
except Exception as e:
print(e)
Status = 'Error'
Msg = str(e)
status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = Status
context['Msg'] = Msg
context['version'] = AION_VERSION
return render(request, 'usecases.html',context)
except Exception as e:
print(e)
status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = 'Error'
context['Msg'] = 'Usecase Version Deletion Error'
context['version'] = AION_VERSION
return render(request, 'usecases.html',context)
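# Deletes a use case and all of its trained versions, cleaning up deployment folders and
# resetting the session if the deleted use case was the currently selected one.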
def destroy(request, id):
from appbe.pages import get_usecase_page
try:
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
if request.method == 'GET':
try:
usecasedetail = usecasedetails.objects.get(id=id)
usecasename = usecasedetail.usecaseid
models = Existusecases.objects.filter(ModelName=usecasedetail)
for model in models:
if os.path.isdir(str(model.DeployPath)):
import shutil
if DEPLOY_LOCATION != str(model.DeployPath):
shutil.rmtree(str(model.DeployPath))
else:
uname = usecasename.replace(" ", "_")
usecaseversion = model.Version
deployLocation = os.path.join(str(model.DeployPath),uname+'_'+str(usecaseversion))
if os.path.isdir(str(deployLocation)):
shutil.rmtree(str(deployLocation))
usecasedetail.delete()
Status = 'SUCCESS'
Msg = 'Deleted Successfully'
except Exception as e:
print(e)
Status = 'Error'
Msg = str(e)
else:
usecasename = 'Not Defined'
if 'UseCaseName' in request.session:
if (usecasename == request.session['UseCaseName']):
selected_use_case = 'Not Defined'
request.session['UseCaseName'] = selected_use_case
request.session['ModelVersion'] = 0
request.session['ModelStatus'] = 'Not Trained'
else:
selected_use_case = request.session['UseCaseName']
else:
selected_use_case = 'Not Defined'
status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = Status
context['Msg'] = Msg
context['version'] = AION_VERSION
return render(request, 'usecases.html',context)
except:
status, context,page = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = 'Error'
context['Msg'] = 'Usecase Deletion Error'
context['version'] = AION_VERSION
return render(request, 'usecases.html',context)
def update(request, id):
try:
lab = get_object_or_404(usecasedetails, id=id)
if request.method == 'POST':
form = usecasedetailsForm(request.POST, instance=lab)
request.session['usecaseid'] = form['id']
# print(request.session['usecaseid'])
if form.is_valid():
form.save()
return redirect('/show')
else:
form = usecasedetailsForm(instance=lab)
request.session['usecaseid'] = form['id']
# print(request.session['usecaseid'])
return render(request, 'edit.html', {'form': form, 'selected': 'usecase'})
except:
return render(request, 'edit.html', {'error': 'Error in updating usecase', 'selected': 'usecase'})
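# Uploads a new data file for output-drift monitoring and stores its location and feature
# list in the session.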
def newfile(request):
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
try:
model = Existusecases.objects.get(ModelName=request.session['ModelName'], Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+")
training_output = f.read()
f.close()
training_output = json.loads(training_output)
dataFile = request.POST.get('localfilePath')
        if not os.path.isfile(dataFile):
context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
return render(request, 'outputdrif.html', context)
df = pd.read_csv(dataFile)
request.session['drift_datalocations'] = dataFile
request.session['Features_dr'] = df.columns.values.tolist()
Featrs = request.session['Features_dr']
statusmsg = 'Data File Uploaded Successfully'
context = {'tab': 'tabconfigure', 'status_msg': statusmsg,
'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
'selected': 'monitoring', 'z': Featrs}
context['version'] = AION_VERSION
return render(request, 'outputdrif.html', context)
except Exception as Isnt:
context = {'error': 'Error during output drift.'+str(Isnt), 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
context['version'] = AION_VERSION
return render(request, 'outputdrif.html', context)
def summarization(request):
context = {'selected':'DataOperations','usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, "summarization.html",context)
# ------------------ Debiasing Changes ------------------
def getdataclasses(request):
updatedConfigFile = request.session['config_json']
    f = open(updatedConfigFile, "r", encoding="utf-8")
    configSettingsData = f.read()
    f.close()
    configSettingsJson = json.loads(configSettingsData)
df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8')
classeslist = []
selectedFeature = request.GET.get('features')
classeslist = df[selectedFeature].unique().tolist()
_list = []
    for item in classeslist:
        _list.append("<option value='" + str(item) + "'>" + str(item) + "</option>")
return HttpResponse(_list)
# ------------------ ------------------
def ucdetails(request, id):
from appbe.pages import usecases_page
checkModelUnderTraining(request, usecasedetails, Existusecases)
request.session['IsRetraining'] = 'No'
status, context, action = usecases_page(request, usecasedetails, Existusecases, id)
context['version'] = AION_VERSION
return render(request, 'usecasedetails.html', context)
def dataoperations(request):
context = {'selected':'DataOperations','usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, "dataoperations.html",context)
# @login_required(login_url="/login/")
def datalabel(request):
context = {'selected':'DataOperations','usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, "label_dataset_ver2.html",context)
# @login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split('/')[-1]
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template('page-404.html')
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template('page-500.html')
return HttpResponse(html_template.render(context, request))
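# Maps the user-selected delimiter name ('tab', 'semicolon', 'comma', 'space', 'other' or a
# literal character) and text qualifier to concrete values, defaulting to ',' and '"'.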
def delimitedsetting(delimiter='',textqualifier='',other=''):
    if delimiter != '':
        if delimiter.lower() in ('tab', '\\t'):
            delimiter = '\\t'
        elif delimiter.lower() in ('semicolon', ';'):
            delimiter = ';'
        elif delimiter.lower() in ('comma', ','):
            delimiter = ','
        elif delimiter.lower() in ('space', ' '):
            delimiter = ' '
        elif delimiter.lower() == 'other' or other != '':
            delimiter = other if other != '' else ','
        # any other non-empty delimiter is used as entered
    else:
        delimiter = ','
    if textqualifier == '':
        textqualifier = '"'
    return delimiter, textqualifier
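# Reads an uploaded dataset (CSV/TSV, JSON, Avro or Parquet), samples up to 100 rows for
# preview, infers per-column data types and returns the preview table as JSON for the UI.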
@csrf_exempt
def upload_and_read_file_data(request):
file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'])
    # Normalise the delimiter and text qualifier straight from the form inputs.
    delimiter,textqualifier = delimitedsetting(request.POST.get('file_delim'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value'))
size_take = 100
if file_ext in ["csv", "tsv"]:
        with open(file_path) as f:
            num_records = sum(1 for line in f) - 1
num_rows = num_records
if num_records > size_take:
skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
else:
skip = 0
# with open(file_path, 'r') as file:
# data = file.readline(10)
# from detect_delimiter import detect
# row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t', ' '])
# if file_delim == "custom" and request.POST["custom_delim"] != "":
# row_delimiter = request.POST["custom_delim"]
# print('row_delimiter',row_delimiter)
file_content = pd.read_csv(file_path, sep=delimiter,quotechar=textqualifier, engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True)
elif file_path.endswith(".json"):
file_content_df = pd.read_json(file_path)
file_content = pd.json_normalize(file_content_df.to_dict("records"))
num_rows = len(file_content)
elif file_path.endswith(".avro"):
import pandavro as pdx
from avro.datafile import DataFileReader
from avro.io import DatumReader
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
num_rows = len(file_content)
elif file_path.endswith(".parquet"):
from pyarrow.parquet import ParquetFile
import pyarrow as pa
import pyarrow.parquet as pq
pf = ParquetFile(file_path)
take_rows = next(pf.iter_batches(batch_size=size_take))
file_content = pa.Table.from_batches([take_rows]).to_pandas()
table = pq.read_table(file_path, columns=[])
num_rows = table.num_rows
# file_content = pd.read_parquet(file_path, engine="pyarrow")
else:
raise ValueError("Invalid file format")
response = {}
column_list = []
for key, val in dict(file_content.dtypes).items():
if str(val) == 'object':
try:
pd.to_datetime(file_content[str(key)])
column_list.append({"column_name": str(key), 'data_type': 'datetime64'})
except ValueError:
column_list.append({"column_name": str(key), 'data_type': 'string'})
pass
else:
column_list.append({"column_name": str(key), 'data_type': str(val)})
response["column_list"] = column_list
response["data_html"] = file_content.to_html(classes='table table-striped table-bordered table-hover dataTable no-footer', justify='left', index=False)
response["record_count"] = num_rows
response["file_ext"] = file_ext
return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def handle_uploaded_file(path, file, test_dataset=False):
    print('path',path)
    # Use the last dot-separated token as the extension so names like "my.data.csv" keep "csv".
    file_ext = file.name.split('.')[-1]
    prefix = "test_data_file." if test_dataset else "uploaded_file."
    filename = os.path.join(path, prefix + file_ext)
    with open(filename, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
    return filename, file_ext
@csrf_exempt
def apply_rule(request):
from appbe import labelling_utils as utils
rule_list = json.loads(request.POST['rule_list'])
file_ext = request.POST.get("file_ext")
label_list = json.loads(request.POST['label_list'])
not_satisfy_label = request.POST.get("non_satisfied_label")
response = utils.label_dataset(rule_list, file_ext, label_list, not_satisfy_label)
return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def get_sample_result_of_individual_rule(request):
from appbe import labelling_utils as utils
rule_json = json.loads(request.POST['rule_json'])
file_ext = request.POST.get("file_ext")
label_list = json.loads(request.POST['label_list'])
not_satisfy_label = request.POST.get("non_satisfied_label")
print("rule_json>>>", rule_json)
print("file_ext>>>", file_ext)
print("label_list>>>>", label_list)
print("not_satisfied_label", not_satisfy_label)
response = utils.get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label)
return HttpResponse(json.dumps(response), content_type="application/json")
def download_result_dataset(request):
#file_name = request.GET.get("filename")
file_name = request.session['AION_labelled_Dataset']
file_path = os.path.join(DATA_FILE_PATH, file_name)
is_exist = os.path.exists(file_path)
if is_exist:
with open(file_path, "rb") as file:
response = HttpResponse(file, content_type="application/force-download")
response["Content-Disposition"] = "attachment; filename=%s" % file_name
return response
else:
return HttpResponse(json.dumps("file not found"), content_type="application/error")
@csrf_exempt
def get_sample_result_of_individual_rule_ver2(request):
from appbe import labelling_utils as utils
rule_json = json.loads(request.POST['rule_json'])
file_ext = request.POST.get("file_ext")
label_list = json.loads(request.POST['label_list'])
not_satisfy_label = request.POST.get("non_satisfied_label")
response = utils.get_sample_result_of_individual_rule_ver2(rule_json, file_ext, label_list, not_satisfy_label)
return HttpResponse(json.dumps(response), content_type="application/json")
def get_label_list(label_json):
label_list = []
label_weightage = []
for item in label_json:
label_list.append(item["label_name"])
if item["label_weightage"] != "":
weightage_perc = float(item["label_weightage"]) / 100
label_weightage.append(np.around(weightage_perc, 2))
else:
            # Keep the default consistent with the explicit-weightage case above (a fraction, not a percent).
            label_weightage.append(np.around(1 / len(label_json), 2))
return label_list, label_weightage
@csrf_exempt
def apply_rule_ver2(request):
from appbe import labelling_utils as utils
rule_list = json.loads(request.POST['rule_list'])
file_ext = request.POST.get("file_ext")
label_json = json.loads(request.POST['label_list'])
label_list, label_weightage = get_label_list(label_json)
not_satisfy_label = request.POST.get("non_satisfied_label")
include_proba = request.POST.get("is_include_proba") == 'true'
response = utils.label_dataset_ver2(request,rule_list, file_ext, label_list, not_satisfy_label, label_weightage,
include_proba)
return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def upload_and_read_test_data(request):
file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'], test_dataset=True)
# file_path, file_ext = handle_uploaded_file(path=DATA_FILE_PATH, file=request.FILES['uploaded_file'])
file_delim_test = request.POST.get("file_delim_test")
size_take = 100
if file_ext in ["csv", "tsv"]:
        with open(file_path) as f:
            num_records = sum(1 for line in f) - 1
num_rows = num_records
if num_records > size_take:
skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
else:
skip = 0
with open(file_path, 'r') as file:
data = file.readline(10)
from detect_delimiter import detect
row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t', ' '])
if file_delim_test == "custom" and request.POST["custom_test_delim"] != "":
row_delimiter = request.POST["custom_test_delim"]
file_content = pd.read_csv(file_path, sep=row_delimiter, quotechar="'", escapechar="/", engine='python',skiprows=skip,encoding='utf-8-sig',skipinitialspace = True)
elif file_path.endswith(".json"):
file_content_df = pd.read_json(file_path)
file_content = pd.json_normalize(file_content_df.to_dict("records"))
num_rows = len(file_content)
elif file_path.endswith(".avro"):
import pandavro as pdx
from avro.datafile import DataFileReader
from avro.io import DatumReader
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
file_content = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
num_rows = len(file_content)
elif file_path.endswith(".parquet"):
from pyarrow.parquet import ParquetFile
import pyarrow as pa
import pyarrow.parquet as pq
pf = ParquetFile(file_path)
take_rows = next(pf.iter_batches(batch_size=size_take))
file_content = pa.Table.from_batches([take_rows]).to_pandas()
table = pq.read_table(file_path, columns=[])
num_rows = table.num_rows
# file_content = pd.read_parquet(file_path, engine="pyarrow")
else:
raise ValueError("Invalid file format")
response = {}
column_list = []
for key, val in dict(file_content.dtypes).items():
if str(val) == 'object':
try:
pd.to_datetime(file_content[str(key)])
column_list.append({"column_name": str(key), 'data_type': 'datetime64'})
except ValueError:
column_list.append({"column_name": str(key), 'data_type': 'string'})
pass
else:
column_list.append({"column_name": str(key), 'data_type': str(val)})
response["column_list"] = column_list
response["data_html"] = file_content.to_html(classes='table table-striped text-left',table_id='testdata', justify='left', index=False)
response["record_count"] = num_rows
response["file_ext"] = file_ext
response["file_delim_test"] = file_delim_test
response["custom_test_delim"] = request.POST["custom_test_delim"]
return HttpResponse(json.dumps(response), content_type="application/json")
@csrf_exempt
def get_label_and_weightage(request):
from appbe import labelling_utils as utils
test_file_ext = request.POST.get("test_file_ext")
file_delim_test = request.POST.get("file_delim_test")
marked_label_column = request.POST.get("marked_label_column")
custom_test_delim = request.POST.get("custom_test_delim")
label_list_with_weightage = utils.get_label_and_weightage(test_file_ext, marked_label_column, file_delim_test, custom_test_delim)
return HttpResponse(json.dumps(label_list_with_weightage), content_type="application/json")
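# Baseline comparison: fits a plain Logistic/Linear Regression on the deployed model's
# post-processed data and scores it with the configured metric as a reference point.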
def modelcompare(request):
deploypath = request.GET.get('DeployLocation')
filepath = os.path.join(deploypath,'etc','output.json')
with open(filepath) as file:
config = json.load(file)
file.close()
# training/testing data needs to be updated as below once it is available in deployment folder
#trainingDataPath = os.path.join(deploypath,'data','trainData.csv')
#testingDataPath = os.path.join(deploypath,'data','testData.csv')
trainingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz')
testingDataPath = os.path.join(deploypath,'data','postprocesseddata.csv.gz')
featureUsedInTraining=config['data']['featuresused']
targetFeature= config['data']['targetFeature']
scoringCriteria=config['data']['ScoreType']
scoringCriteria=scoringCriteria.lower()
problemType=config['data']['ModelType']
problemType=problemType.lower()
tempFeatureUsedInTraining = featureUsedInTraining.split(',')
finalFeatures=[]
for i in range (len(tempFeatureUsedInTraining)) :
tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace('[', '')
tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace(']', '')
        tempFeatureUsedInTraining[i]=tempFeatureUsedInTraining[i].replace("'", '')
tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].lstrip()
tempFeatureUsedInTraining[i] = tempFeatureUsedInTraining[i].rstrip()
finalFeatures.append(tempFeatureUsedInTraining[i])
featureUsedInTraining = finalFeatures
#print("trainingDataPath----",trainingDataPath)
#print("testingDataPath----",testingDataPath)
#print("problemType----",problemType)
#print("scoringCriteria----",scoringCriteria)
#print("featureUsedInTraining----",featureUsedInTraining,type(featureUsedInTraining))
#print("targetFeature----",targetFeature)
if problemType == 'classification':
try:
df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
trainX=df1[featureUsedInTraining]
trainY=df1[targetFeature]
testX=df2[featureUsedInTraining]
testY=df2[targetFeature].to_numpy()
from sklearn import linear_model
estimator = linear_model.LogisticRegression()
estimator.fit(trainX, trainY)
predictedData = estimator.predict(testX)
from learner.aion_matrix import aion_matrix
scoring = aion_matrix()
score = scoring.get_score(scoringCriteria, testY, predictedData)
context = {'Model': 'Logistic regression','Testing Score': score, 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
return HttpResponse(json.dumps(context), content_type="application/json")
except Exception as e:
print("exception "+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
context = {'Model': 'Logistic regression','Testing Score': "Exception Occured", 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
return HttpResponse(json.dumps(context), content_type="application/json")
if problemType == 'regression':
try:
df1 = pd.read_csv(trainingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
df2 = pd.read_csv(testingDataPath,encoding='utf-8',skipinitialspace = True,compression='gzip')
trainX=df1[featureUsedInTraining]
trainY=df1[targetFeature]
testX=df2[featureUsedInTraining]
testY=df2[targetFeature].to_numpy()
from sklearn import linear_model
estimator = linear_model.LinearRegression()
estimator.fit(trainX, trainY)
predictedData = estimator.predict(testX)
from learner.aion_matrix import aion_matrix
scoring = aion_matrix()
score = scoring.get_score(scoringCriteria, testY, predictedData)
context = {'Model': 'Linear regression','Testing Score': score, 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
return HttpResponse(json.dumps(context), content_type="application/json")
except Exception as e:
print("exception")
context = {'Model': 'Linear regression','Testing Score': "Exception Occured", 'Confidence Score': "Not supported", 'Feature Engineering Method': "ModelBased"}
return HttpResponse(json.dumps(context), content_type="application/json")
def textsummarization(request):
return render(request, "textsummarization.html",context={'version':AION_VERSION,'selected': 'textsummarization'})
# LLM Testing Task ID 14533
def validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample):
default = {'temperature':{'default':0.9,'lower':0.0,'upper':1.0},'similarity_threshold':{'default':0.75,'lower':0.0,'upper':1.0},'perturbations_per_sample':5}
if not isinstance( prompts, (list,str)):
raise ValueError(f"Prompt should be of type str, got '{prompt}' of type {type(prompt)}")
elif prompts == '':
raise ValueError("Prompt field can not be empty")
if not isinstance( reference_generation, str):
raise ValueError(f"Reference Generated Answer should be of type str, got '{reference_generation}' of type {type(reference_generation)}")
# elif reference_generation == '':
# raise ValueError("Reference Generation field can not be empty")
if not isinstance( temperature, float) or temperature < default['temperature']['lower'] or temperature > default['temperature']['upper']:
if isinstance( temperature, str) and temperature == '':
temperature = default['temperature']['default']
else:
raise ValueError(f"Model Parameter Temperature should be of type float with range {default['temperature']['lower']} - {default['temperature']['upper']}, got {temperature} of type {type(temperature)}")
if not isinstance( similarity_threshold, float) or similarity_threshold < default['similarity_threshold']['lower'] or similarity_threshold > default['similarity_threshold']['upper']:
if isinstance( similarity_threshold, str) and similarity_threshold == '':
similarity_threshold = default['similarity_threshold']['default']
else:
raise ValueError(f"Similarity Threshold should be of type float with range {default['similarity_threshold']['lower']} - {default['similarity_threshold']['upper']}, got {similarity_threshold} of type {type(similarity_threshold)}")
if not isinstance( perturbations_per_sample, int):
if isinstance( perturbations_per_sample, str) and perturbations_per_sample == '':
perturbations_per_sample = default['perturbations_per_sample']
else:
raise ValueError(f"Perturbations Per Sample should be of type integer, got {perturbations_per_sample} of type {type(perturbations_per_sample)}")
return prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample
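# Lists all successfully trained LLM fine-tuning use cases so the UI can offer them as
# candidate models for LLM testing.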
def llmtesting(request):
ftmodels = []
usecase = usecasedetails.objects.all().order_by('-id')
for x in usecase:
#print(x.id)
models = Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id')
if len(models) > 0:
for model in models:
#print(str(model.ConfigPath))
version = model.Version
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath),'etc','output.json')
with open(modelPath) as file:
outputconfig = json.load(file)
problemType = outputconfig['data']['ModelType']
if problemType.lower() == 'llm fine-tuning':
from appbe.models import get_instance
hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version))
with open(str(model.ConfigPath)) as file:
configSettingsJson = json.load(file)
file.close()
from appbe.pages import getMLModels
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
ft = mlmodels+'-'+smodelsize+'-'+x.usecaseid+'_'+str(version)
finetunedModel = {}
finetunedModel['ft']=ft
finetunedModel['basemodel'] = mlmodels+'-'+smodelsize
finetunedModel['usecaseid'] = x.usecaseid+'_'+str(version)
ftmodels.append(finetunedModel)
return render(request, "llmtesting.html",context={'version':AION_VERSION,'selected': 'llmtesting','ftmodels':ftmodels})
# LLM Testing Result Task ID 14533
def llmtestingresult(request):
try:
generate_test = request.POST['prompt_temp']
if generate_test == "generatetest":
UseCaseName = request.POST['selectusecase']
ModelName = request.POST['selectmodel']
temperature = request.POST['modelparam']
similarity_threshold = request.POST['similarity_threshold']
perturbations_per_sample = request.POST['perturbations_per_sample']
selecttype = request.POST['selectquestion']
reference_generation = (request.POST['reference_generation'])
baseModel = request.POST['basemodel']
from appbe.llmTesting import test_LLM
if selecttype == "Single":
prompts = request.POST['prompt']
else:
data_file = request.POST['dataFilePath']#Task 16794
file_name = os.path.splitext(data_file)[0]
file_extension = os.path.splitext(data_file)[-1].lower()
if file_extension != ".csv":
questions = []
answers = []
if file_extension == ".pdf":
with pdfplumber.open(data_file) as pdf:
for page in pdf.pages:
text = page.extract_text()
lines = text.split("\\n")
current_question = ""
current_answer = ""
reading_question = False
for line in lines:
line = line.strip()
if line.endswith("?"):
if reading_question:
questions.append(current_question)
answers.append(current_answer)
current_question = ""
current_answer = ""
current_question = line
reading_question = True
elif reading_question:
current_answer += " " + line
if reading_question:
questions.append(current_question)
answers.append(current_answer)
elif file_extension == ".docx":
doc = Document(data_file)
current_question = ""
current_answer = ""
reading_question = False
for paragraph in doc.paragraphs:
text = paragraph.text.strip()
if text.endswith("?"):
if reading_question:
questions.append(current_question)
answers.append(current_answer)
current_question = ""
current_answer = ""
current_question = text
reading_question = True
elif reading_question:
current_answer += " "+ text
if reading_question:
questions.append(current_question)
answers.append(current_answer)
else:
print("unsupported file format. please provide a pdf or docx file.")
faq = pd.DataFrame({'Question':questions, 'Answers':answers})
# print(faq)
data_file_csv = file_name+".csv"
faq.to_csv(data_file_csv, index=False, encoding='utf-8')
else:
faq = pd.read_csv(data_file,encoding='cp1252')
rows = faq.shape[0]
prompts = list(faq['Question'])
try:
temperature = float( temperature)
similarity_threshold = float(similarity_threshold)
perturbations_per_sample = int( perturbations_per_sample)
except:
pass
prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample = validate_llm(prompts, reference_generation,temperature, similarity_threshold, perturbations_per_sample)
from appbe.aion_config import get_llm_data
llm_key,llm_url,api_type,api_version=get_llm_data()
urls = {
'OPENAI_API_BASE' : llm_url,
'OPENAI_API_KEY' : llm_key,
'OPENAI_API_TYPE' :api_type,
'OPENAI_API_VERSION':api_version
}
llm_obj = test_LLM()
llm_obj.set_params(urls)
if selecttype == "Single":
print(UseCaseName,ModelName)
if ModelName.lower() == 'basemodel':
result = llm_obj.run_offline_model( UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,False )
llmModelName = baseModel
else:
result = llm_obj.run_offline_model( UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,True )
llmModelName = ModelName+'-'+UseCaseName
print(result)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.html')
result = result.split("LLMTestingResultOutput:")[-1]
output = json.loads(result)
with open(dataFile,'w') as htmlfile:
htmlfile.write(output['data']['html_file'])
request.session['llmtestreport'] = str(dataFile)
# provider = result.generation_kwargs['Provider']
provider = ""
# metric_name = list(result.metric[0].keys())[0]
metric_name = output['data']['metric_name']
# metric_values = output['data']['metric_values']
metric_values = eval(output['data']['metric_values'])
passed_tests = output['data']['passed_tests']
total_tests = output['data']['total_tests']
summary = f'{passed_tests}/{total_tests}'
tabledata = {}
prompts = output['data']['prompts']
generations= output['data']['generations']
Generations = []
for sub in generations:
Generations.append(sub.replace("\\ |
n", ""))
metricvalues = metric_values
text = [eval(x) for x in generations]
gen = [x[0]['generated_text'].split('\\n')[1:] for x in text]
Generations = [' '.join(x) for x in gen]
resultoutput = eval(output['data']['resultoutput'])[0]
for index,val in enumerate(Generations):
Generations[index]= Generations[index].strip()
if len(Generations[index])<=2:
metricvalues[index] = 0
resultoutput[index] = 0
tabledata = zip(prompts,Generations,metricvalues,resultoutput)
context = {'result':result,'provider':provider,'tabledata':tabledata,'summary':summary,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'}
# context = {'result':result,'provider':"provider",'tabledata':"tabledata",'summary':"summary",'modelName':modelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'prompt':prompts,'reference_generation':reference_generation,'perturbations_per_sample':perturbations_per_sample,'single':'single','version':AION_VERSION,'selected': 'llmtestingresults','success':'success'}
else:
if ModelName.lower() == 'basemodel':
result_str =llm_obj.run_multiple_offline_model(UseCaseName,baseModel,temperature, similarity_threshold, perturbations_per_sample,faq,False)
llmModelName = baseModel
else:
result_str =llm_obj.run_multiple_offline_model(UseCaseName,ModelName,temperature, similarity_threshold, perturbations_per_sample,faq,True)
llmModelName = ModelName+'-'+UseCaseName
result_str = result_str.split("LLMTestingResultOutput:")[-1]
output = json.loads(result_str)
# result will be df converted from output['data']
result = pd.DataFrame(json.loads(output['data']))
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'llmreport_' + filetimestamp+'.csv')
request.session['llmtestreport'] = str(dataFile)
result.rename(columns={'Perturbed Prompts':'PerturbedPrompts','Similarity [Generations]':'Similarity'},inplace=True)
result_df = result.head(5)
result.to_csv(dataFile, index=False)
context={'result':result_df,'modelName':llmModelName,'temperature':temperature,'similarity_threshold':similarity_threshold,'perturbations_per_sample':perturbations_per_sample,'selected': 'llmtestingresults','multiple':'multiple','success':'success'}
return render(request, "llmtestingresults.html",context)
if generate_test == "download_prompt":
csvdata= os.path.join(DEFAULT_FILE_PATH,"Prompt_template.csv")
if os.path.isfile(csvdata) and os.path.exists(csvdata):
df = pd.read_csv(csvdata,encoding='utf8')
downloadFileName = 'llmreport.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename='+downloadFileName
df.to_csv(response, index=False)
return response
else:
context = {'error': 'Fail to Download File','version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'}
return render(request, "llmtestingresults.html",context)
except Exception as e:
print(e)
errormsg = str(e)
if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg:
errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.'
if 'Max retries exceeded with url' in errormsg:
errormsg = 'Please make sure you have good internet connection and access to API endpoint for your resource.'
context = {'error':errormsg,'version':AION_VERSION,'selected': 'llmtestingresults','fail':'fail'}
return render(request, "llmtestingresults.html",context)
def llmtestreport(request):
file_path = request.session['llmtestreport']
# file_path = "C:\\AION\\To_Kiran\\To_Kiran\\codeCloneReport\\code_clone_report.txt"
report_path = os.path.join(file_path)
if os.path.exists(report_path):
with open(report_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path)
return response
else:
return render(request, "llmtestingresults.html",context={"error":"Fail To Download File",'version':AION_VERSION,'result':'result','selected': 'llmtestingresults'})
### To display libraries in UI ####
def libraries(request):
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.normpath(os.path.join(current_dir,'..','..','lic',"requirement.csv"))
library_data = []
with open(file_path, 'r') as file:
csv_reader = csv.DictReader(file)
for row in csv_reader:
library_info = {
"library" :row["Library"] if row.get("Library") else "none",
"version" :row["Version"] if row.get("Version") else "none",
"license" :row["License"] if row.get("License") else "none"
}
library_data.append(library_info)
# print(library_data)
return render(request, "libraries.html", context={"data":library_data,'version':AION_VERSION,'selected': 'libraries'})
# For Code Clone Detection
def codeclonedetectionresult(request):
from appbe.codeclonedetection import CodeCloneDetectionFiles
try:
codecloneconfig = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','code_clone_config.json')
f = open(codecloneconfig, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
rootdir = request.POST.get('rootdirectory')
ccdmode = request.POST.get('ccdmode')
if(os.path.isdir(rootdir)):
llm_key,llm_url,api_type,api_version = get_llm_data()
openai_apiKey = llm_key
openai_baseUrl = llm_url
try:
openai_apiType = api_type
openai_apiVersion = api_version
except:
openai_apiType = configSettings['openaiApiType']
openai_apiVersion = configSettings['openaiApiVersion']
openai_embeddingEngine = configSettings['codeCloneDetection']['openaiEmbeddingEngine']
openai_embeddingModel = configSettings['codeCloneDetection']['openaiEmbeddingModel']
openai_chatModel = configSettings['codeCloneDetection']['openaiChatModel']
openai_deploymentId = configSettings['codeCloneDetection']['openaiDeploymentId']
rootDirFilesType = configSettings['codeCloneDetection']['rootDirFilesType']
else:
return render(request, "codeclone.html",context={"codeclonedetectionerror":"Please provide valid root directory file path.",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
filetimestamp = str(int(time.time()))
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'code_clone_config_' + filetimestamp + '.json')
updatedConfigSettings = json.dumps(configSettings)
with open(config_json_filename, "w") as fpWrite:
fpWrite.write(updatedConfigSettings)
fpWrite.close()
from appbe.dataPath import DEPLOY_LOCATION
codeclonedir_path = os.path.join(DEPLOY_LOCATION,('codeCloneDetection_'+filetimestamp))
os.makedirs(codeclonedir_path,exist_ok=True)
request.session['clonereport'] = str(codeclonedir_path)
try:
if (rootDirFilesType.lower() == "python" and ccdmode.lower() == "openai"):
cdobj = CodeCloneDetectionFiles(rootdir,openai_baseUrl, openai_apiKey,openai_apiType,openai_apiVersion,codeclonedir_path,openai_embeddingEngine,openai_embeddingModel,openai_chatModel,openai_deploymentId)
report_str,report_dict,report_df,report_json = cdobj.getCloneReport()
clonetype = report_dict['Code_clones_count_by_clone_type'].to_dict()
for i in clonetype:
clonevalues = clonetype[i].values()
clonekeys = clonetype[i].keys()
clonetype = zip(clonekeys,clonevalues)
return render(request, "codeclonedetectionresult.html",context={'report_json':json.loads(report_json),'report_dict':report_dict,'clonetype':clonetype,'clonefunctions':report_dict['clone_functions'],'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult','openai':'openai'})
## Pls uncomment below code if you need to use sklearn based code clone detection.
# elif (ccdmode.lower() =="sklearn"):
# from appbe.codeclonedetection_sklearn import codeCloneDetectionSklearn
# chunk_size = 10000
# cdobj = codeCloneDetectionSklearn(rootdir,codeclonedir_path,chunk_size)
# report_dict = cdobj.get_clone()
# return render(request, "codeclonedetectionresult.html",context={'report_dict':report_dict,'function_df':report_dict['result_df'],'function_dict':report_dict['result_df'].to_dict(),'sklearn':'sklearn'})
            else:
                return render(request, "codeclone.html",context={"codeclonedetectionerror":"Python Files Are Only Supported.",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
except Exception as e:
return render(request, "codeclone.html",context={"codeclonedetectionerror":"OpenAI Model Connection Error",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
except Exception as e:
print('Code clone detection interface issue. Error message: ',e)
return render(request, "codeclone.html",context={"codeclonedetectionerror":"OpenAI Model Connection Error",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
def codeclonereport(request):
file_path = request.session['clonereport']
report_path = os.path.join(file_path, 'codeCloneReport','code_clone_report.txt')
if os.path.exists(report_path):
with open(report_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type='text/plain') # the report is a plain-text file, not a spreadsheet
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(report_path)
return response
else:
return render(request, "codeclonedetectionresult.html",context={"codeclonedetectionerror":"Fail To Download File",'version':AION_VERSION,'result':'result','selected': 'codeclonedetectionresult'})
def evaluatepromptmetrics(request):
""" Evaluate prompt only information for LLM Evaluation."""
import whylogs as why
from langkit import light_metrics
from whylogs.experimental.core.udf_schema import udf_schema
from whylogs.experimental.core.udf_schema import register_dataset_udf
from langkit import lang_config, response_column
import json
prompt_msg = request.GET.get('instruction')
text_schema = udf_schema()
llm_schema = light_metrics.init()
df = pd.DataFrame({
"prompt": [
prompt_msg
]})
results = why.log(df, schema=text_schema) # profile the prompt with the registered text UDFs
view = results.view()
from appbe.evaluate_prompt import evaluate_prompt_metrics
final_output_json,prompt_results = evaluate_prompt_metrics(prompt_msg)
prompt_results_json = json.dumps(prompt_results, indent=4)
# return prompt_results_json,prompt_results
return HttpResponse(final_output_json)
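# Usage sketch (assumes the view is routed at /evaluatepromptmetrics): a GET
# request with an 'instruction' query parameter, e.g.
#   GET /evaluatepromptmetrics?instruction=Summarize+this+document
# returns the prompt-metrics JSON produced by evaluate_prompt_metrics.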
<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from appbe.pages import getversion
AION_VERSION = getversion()
def datagenrate(request):
from appbe.aion_config import settings
usecasetab = settings()
context = {'selected':'DataOperations','usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, "datagenrate.html",context)
def generateconfig(request):
from appbe import generate_json_config as gjc
try:
gjc.generate_json_config(request)
return render(request, "datagenrate.html",context={'success':'success','selected':'DataOperations'})
except Exception as e:
print(e)
return render(request, "datagenrate.html",context={'error':str(e),'selected':'DataOperations'})<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from appbe.aion_config import getrunningstatus
import time
import os # used below for path handling in startKafka
def computetoGCPLLaMA13B(request):
from appbe import compute
from appbe.pages import get_usecase_page
try:
compute.updateToComputeSettings('GCP')
time.sleep(2)
request.session['IsRetraining'] = 'No'
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
except Exception as e:
print(e)
return render(request, 'usecases.html',{'error': 'Failed to update compute settings','version':AION_VERSION})
def computetoLLaMMA7b(request):
from appbe import compute
from appbe.pages import get_usecase_page
try:
compute.updateToComputeSettings('AWS')
time.sleep(2)
#print(1)
request.session['IsRetraining'] = 'No'
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
except Exception as e:
print(e)
return render(request, 'usecases.html',{'error': 'Failed to update compute settings','version':AION_VERSION})
def computetoAWS(request):
from appbe import compute
from appbe.pages import get_usecase_page
try:
compute.updateToComputeSettings('AWS')
time.sleep(2)
#print(1)
request.session['IsRetraining'] = 'No'
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
except Exception as e:
print(e)
return render(request, 'usecases.html',{'error': 'Failed to update compute settings','version':AION_VERSION})
def setting_context(request):
from appbe.aion_config import get_graviton_data
from appbe.aion_config import get_edafeatures
from appbe.aion_config import get_telemetryoptout
from appbe.aion_config import get_llm_data
from appbe.aion_config import running_setting
from appbe import compute
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
from appbe.aion_config import settings
usecasetab = settings()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
graviton_url, graviton_userid = get_graviton_data()
No_of_Permissible_Features_EDA = get_edafeatures()
telemetryoptout = get_telemetryoptout()
llm_key,llm_url,api_type,api_version =get_llm_data()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
try:
context = {'computeinfrastructure':computeinfrastructure,'graviton_url':graviton_url,'graviton_userid':graviton_userid,'FeaturesEDA':No_of_Permissible_Features_EDA,'llm_key':llm_key,'llm_url':llm_url,'ruuningSetting':ruuningSetting,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'api_type':api_type,'api_version':api_version,'telemetryoptout':telemetryoptout,
'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion':ModelVersion,'usecasetab':usecasetab,'azurestorage':get_azureStorage()}
context['version'] = AION_VERSION
return context
except Exception as e:
print(e)
context = {'computeinfrastructure':computeinfrastructure,'error':'Error in Settings'}
context['version'] = AION_VERSION
return context
def startKafka(request):
try:
nooftasks = getrunningstatus('AION_Consumer')
if len(nooftasks):
status = 'AION Kafka Consumer Already Running'
else:
import subprocess
kafkapath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','sbin','AION_Consumer.bat'))
#subprocess.Popen(kafkapath, shell=True)
os.system('start cmd /c "'+kafkapath+'"')
#addKafkaModel(request,request.session['datalocation'])
status = 'Kafka Consumer Initiated Successfully'
context = setting_context(request) # settings() from appbe.aion_config takes no request; setting_context builds the page context
context['status'] = status
return render(request, 'settings_page.html', context)
except:
return render(request, 'settings_page.html', {'error':'Failed to start Kafka'})
def startPublishServices(request):
from appbe.models import startServices
startServices(request,usecasedetails,Existusecases)
status = 'Publish services started successfully'
context = setting_context(request)
context['status'] = status
return render(request, 'settings_page.html', context)
def saveopenaiconfig(request):
from appbe.aion_config import saveopenaisettings
try:
saveopenaisettings(request)
context = setting_context(request)
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html', context)
except:
context = {'error': 'error', 'runtimeerror': 'runtimeerror'}
return render(request, 'settings_page.html', context)
def savegravitonconfig(request):
from appbe.aion_config import savegravitonconfig
try:
savegravitonconfig(request)
context = setting_context(request)
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html', context)
except:
context={'error':'error','runtimeerror':'runtimeerror'}
return render(request, 'settings_page.html',context)
def saveaionconfig(request):
from appbe.aion_config import saveconfigfile
try:
saveconfigfile(request)
context = setting_context(request)
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html', context)
except:
context={'error':'error','runtimeerror':'runtimeerror'}
return render(request, 'settings_page.html',context)
def settings_page(request):
try:
context = setting_context(request)
context['version'] = AION_VERSION
context['selected'] = 'Settings'
return render(request, 'settings_page.html', context)
except:
return render(request, 'settings_page.html', {'error':'Please enter valid inputs','version':AION_VERSION})
def adds3bucket(request):
try:
if request.method == 'POST':
from appbe.s3bucketsDB import add_new_s3bucket
status = add_new_s3bucket(request)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Some values are missing','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
if status == 'error1':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'A bucket with the same name already exists','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to add S3 bucket'})
def GCSbucketAdd(request):
try:
if request.method == 'POST':
from appbe.gcsbucketsDB import add_new_GCSBucket
status = add_new_GCSBucket(request)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Some values are missing','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
if status == 'error1':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'A bucket with the same name already exists','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except Exception as e:
print(e)
return render(request, 'settings_page.html',{'error': 'Failed to add GCS bucket','version':AION_VERSION})
def azurestorageAdd(request):
try:
if request.method == 'POST':
from appbe.azureStorageDB import add_new_azureStorage
status = add_new_azureStorage(request)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Some values are missing','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
if status == 'error1':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'A bucket with the same name already exists','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to add Azure container'})
def removeazurebucket(request,name):
try:
if request.method == 'GET':
from appbe.azureStorageDB import remove_azure_bucket
status = remove_azure_bucket(name)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Failed to delete Azure Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to delete Azure Bucket'})
def removes3bucket(request,name):
try:
if request.method == 'GET':
from appbe.s3bucketsDB import remove_s3_bucket
status = remove_s3_bucket(name)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Failed to delete S3bucket','s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to delete S3bucket'})
def removegcsbucket(request,name):
try:
if request.method == 'GET':
from appbe.gcsbucketsDB import remove_gcs_bucket
status = remove_gcs_bucket(name)
context = setting_context(request)
context['version'] = AION_VERSION
if status == 'error':
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = {'error':'Failed to delete GCS Bucket','gcsbuckets':get_gcs_bucket(),'s3buckets':get_s3_bucket(),'azurestorage':get_azureStorage(),'version':AION_VERSION}
return render(request,'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to delete GCS Bucket'})
def gcpcomputesettings(request):
try:
from appbe import compute
status = compute.updateGCPConfig(request)
context = setting_context(request)
if status == 'error':
context['ErrorMsg'] = 'Some values are missing'
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to save GCP settings','version':AION_VERSION})
def amazonec2settings(request):
try:
from appbe import compute
status = compute.updateComputeConfig(request)
context = setting_context(request)
if status == 'error':
context['ErrorMsg'] = 'Some values are missing'
context['version'] = AION_VERSION
context['success'] = True
return render(request, 'settings_page.html',context)
except:
return render(request, 'settings_page.html',{'error': 'Failed to save AWS settings','version':AION_VERSION})
<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import os
import plotly.graph_objects as go
import time
import sys
from pathlib import Path
import csv
import pandas as pd
import numpy as np
import base64 # used below to embed image previews
import urllib.parse
from appbe.pages import getversion
AION_VERSION = getversion()
def uploadedData(request):
from appbe.dataIngestion import ingestDataFromFile
context = ingestDataFromFile(request,DATA_FILE_PATH)
context['version'] = AION_VERSION
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
context['FeturesEDA'] = No_of_Permissible_Features_EDA
return render(request, 'upload.html', context)
def uploaddatafromscript(request):
from appbe.aion_config import settings
usecasetab = settings()
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
from appfe.modelTraining.models import Existusecases
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
try:
scriptPath = request.POST.get('pythonscriptPath')
if(os.path.isfile(scriptPath) == False ):
context = {'tab': 'upload', 'error': 'File does not exist', 'selected': 'modeltraning','clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
if(scriptPath != ''):
try:
f = open(scriptPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
except Exception as e:
context = {'tab': 'upload', 'error': 'Error in script execution i.e., '+str(e), 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
if 'dfpy' not in ldict:
context = {'tab': 'upload', 'error': 'dfpy dataset not found', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
raw_data = ''
if 'df_aion_raw' in ldict:
df_raw = ldict['df_aion_raw']
raw_data = df_raw.to_json(orient="records")
raw_data = json.loads(raw_data)
df = ldict['dfpy']
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df.to_csv(dataFile, index=False)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'raw_data':raw_data,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'usecasetab':usecasetab}
return render(request, 'upload.html', context)
else:
from appfe.modelTraining.models import Existusecases
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
context = {'tab': 'upload','computeinfrastructure':computeinfrastructure, 'error': 'Please enter script path', 'selected': 'modeltraning','usecasetab':usecasetab,'clusteringModels':clusteringModels,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
return render(request, 'upload.html', context)
except:
from appfe.modelTraining.models import Existusecases
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
return render(request, 'upload.html', {'tab': 'upload','clusteringModels':clusteringModels,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'error':'Fail to upload data from script','selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion})
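# A minimal example of the kind of script uploaddatafromscript() accepts: the
# script is exec'd and must define a DataFrame named 'dfpy' (it may also define
# 'df_aion_raw' for the raw-data preview). The data below is illustrative only.
#
# import pandas as pd
# dfpy = pd.DataFrame({'feature1': [1, 2, 3], 'target': [0, 1, 0]})
# df_aion_raw = dfpy.copy()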
def listfiles(request):
from appbe.labels import label_filename
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
path = request.POST.get('scriptPath')
print(path)
dirstatus = os.path.isdir(path)
import glob
try:
if(path != '' and dirstatus == True):
radiovalue = request.POST.get('filetype')
# create csv
filetimestamp = str(int(time.time()))
header = ['File', 'Label']
filename = 'AION_List_' + selected_use_case + '.csv'
dataFile = os.path.join(DATA_FILE_PATH, filename)
csvfilename = 'AION_List_' + filetimestamp
request.session['csvfilename'] = dataFile
request.session['datalocation'] = path
type = 'NA'
request.session['fileExtension'] = radiovalue
if radiovalue in ['avi', 'wmv', 'mp4']:
if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
request.session['datatype'] = 'LLM_Video'
type = 'LLM_Video'
else:
request.session['datatype'] = 'Video'
type = 'Video'
elif radiovalue in ['jpeg', 'png', 'bmp']:
if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
request.session['datatype'] = 'LLM_Image'
type = 'LLM_Image'
else:
request.session['datatype'] = 'Image'
type = 'Image'
elif radiovalue in ['txt', 'log', 'pdf','docs','docx','doc']:
if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
request.session['datatype'] = 'LLM_Document'
type = 'LLM_Document'
else:
request.session['datatype'] = 'Document'
type = 'Document'
elif radiovalue in ['java','py']:
if request.POST.get('computeInfrastructure') in ['AWS','GCP']:
request.session['datatype'] = 'LLM_Code'
type = 'LLM_Code'
else:
request.session['datatype'] = 'Code'
type = 'Code' # was 'Document' in the original, contradicting the datatype set just above
if type == 'NA':
context = {'tab': 'upload', 'error': 'Please select the type', 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
return render(request, 'upload.html', context)
request.session['folderPath'] = path
request.session['csvfullpath'] = dataFile
file = open(dataFile, 'w', newline='')
writer = csv.DictWriter(file, fieldnames=header)
# writing data row-wise into the csv file
writer.writeheader()
#os.chdir(path)
tifCounter = 0
if radiovalue == 'doc':
tifCounter = len(glob.glob(os.path.join(path,"**/*."+'doc'),recursive=True))
tifCounter = tifCounter+len(glob.glob(os.path.join(path,"**/*."+'docx'),recursive=True) )
else:
tifCounter = len(glob.glob(os.path.join(path, "**/*." + radiovalue), recursive=True))
if radiovalue == 'jpeg':
tifCounter += len(glob.glob1(path,"*.jpg"))
labelfileexists = False
dflabels = pd.DataFrame()
if type == 'Image':
labelfilename = label_filename(request)
labelfileexists = os.path.isfile(labelfilename)
if labelfileexists == True:
dflabels = pd.read_csv(labelfilename)
if len(dflabels) == 0:
labelfileexists = False
else:
dflabels = dflabels.head(5)
if tifCounter == 0:
context = {'tab': 'upload', 'error': 'No files in the folder with selected file type', 'selected': 'modeltraning','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
return render(request, 'upload.html', context)
filesCount = 0
filesSize = 0
files=[]
for filename in glob.iglob(os.path.join(path, "**/*." + radiovalue), recursive=True):
files.append(filename)
if radiovalue == 'doc':
for filename in glob.iglob(os.path.join(path, "**/*." + 'docx'), recursive=True):
files.append(filename)
for filename in files:
filesCount = filesCount+1
writer.writerow({'File': filename, 'Label': ''})
get_size = os.path.getsize(filename)
filesSize = round(filesSize + get_size, 1)
if filesSize > 1048576:
size = round((filesSize / (1024 * 1024)), 1)
filesSize = str(size) + ' M'
elif filesSize > 1024:
size = round((filesSize /1024), 1)
filesSize = str(size) + ' K'
else:
filesSize = str(filesSize) + ' B'
files = pd.DataFrame(files,columns=['File'])
files.index = range(1, len(files) + 1)
files.reset_index(level=0, inplace=True)
files = files.to_json(orient="records")
files = json.loads(files)
if radiovalue == 'jpeg':
for filename in glob.iglob(os.path.join(path,"**/*.jpg"), recursive=True):
writer.writerow({'File': filename, 'Label': ''})
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
#filesSize = str(filesSize)+' M'
print(filesSize)
print(filesCount)
context = {'tab': 'upload','files':files,'filesCount':filesCount,'filesSize':filesSize,'filelist':dataFile,'finalstate':0, 'file': dataFile,'FeturesEDA':No_of_Permissible_Features_EDA, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,"selectedfile":radiovalue,"selectedPath":path}
return render(request, 'upload.html', context)
else:
context = {'tab': 'upload', 'error': 'Error: Folder path not entered or does not exist.', 'modeltraning': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION,"selectedfile":radiovalue,"selectedPath":path}
return render(request, 'upload.html', context)
except Exception as e:
print(e)
return render(request, 'upload.html', {'tab': 'upload','error':'Folder path is mandatory','version':AION_VERSION,'computeinfrastructure':computeinfrastructure, 'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion})
def validatecsv(request):
from appbe.aion_config import settings
usecasetab = settings()
from appbe import exploratory_Analysis as ea
from appbe.labels import label_filename
try:
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
#print(request.POST.get('validatesubmit'))
if request.POST.get('validatesubmit') == 'ObjectDetection':
df = pd.read_csv(request.session['csvfullpath'])
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
request.session['currentIndex'] = 0
request.session['endIndex'] = len(df)-1
not_end = not(request.session['currentIndex'] == request.session['endIndex'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
request.session['labels'] = []
if os.path.isfile(dataFile):
image = df["File"].iloc[request.session['currentIndex']]
with open(dataFile, 'r') as file:
reader = csv.reader(file)
for row in reader:
if row[0] == image:
labels = request.session['labels']
labels.append({"id":row[1], "name":row[9], "xMin": row[3], "xMax":row[4], "yMin":row[5], "yMax":row[6], "height":row[7],"width":row[8], "angle":row[2]})
request.session['labels'] = labels
labels = request.session['labels']
else:
with open(dataFile,'w') as f:
f.write("File,id,angle,xmin,xmax,ymin,ymax,height,width,Label\n")
bounds = []
context = {'tab': 'upload','bounds':bounds,'labels': request.session['labels'],'directory':request.session['datalocation'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'filelist':df,'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'objectlabelling.html', context)
elif request.POST.get('validatesubmit') == 'bulkLabeling':
type = 'BulkImage'
dataFile = request.session['csvfullpath']
csvfilename = request.session['csvfullpath']
labelfileexists = False
dflabels = pd.DataFrame()
context = {'tab': 'upload', 'file': dataFile, 'csvfilename': csvfilename,'type':type,'csvgenerated': True,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'labelfileexists':labelfileexists,'dflabels':dflabels,'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
elif request.POST.get('validatesubmit') == 'ImageClassification':
df = pd.read_csv(request.session['csvfullpath'])
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
with open(dataFile,'w') as f:
f.write("File,Label\n")
request.session['currentIndex'] = 0
request.session['endIndex'] = len(df)-1
not_end = not(request.session['currentIndex'] == request.session['endIndex'])
filePath = os.path.join(request.session['datalocation'],df["File"].iloc[request.session['currentIndex']])
string = base64.b64encode(open(filePath, "rb").read())
request.session['labels'] = ''
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
context = {'tab': 'upload','id':request.session['currentIndex'],'labels': request.session['labels'],'image':image_64,'head':request.session['currentIndex']+1,'len':len(df),'computeinfrastructure':computeinfrastructure}
context['version'] = AION_VERSION
return render(request, 'imagelabelling.html', context)
elif request.POST.get('validatesubmit') == 'submitpreviouslabel':
dataFile = label_filename(request)
request.session['LabelFileName'] = dataFile
df = pd.read_csv(dataFile)
if len(df.columns) == 2:
context = imageeda(request)
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
else:
context = objecteda(request)
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
else:
df = pd.read_csv(request.session['csvfullpath'])
if request.session['datatype'] in ['LLM_Document','LLM_Code']:
from appfe.modelTraining.bc_views import basicconfig
return basicconfig(request)
else:
if df['Label'].isnull().sum() > 0:
# show error message
if request.session['datatype'] == 'Document':
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
if Path(filename).suffix == '.pdf':
from appbe.dataIngestion import pdf2text
text = pdf2text(filename)
dataDict["text"].append(text)
else:
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDf = pd.DataFrame.from_dict(dataDict)
tcolumns=['text']
wordcloudpic,df_text = ea.getWordCloud(dataDf,tcolumns)
status_msg = 'Successfully Done'
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'singletextdetails':wordcloudpic,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
else:
errormessage = str(df['Label'].isnull().sum()) + " rows do not contain label values"
context = {'error': errormessage}
else:
eda_result = ''
duplicate_img = ''
color_plt = ''
df2 = df.groupby('Label', as_index=False)['File'].count().reset_index().rename(columns ={'File':'Number of Files'})
df_json = df2.to_json(orient="records")
df_json = json.loads(df_json)
cfig = go.Figure()
xaxis_data = df2['Label'].tolist()
yaxis_data = df2['Number of Files'].tolist()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data))
cfig.update_layout(barmode='stack', xaxis_title='Label', yaxis_title='File')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=520)
firstFile = df.groupby('Label').first().reset_index()
#firstFile['FilePath'] = firstFile['File'].apply(lambda x: os.path.join(request.session['datalocation'], x))
images = []
if request.session['datatype'] == 'Image':
# 'ia' is assumed to be the image-analysis helper imported elsewhere in the original module.
qualityscore,eda_result,duplicate_img,color_plt = ia.analysis_images(request.session['datalocation'])
#print(qualityscore)
for i in range(len(firstFile)):
filename = firstFile.loc[i, "File"]
filePath = os.path.join(request.session['datalocation'], filename)
string = base64.b64encode(open(filePath, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
firstFile.loc[i, "Image"] = image_64
firstFile.loc[i, "Quality"] = qualityscore[filename]
elif request.session['datatype'] == 'Document':
dataDrift = ''
dataDf = pd.DataFrame()
dataDict = {}
keys = ["text","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(df)):
filename = os.path.join(request.session['datalocation'],df.loc[i,"File"])
if Path(filename).suffix == '.pdf':
from appbe.dataIngestion import pdf2text
text = pdf2text(filename)
dataDict["text"].append(text)
dataDict["Label"].append(df.loc[i,"Label"])
else:
with open(filename, "r",encoding="utf-8") as f:
dataDict["text"].append(f.read())
f.close()
dataDict["Label"].append(df.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
wordcloudpic = ea.getCategoryWordCloud(dataDf)
status_msg = 'Successfully Done'
firstFile = pd.DataFrame()
context = {'tab': 'upload','firstFile':firstFile,'dataa': df_json,'textdetails':wordcloudpic,'featuregraph': bargraph,'status_msg': status_msg,'validcsv': True,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
return render(request, 'upload.html', context)
status_msg = 'Successfully Done'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'upload', 'featuregraph': bargraph,'dataa': df_json, 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'validcsv': True,'eda_result':eda_result,'duplicate_img':duplicate_img,'color_plt':color_plt, 'firstFile': firstFile,
'status_msg': status_msg,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except UnicodeDecodeError:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Only UTF-8 file encoding is supported','computeinfrastructure':computeinfrastructure})
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request, 'upload.html', {'tab': 'upload','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'error':'Validation Failed','computeinfrastructure':computeinfrastructure})
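# For reference, the object-detection label file written above uses the header
# "File,id,angle,xmin,xmax,ymin,ymax,height,width,Label"; a row might look like
# this (values are illustrative only):
#   img_001.png,1,0,34,120,50,180,480,640,person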
def file_successfully_created(request,dataFile):
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
try:
request.session['datalocation'] = str(dataFile)
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile,optimize=1)
featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
# ----------------------------
numberoffeatures = len(featuresList)
from appfe.modelTraining.views import getimpfeatures
imp_features = getimpfeatures(dataFile,numberoffeatures)
samplePercentage = 100
samplePercentval = 0
showRecommended = False
from utils.file_ops import read_df
status,df_top = read_df(dataFile,nrows=10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'imp_features':imp_features, 'numberoffeatures':numberoffeatures,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
print(e)
return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'})
def uploadDatafromSatandardDataset(request):
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
try:
dataobject = request.POST.get('dataset')
if dataobject == 'Iris':
from sklearn.datasets import load_iris
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['Species']=data['target']
df['Species']=df['Species'].apply(lambda x: data['target_names'][x])
elif dataobject == 'Boston':
# Note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2; this branch assumes an older scikit-learn.
from sklearn.datasets import load_boston
df1 = load_boston()
df = pd.DataFrame(data=df1.data, columns=df1.feature_names)
df["target"] = df1.target
elif dataobject == 'BreastCancer':
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
df = pd.DataFrame(np.c_[cancer['data'], cancer['target']],columns= np.append(cancer['feature_names'], ['target']))
elif dataobject == 'Diabetes':
from sklearn.datasets import load_diabetes
data = load_diabetes()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['y']=data['target']
elif dataobject == 'Wine':
from sklearn.datasets import load_wine
data = load_wine()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['class']=data['target']
df['class']=df['class'].apply(lambda x: data['target_names'][x])
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df.to_csv(dataFile, index=False)
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
# EDA Subsampling changes
# ----------------------------
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile)
featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeature = eda_obj.getFeatures()
# ----------------------------
numberoffeatures = len(featuresList)
from appfe.modelTraining.views import getimpfeatures
imp_features = getimpfeatures(dataFile,numberoffeatures)
samplePercentage = 100
samplePercentval = 0
showRecommended = False
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
from appfe.modelTraining.models import Existusecases
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
context = {'tab': 'tabconfigure','computeinfrastructure':computeinfrastructure,'range':range(1,101),'FeturesEDA':No_of_Permissible_Features_EDA,'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList':featuresList,'data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'clusteringModels':clusteringModels,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'imp_features':imp_features, 'numberoffeatures':numberoffeatures,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
print(e)
return render(request, 'upload.html', {'error':'Failed to upload Data','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning'})
def sqlAlchemy(request):
from appbe import alchemy
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
dbType = request.POST.get('dbType')
request.session['dbType'] = dbType
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
from appbe.aion_config import get_edafeatures
No_of_Permissible_Features_EDA = get_edafeatures()
if dbType.lower() == "sqlite":
request.session['filepath'] = request.POST.get('filepath')
request.session['tablenamesql'] = request.POST.get('tablenamesql')
table_details = {"Database Type": dbType, "File Path": request.session['filepath']}
if dbType.lower() in ["postgresql", "mysql", "mssql"]:
if dbType.lower()=='mssql':
db = "mssql"
else:
db = "postgresql"
request.session['tablename'] = request.POST.get('tablename'+'_'+db)
request.session['dbname'] = request.POST.get('dbname'+'_'+db)
request.session['password'] = request.POST.get('password'+'_'+db)
request.session['username'] = request.POST.get('username'+'_'+db)
request.session['port'] = request.POST.get('port'+'_'+db)
request.session['host'] = request.POST.get('host'+'_'+db)
table_details = {"Database Type": dbType, "Database Name": request.session['dbname'],
"Host": request.session['host'], "Port": request.session['port']}
if dbType.lower() == "mssql":
request.session['driver'] = request.POST.get('driver'+'_'+db)
table_details.update({"driver": request.session['driver']})
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
#print(dbType)
submit_button = request.POST.get('sql_submit')
if submit_button == 'multitable':
try:
connection_string = alchemy.get_connection(request)
import sqlalchemy as db
engine = db.create_engine(connection_string)
engine.connect()
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
print(request.POST.get('dbType'))
context = {'tab': 'tabconfigure','FeturesEDA':No_of_Permissible_Features_EDA,'computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'version':AION_VERSION}
context.update({'db_details':table_details})
return render(request, 'querybuildersql.html', context)
except Exception as e:
print(str(e))
if "No module named 'psycopg2'" in str(e):
error = 'Not found module: psycopg2. Please install and try again'
else:
error = 'Error in connecting to the database'
return render(request, 'upload.html', {'tab': 'tabconfigure', 'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
'selected': 'modeltraning', 'version': AION_VERSION,
'error': error})
else:
try:
df = alchemy.getDataFromSingleTable(request)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df.to_csv(dataFile, index=False)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'FeturesEDA':No_of_Permissible_Features_EDA,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
print(e)
if "No module named 'psycopg2'" in str(e):
context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"error":"Not found module: psycopg2. Please install and try again"}
else:
context = {'tab': 'upload','computeinfrastructure':computeinfrastructure,'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"error":"Error in fetching the data from database."}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
def get_table_list(request):
from appbe import alchemy
dbType = request.session['dbType']
table_list = alchemy.list_tables(request)
#print(json.dumps(table_list))
return HttpResponse(json.dumps(table_list), content_type="application/json")
def get_tables_fields_list(request):
from appbe import alchemy
table_list = request.GET.get("sel_tables")
table_field_list = alchemy.list_tables_fields(request,table_list)
return HttpResponse(table_field_list, content_type="application/json")
def validate_query(request):
from appbe import alchemy
query = request.GET.get("query")
table_details = request.GET.get("table_details")
join_details = request.GET.get("join_details")
where_details = request.GET.get("where_details")
request.session["table_details"]=table_details
request.session["join_details"]=join_details
request.session["where_details"]=where_details
df,msg = alchemy.validatequery(request,table_details,join_details,where_details)
return HttpResponse(json.dumps(msg), content_type="application/json")
def submitquery(request):
from appbe import alchemy
from appbe import compute
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
computeinfrastructure = compute.readComputeConfig()
try:
query = request.POST.get("txtfinalquery")
table_details = request.session["table_details"]
join_details = request.session["join_details"]
where_details = request.session["where_details"]
df,msg = alchemy.validatequery(request,table_details,join_details,where_details)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df.to_csv(dataFile, index=False)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False}
return render(request, 'upload.html', context)
except:
return render(request, 'upload.html', {'tab': 'tabconfigure','selected_use_case': selected_use_case,'computeinfrastructure':computeinfrastructure,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','error':'Failed to upload datafile'})
def EDAReport(request):
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'EDA','Yes')
from appbe import exploratory_Analysis as ea
request.session['defaultfilepath'] = DEFAULT_FILE_PATH
request.session['configfilepath'] = CONFIG_FILE_PATH
request.session['deploylocation'] = DEPLOY_LOCATION
from appbe import compute
computeinfrastructure = compute.readComputeConfig()
submit_button = request.POST.get('upload_submit')
ModelVersion = request.session['ModelVersion']
#print(submit_button)
context = {'error':'Invalid submit action','ModelVersion': ModelVersion,'version':AION_VERSION} # fallback so 'context' is always defined for the final render
if submit_button == 'data_eda':
try:
from appbe.aion_config import settings
usecasetab = settings()
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
context = ea.get_eda(request)
context['computeinfrastructure'] = computeinfrastructure
context['s3buckets'] = get_s3_bucket()
context['gcsbuckets'] = get_gcs_bucket()
context['azurestorage'] = get_azureStorage()
context['version'] = AION_VERSION
context['usecasetab'] = usecasetab
except Exception as e:
print(e)
context = {'error':'Error in doing the EDA','ModelVersion': ModelVersion,'version':AION_VERSION}
return render(request, 'upload.html', context)
def get_features_datatype(v,num_list,cat_list,text_list):
""" To get exact datatype of the feature in Data Overview."""
if v in cat_list:
return 'Categorical'
elif v in num_list:
return 'Numerical'
elif v in text_list:
return 'Text'
def downloadedareport(request):
des1 = json.loads(request.POST.get('des1'))
des1 = pd.DataFrame(des1)
cluster_df = json.loads(request.POST.get('cluster_df'))
cluster_df = pd.DataFrame(cluster_df)
pca_df = []
if request.POST.get('pca_df') != 'Empty DataFrame\r\nColumns: []\r\nIndex: []':
pca_df = json.loads(request.POST.get('pca_df'))
pca_df = pd.DataFrame(pca_df)
cor_mat = json.loads(request.POST.get('cor_mat'))
cor_mat = pd.DataFrame(cor_mat)
cor_mat.replace(np.nan, 0, inplace=True)
cor_mat.fillna('None',inplace=True)
usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
edaFileName = usename + '_EDA.xlsx'
from io import BytesIO as IO
excel_file = IO()
excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
##For Task 17622
actual_df = json.loads(request.POST.get('data_deep_json'))
actual_df = pd.DataFrame(actual_df)
actual_df.replace(np.nan, 0,inplace=True)
actual_df.fillna('None',inplace=True)
top_10_rows = actual_df.head(10)
top_10_rows.to_excel(excel_writer, sheet_name='Top 10 Rows',index=True)
des1 = des1.fillna(0)
#Write everything in one single column
actual_df_numerical_features = actual_df.select_dtypes(exclude='object')
actual_df_categorical_features = actual_df.select_dtypes(include='object')
#For text features
textFeature = json.loads(request.POST.get('textFeature'))
textFeature_df = actual_df.filter(textFeature)
actual_df_categorical_features = actual_df_categorical_features.drop(textFeature, axis=1)
num_cols = actual_df_numerical_features.columns.to_list()
cat_cols = actual_df_categorical_features.columns.to_list()
text_cols = textFeature
# Assign the feature type in a single pass; the original looped over every
# feature and recomputed the same whole-column apply on each iteration.
des1['Features Type'] = des1['Features'].apply(lambda x: get_features_datatype(x, num_cols,cat_cols,text_cols))
curr_columns = des1.columns.to_list()
curr_columns.remove('Features Type')
insert_i = curr_columns.index('Features')+1
curr_columns.insert(insert_i,'Features Type')
des1 = des1[curr_columns]
des1.to_excel(excel_writer, sheet_name='Data Overview',startrow=0, startcol=0,index=False)
## Hopkins value addition
hopkins_value = str(request.POST.get('hopkins_val'))
hopkins_tip = request.POST.get('hopkins_tip')
hopkins_dict = {'Hopkins_value':[hopkins_value],"hopkins_information":[hopkins_tip]}
hopkins_df = pd.DataFrame.from_dict(hopkins_dict)
##Data Distribution
from appbe.eda import ux_eda
eda_obj = ux_eda(actual_df)
datadist_dict={}
for k,v in enumerate(actual_df.columns.to_list()):
distname, sse = eda_obj.DistributionFinder(actual_df[v])
datadist_dict[v]=[distname,sse]
data_dist_df = pd.DataFrame(datadist_dict)
data_dist_df = data_dist_df.T
data_dist_df.reset_index(inplace=True)
data_dist_df.columns = ['Features','Distribution','SSE']
data_dist_df.drop(['SSE'],axis=1,inplace=True)
data_dist_df.fillna("NA",inplace = True)
data_dist_df = data_dist_df.replace(['',None,pd.NaT],"NA")
data_dist_df = data_dist_df.replace(["geom"],"geometric")
data_dist_df.to_excel(excel_writer, sheet_name='Data Distribution',index=False)
if len(pca_df) > 0:
pca_df.to_excel(excel_writer, sheet_name='Feature Importance',index=False)
cor_mat.to_excel(excel_writer, sheet_name='Correlation Analysis',index=False)
#Unsupervised clustering
cdf_start_row = 1+len(hopkins_df)+6
if not textFeature:
import io
hs_info = "Hopkins Statistics"
hs_info_df = pd.read_csv(io.StringIO(hs_info), sep=",")
hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=2,index=False)
hopkins_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=2, startcol=0,index=False)
else:
# If text features available in data.
import io
hs_info = "Hopkins Statistics is not availble for data with text features. Unselect text features and retry EDA."
hs_info_df = pd.read_csv(io.StringIO(hs_info), sep=",")
hs_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=0, startcol=3,index=False)
#cluster_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row, startcol=1,index=True)
cdf_start_row = 1+len(hopkins_df)+4
cluster_info = " Unsupervised clustering results (Excluding text features) "
cluster_info_df = pd.read_csv(io.StringIO(cluster_info), sep=",")
cluster_info_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row-2, startcol=1,index=False)
cluster_df.to_excel(excel_writer, sheet_name='Unsupervised Clustering',startrow=cdf_start_row, startcol=0,index=False)
workbook = excel_writer.book
#excel_writer.save() #Save() is deprecated,instead we need to use close().
excel_writer.close()
excel_file.seek(0)
response = HttpResponse(excel_file.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + edaFileName
return response
<s> from django.db import models
class usecasedetails(models.Model):
id = models.AutoField(primary_key=True)
UsecaseName = models.CharField(max_length=50)
usecaseid = models.CharField(max_length=10, default=UsecaseName) # caution: this default is the UsecaseName field object itself, not its value
Description = models.CharField(max_length=200)
class Meta:
db_table = "usecasedetails"
class Existusecases(models.Model):
id = models.AutoField(primary_key=True)
ModelName = models.ForeignKey(usecasedetails, on_delete=models.CASCADE)
Version = models.IntegerField(default=0)
DataFilePath = models.FileField(upload_to=None)
ConfigPath = models.FileField(upload_to=None)
DeployPath = models.FileField(upload_to=None)
Status = models.CharField(max_length=200)
publishStatus = models.CharField(max_length=20, default='')
publishPID = models.IntegerField(default=0)
trainingPID = models.IntegerField(default=0)
driftStatus = models.CharField(max_length=20, default='')
ProblemType = models.CharField(max_length=20, default='')
modelType = models.CharField(max_length=40, default='')
portNo = models.IntegerField(default=0)
TrainOuputLocation = models.CharField(max_length=200, default='')
class Meta:
db_table = "Existusecases" <s> #
# AirflowLib.py
#
# It contains methods to consume the REST API of an Apache Airflow instance.
# Apache Airflow exposes an experimental API; importing this module gives
# access to it through the wrapper methods implemented below.
#
import requests
import pandas as pd
# base_url = 'http://localhost:8080/api/experimental'
# Defines the API error raised when an error occurs during API consumption
from modelTraining.airflow_config import base_url
class ApiError(Exception):
"""An API Error Exception"""
def __init__(self, status):
self.status = status
def __str__(self):
return "APIError: status={}".format(self.status)
# Takes a dagId and returns the list of DAG runs from the Apache Airflow instance as a DataFrame
def GetDagRunList(dagId):
resp = requests.get(base_url + '/dags/' + dagId + '/dag_runs')
if resp.status_code != 200:
raise ApiError('GetDagRunList {}'.format(resp))
dfData = ConvertJSONtoDF(resp.json())
return dfData
# Creates/triggers a DAG run on the Airflow instance.
# Takes two parameters: dagId and paramJson.
def TriggerDag(dagId, paramJson=None):
# Fall back to a default payload only when the caller does not supply one
# (the original unconditionally overwrote the argument).
if paramJson is None:
paramJson = {"conf": "{\"key\":\"value\"}"}
resp = requests.post(base_url + '/dags/' + dagId + '/dag_runs', json=paramJson)
print(resp)
if resp.status_code != 200:
raise ApiError('TriggerDag {}'.format(resp))
return resp.json()
# Toggles the DAG off (paused) in the Airflow instance
def PauseDagRun(dagId):
resp = requests.get(base_url + '/dags/' + dagId + '/paused/true')
if resp.status_code != 200:
raise ApiError('PauseDagRun {}'.format(resp))
return resp.json()
# Toggles the DAG on (unpaused) in the Airflow instance
def UnPauseDagRun(dagId):
resp = requests.get(base_url + '/dags/' + dagId + '/paused/false')
if resp.status_code != 200:
raise ApiError('UnPauseDagRun {}'.format(resp))
return resp.json()
# It checks if Apache Airflow instance is up and running
def TestAPI():
resp = requests.get(base_url + '/test')
if resp.status_code != 200:
raise ApiError('TestAPI {}'.format(resp))
return resp.json()
# Returns the latest run info for each available DAG.
def GetLatestDagRun():
resp = requests.get(base_url + '/latest_runs')
if resp.status_code != 200:
raise ApiError('GetLatestDagRun {}'.format(resp))
dfData = ConvertJSONtoDF(resp.json()['items'])
return dfData
# Returns the list of available pools.
def GetPoolsList():
resp = requests.get(base_url + '/pools')
if resp.status_code != 200:
raise ApiError('GetPoolsList {}'.format(resp))
return resp.json()
# Returns info for a specific pool, looked up by pool name.
def GetPoolInfo(poolName):
resp = requests.get(base_url + '/pools/' + poolName)
if resp.status_code != 200:
raise ApiError('GetPoolInfo {}'.format(resp))
return resp.json()
# Returns info for a task defined within the DAG.
def GetDagTaskInfo(dagId, taskId):
resp = requests.get(base_url + '/dags/' + dagId + '/tasks/' + taskId)
if resp.status_code != 200:
raise ApiError('GetDagTaskInfo {}'.format(resp))
return resp.json()
# Returns the Paused state of a DAG
def GetDagPausedState(dagId):
resp = requests.get(base_url + '/dags/' + dagId + '/paused')
if resp.status_code != 200:
raise ApiError('GetDagPausedState {}'.format(resp))
return resp.json()
# Creates a pool in the Airflow instance.
def CreatePool(name, description, slots):
paramJson = {"description": description, "name": name, "slots": slots}
resp = requests.post(base_url + '/pools', json=paramJson)
if resp.status_code != 200:
raise ApiError('CreatePool {}'.format(resp))
return resp.json()
# Deletes the specified pool by pool name.
def DeletePool(name):
resp = requests.delete(base_url + '/pools/' + name)
if resp.status_code != 200:
raise ApiError('DeletePool {}'.format(resp))
return resp.json()
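# Illustrative usage sketch (assumptions: an Airflow instance is reachable at
# base_url and a DAG named 'example_dag' already exists; both names are hypothetical):
def _demo_airflow_api():
    TriggerDag('example_dag', {"conf": {"key": "value"}})  # start a run with a config payload
    runs = GetDagRunList('example_dag')                    # pandas DataFrame of past runs
    print(runs.head())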
def ConvertJSONtoDF(jsonData):
df = pd.json_normalize(jsonData)
return df<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse, Http404
from django.shortcuts import redirect
import time
from django.template import loader
from django import template
from appbe.aion_config import get_llm_data
from django.views.decorators.csrf import csrf_exempt
import os
import json
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.pages import getversion
AION_VERSION = getversion()
def QueryToOpenAI(text,tempPrompt):
FragmentationAllowed="yes" #yes or no
try:
import openai
key,url,api_type,api_version=get_llm_data()
if (key == "") or (url == "") :  # bail out when either credential is missing
print("No API Key")
return("API Key and URL not provided")
openai.api_key = key
openai.api_base = url
openai.api_type = 'azure'
openai.api_version = '2023-05-15'
deployment_name="Text-Datvinci-03"
import tiktoken
encoding = tiktoken.encoding_for_model("text-davinci-003")
maxTokens=1024  # reserve 1024 of the 4096-token context for the completion (~3072 left for the prompt)
lgt=0
if FragmentationAllowed=="yes" :
# Split the text into sentence-ish pieces and pack them into chunks that stay
# under the ~2800-token prompt budget.
words = text.split(".")
chunk=""
chunks=[]
multipleChunk="no"
partialData="no"
for i in range(len(words)):
chunk=chunk+words[i]+"."
chunk_token_count = encoding.encode(chunk)
length=len(chunk_token_count)
partialData="yes"
if length > 2800 :
# Budget exceeded: close this chunk and start a new one.
chunks.append(chunk)
chunk=""
#print("\\n\\n\\n")
partialData="no"
multipleChunk="yes"
if (multipleChunk =="no" ):
# The whole text fit in a single chunk.
chunks.append(chunk)
chunk=""
if ((partialData =="yes") and (multipleChunk =="yes")):
# Flush the trailing partial chunk left over after the loop.
chunks.append(chunk)
chunk=""
summaries = []
for chunk in chunks:
# Summarize each chunk independently, then stitch the partial summaries together.
response = openai.Completion.create(engine=deployment_name, prompt=f"{tempPrompt}: {chunk}",temperature=0.2, max_tokens=maxTokens,frequency_penalty=0,presence_penalty=0)
summary = response['choices'][0]['text'].replace('\\n', '').replace(' .', '.').strip()
summaries.append(summary)
summaries=' '.join(summaries)
return summaries
else :
return "ok"
except openai.error.Timeout as e:
return "exception : Timeout Error due to Network Connection"
except Exception as e:
return "exception : "+str(e)
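# A compact, equivalent view of the chunking above (illustrative sketch, not a
# drop-in replacement): pack sentence fragments until the encoded length would
# cross the token budget, then start a new chunk.
def _chunk_by_tokens(text, encoding, budget=2800):
    chunks, current = [], ""
    for sentence in text.split("."):
        candidate = current + sentence + "."
        if len(encoding.encode(candidate)) > budget and current:
            chunks.append(current)       # close the full chunk
            current = sentence + "."     # start a new one with this sentence
        else:
            current = candidate
    if current:
        chunks.append(current)           # flush the trailing partial chunk
    return chunks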
def azureOpenAiDavinciSumarization(request):
inputDataType = str(request.GET.get('FileType'))
import time
t1=time.time()
documentType=""
if inputDataType == 'file':
dataPath = str(request.GET.get('dataPath'))
#print("Datapath--",dataPath)
if dataPath.endswith(".pdf"):
from appbe.dataIngestion import pdf2text
originalText=pdf2text(dataPath)
if dataPath.endswith(".txt"):
data=[]
with open(dataPath, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
originalText=str1
if dataPath.endswith(".docx"):
import docx
doc = docx.Document(dataPath)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
fullText= '\\n'.join(fullText)
originalText=fullText
if inputDataType == 'rawText':
originalText = str(request.GET.get('textDataProcessing'))
dataPath=""
if originalText== "None" or originalText== "":
context = {'originalText': originalText,'returnedText': "No Input given"}
print("returned due to None")
return render(request, "textsummarization.html",context)
KeyWords=str(request.GET.get('userUpdatedKeyword'))
contextOfText=str(request.GET.get('userUpdatedContext'))
doctype = str(request.GET.get('doctypeUserProvided'))
docDomainType = ["medical","other"]
Prompts = [
"Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;",
"Summarize the following article with minimum 500 words so that summarization include all main points from topics like: "
]
# Pick the prompt pair for the detected document domain; fall back to the
# generic prompt plus the user-supplied context when no domain keyword matches.
for i in range (len(docDomainType)) :
if docDomainType[i] in doctype.lower() :
docDomainPrompts=Prompts[i]
if docDomainType[i]=="medical" :
print("medical doc")
documentType="medical"
docDomainFinalPrompts=docDomainPrompts
tempPrompt1="Summarize the following article so that summarization must include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"
tempPrompt2="Summarize the following article within 500 words with proper sub-heading so that summarization include all main points from topics like: study objective; study design;demographics of patients; devices used in study; duration of exposure to device; study outcomes; complications;adverse events;confounding factors; study limitations and weakness;usability of the device; misuse and off-label use of the device;conflict of interest;statistical analysis;conclusions;"
else :
print("other doc-a-")
docDomainFinalPrompts=docDomainPrompts+" "+contextOfText
tempPrompt1="Summarize the following article with minimum 500 words so that summarization include all main points from topics like: "+contextOfText
tempPrompt2=tempPrompt1
break
if (i== len(docDomainType)-1) :
print("other doc-b-")
docDomainPrompts=Prompts[i]
docDomainFinalPrompts=docDomainPrompts+" "+contextOfText
tempPrompt1="Summarize the following article so that summarization include all main points from topics like: "+contextOfText
tempPrompt2=tempPrompt1
try:
pattern =['Summary','Study Objective','Study Design', 'Demographics of Patients', 'Devices Used in Study','Duration of Exposure to Device','Study Outcomes','Complications','Adverse Events','Confounding Factors','Study Limitations and Weakness','Usability of the Device','Misuse and Off-Label Use of the Device','Conflict of Interest','Statistical Analysis','Conclusions']
import tiktoken
encoding = tiktoken.encoding_for_model("text-davinci-003")
encodedData = encoding.encode(originalText)
totalToken=len(encodedData)
# While the document exceeds the prompt budget, summarize it in passes until it
# fits, then issue the final summarization prompt.
while totalToken > 2800:
originalText=QueryToOpenAI(originalText,tempPrompt1)
encodedData = encoding.encode(originalText)
totalToken=len(encodedData)
retText=QueryToOpenAI(originalText,tempPrompt2)
import re
summary1=retText
summary2=retText
# For medical documents, render the section headings as underlined HTML in
# summary1 and strip them entirely from summary2.
if documentType=="medical" :
for i in range(len(pattern)):
summary1=summary1.replace(pattern[i]+':','<br>'+'<u>'+pattern[i]+'</u>'+'<br>')
for i in range(len(pattern)):
summary1=summary1.replace(pattern[i],'<br>'+'<u>'+pattern[i]+'</u>'+'<br>')
for i in range(len(pattern)):
summary2=summary2.replace(pattern[i]+':','')
for i in range(len(pattern)):
summary2=summary2.replace(pattern[i],'')
#retText2=""
#tempPrompt="Find some most highlighting points in the following article"
#retText2=QueryToOpenAI(originalText,tempPrompt)
#retText3=""
#tempPrompt="Find only one or two risk factors that are mentioned in the following article"
#retText3=QueryToOpenAI(originalText,tempPrompt)
#retText4=""
#tempPrompt="Find statistical informtation that are mentioned in the following article"
#retText4=QueryToOpenAI(originalText,tempPrompt)
#retText5=""
#tempPrompt="Find name of the author only one time that are mentioned in the following article"
#retText5=QueryToOpenAI(originalText,tempPrompt)
#retText6=""
#tempPrompt="Suggest the name of the title for the following article"
#retText6=QueryToOpenAI(originalText,tempPrompt)
t2=time.time()
#print("\\n time taken-->", t2-t1 ,"length of sum",str(length))
print("\\n time taken-->", t2-t1 )
#print("\\n summary from LLM-->\\n",returnedText)
#context = {'title': retText6, 'summary': summary1, 'summary2': summary2, 'AuthorName': "Author names :"+retText5,'BulletPoints': retText2,'Riskfactor': retText3,'StatInfo': retText4}
context = {'title': "", 'summary': summary1, 'summary2': summary2, 'AuthorName': "",'BulletPoints': "",'Riskfactor': "",'StatInfo': ""}
return HttpResponse(json.dumps(context), content_type="application/json")
except Exception as e:
print(e)
context = {'returnedText': "exception"}
return HttpResponse(json.dumps(context), content_type="application/json")
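# The view above applies a map-reduce style reduction: while the document
# exceeds the model's context budget it is summarized in passes, and only then
# is the final prompt issued. Minimal sketch of that pattern (illustrative;
# summarize and count_tokens are caller-supplied callables):
def _reduce_to_budget(text, summarize, count_tokens, budget=2800):
    while count_tokens(text) > budget:
        text = summarize(text)  # each pass shortens the text
    return text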
def azureOpenAiDavinci(request):
key,url,api_type,api_version=get_llm_data()
inputDataType = str(request.POST.get('FileType'))
if inputDataType == 'file':
Datapath = request.FILES['file']
#dataPath = str(request.GET.get('dataPath'))
ext = str(Datapath).split('.')[-1]
temp1=str(Datapath).split('.')
filetimestamp = str(int(time.time()))
if ext.lower() in ['pdf','txt','docx']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' +temp1[0]+'_'+filetimestamp+'.'+ext)
#dataFile = os.path.join(DATA_FILE_PATH,'AION_' +filetimestamp+'.'+ext)
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
destination.close()
dataPath = dataFile
if dataPath.endswith(".pdf"):
from appbe.dataIngestion import pdf2text
originalText=pdf2text(dataPath)
if dataPath.endswith(".txt"):
data=[]
with open(dataPath, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
originalText=str1
if dataPath.endswith(".docx"):
import docx
doc = docx.Document(dataPath)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
fullText= '\\n'.join(fullText)
originalText=fullText
if inputDataType == 'rawText':
originalText = str(request.POST.get('textDataProcessing'))
dataPath=""
doctype = str(request.POST.get('doctypeUserProvided'))
if originalText== "None" or originalText== "":
context = {'originalText': originalText,'returnedText': "No Input given"}
print("returned due to None")
return render(request, "textsummarization.html",context)
length=len(originalText.split())
inputTextPromptForKeyWords="Suggest only ten most important keywords from the following document."
inputTextPromptForContext="Suggest ten most important context in the following article. "
#inputTextPromptForDocType="Suggest on which domain or field or area the following article is or the article is on sports or politics or medical or music or technology or legal field. "
KeyWords=""
contextOfText=""
try:
tempPrompt=inputTextPromptForKeyWords
retText=QueryToOpenAI(originalText,tempPrompt)
KeyWords=retText
tempPrompt=inputTextPromptForContext
retText=QueryToOpenAI(originalText,tempPrompt)
contextOfText=retText
#tempPrompt=inputTextPromptForDocType
#retText=QueryToOpenAI(originalText,tempPrompt)
#doctype=retText
context = {'originalText': originalText,'KeyWords': KeyWords,'contextOfText': contextOfText,'doctype': doctype,'dataPath' :dataPath}
return HttpResponse(json.dumps(context), content_type="application/json")
except Exception as e:
print(e)
context = {'originalText': originalText,'KeyWords': KeyWords,'contextOfText': contextOfText,'doctype': doctype,'dataPath' :dataPath}
return HttpResponse(json.dumps(context), content_type="application/json")
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
def uploadedTextData(request):
from appbe.dataIngestion import ingestTextData
context = ingestTextData(request,DATA_FILE_PATH)
context['version'] = AION_VERSION
return render(request, 'textdatalabelling.html', context)
def getTextLabel(request):
from appbe.llm_textdatalabelling import generateTextLabel
context = generateTextLabel(request,DATA_FILE_PATH)
context['version'] = AION_VERSION
return render(request, 'textdatalabelling.html', context)
def downloadTextLabelReport(request):
file_path = request.session['texttopicdatapath']
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
return response
raise Http404
# QnA Generator using LLM related changes
# --------------------------------------------------------
def genearateQA(request):
from appbe.llm_generateQnA import ingestDataForQA
context = ingestDataForQA(request,DATA_FILE_PATH)
context['version'] = AION_VERSION
context['selected'] = "llm_features"
return render(request, 'QnA.html', context)
def downloadQnAReport(request):
file_path = request.session['QnAfilepath']
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
return response
raise Http404
# --------------------------------------------------------<s> from django.apps import AppConfig
class ModelTrainingConfig(AppConfig):
name = 'appfe.modelTraining'
<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import pandas as pd
import numpy as np
from appbe.pages import getversion
import logging
import json
import time
import os
from appbe import compute
AION_VERSION = getversion()
def sensitivityAnalysis(request): #usnish
from appbe.pages import usecases_page
t1 = time.time()
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
log = logging.getLogger('log_ux')
computeinfrastructure = compute.readComputeConfig()
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
try:
from trusted_ai.sensitivity_analysis import startSA
# request.session['deploypath'] = str(p.DeployPath)
sensitivitystr= startSA(request)
sensitivitystr = json.loads(sensitivitystr)
ModelStatus = request.session['ModelStatus']
if sensitivitystr['Status']=='Success':
sensitivityGraph = sensitivitystr['graph']
t2 = time.time()
log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return HttpResponse(json.dumps(sensitivitystr))
else:
error = sensitivitystr['reason']
raise Exception(error)
except Exception as e:
print(e)
log.info('Sensitivity Analysis : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to Perform Sensitivity Analysis, ' + str(e))
outputstr = json.dumps({'Status':'','msg':'Failed to Perform Sensitivity Analysis. '+str(e)})
return HttpResponse(outputstr)
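# Shape of the sensitivity-analysis payload handled above (illustrative, as
# implied by the code): {"Status": "Success", "graph": "<plot markup>"} on
# success, or {"Status": "...", "reason": "<error>"} on failure.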
def handlefairness(request):
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
problemType = 'classification'
for key in configSettings['basic']['analysisType']:
if configSettings['basic']['analysisType'][key] == 'True':
problemType = key
break
trainingfeature = configSettings['basic']['trainingFeatures']
targetfeature = configSettings['basic']['targetFeature']
featuretype = configSettings['advance']['profiler']['featureDict']
catfeature = []
for feat_conf in featuretype:
colm = feat_conf.get('feature', '')
if feat_conf['type'] == "categorical":
catfeature.append(colm)
output={'targetfeature':targetfeature,'trainingfeature':trainingfeature,'catfeature':catfeature,'problemType':problemType}
return HttpResponse(json.dumps(output))
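# Shape of the profiler featureDict consumed above (illustrative example; the
# exact entries come from the saved training configuration):
#   [{"feature": "age", "type": "numerical"},
#    {"feature": "gender", "type": "categorical"}]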
def fairnesmetrics(request): #Richard--Task-13581
from appbe.pages import usecases_page
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
t1 = time.time()
log = logging.getLogger('log_ux')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
try:
from trusted_ai.fairness_metrics import get_metrics
output = get_metrics(request)
t2 = time.time()
log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return HttpResponse(output)
except Exception as e:
print(e)
log.info('Fairness Metrics : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Fairness Metrics, ' + str(e))
return HttpResponse('')
def performance_metrics(request):
from appbe.pages import usecases_page
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
t1 = time.time()
log = logging.getLogger('log_ux')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
try:
from trusted_ai.performance import get_metrics
output = get_metrics(request)
t2 = time.time()
log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
print( output)
return HttpResponse(json.dumps(output))
except Exception as e:
print(e)
log.info('Performance Metrics : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Performance Metrics, ' + str(e))
return HttpResponse('')
def uquncertainty(request):
from trusted_ai.trustedai_uq import trustedai_uq
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
output = trustedai_uq(request)
return HttpResponse(output)
def uqtransparency(request):
t1 = time.time()
log = logging.getLogger('log_ux')
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
selected_use_case = request.session['UseCaseName']
model_version = request.session['ModelVersion']
try:
deploypath = request.session['deploypath']
configpath = os.path.join(deploypath,'etc','display.json')
f = open(configpath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
problemType = configSettings['problemType']
model_Features = configSettings['modelFeatures']
if problemType.lower() == 'classification':
from trusted_ai.brier_score import get_brier_score
problem_type, brier_score = get_brier_score(request)
display_dict = {"ProblemType":problem_type.title(),"BrierScore":round(brier_score, 2),'model_Features':model_Features,'problemTypeuq':problemType}
else:
display_dict = {"ProblemType":problemType,"BrierScore":'','model_Features':model_Features,'problemTypeuq':problemType}
display_json = json.dumps(display_dict)
t2 = time.time()
log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str(
model_version) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return HttpResponse(display_json, content_type="application/json")
except Exception as e:
print(e)
log.info('Brier Score : ' + str(selected_use_case) + ' : ' + str(
model_version) + ' : ' + '0' + 'sec' + ' : ' + 'Error : Failed to display Brier Score, ' + str(e))
return HttpResponse('') <s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
import json
from appbe import compute
import logging
def get_instance_id(modelID):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID)
print(data)
if len(data) > 0:
return (data[3]+' instance '+data[2])  # compose display string from LLMTuning row columns
else:
return 'Instance ID not available'
else:
return 'Instance ID not available'
def PredictForSingleInstance(request):
from appbe.trainresult import ParseResults
submittype = request.POST.get('predictsubmit')
from appbe.prediction import singleInstancePredict
context = singleInstancePredict(request,Existusecases,usecasedetails)
if submittype.lower() == 'predict':
from appbe.train_output import get_train_model_details
trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request)
imagedf = ''
model_count = Existusecases.objects.filter(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'],Status='SUCCESS').count()
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+", encoding="utf-8")
training_output = f.read()
f.close()
result,survival_images = ParseResults(training_output)
context.update({'result':result})
context['version'] = AION_VERSION
context['modelType'] = modelType
context['bestmodel'] = bestmodel
return render(request, 'prediction.html', context)
else:
context['version'] = AION_VERSION
return context
def getTrainingStatus(request):
model = Existusecases.objects.get(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+", encoding="utf-8")
training_output = f.read()
f.close()
from appbe.trainresult import FeaturesUsedForTraining
return FeaturesUsedForTraining(training_output)
def Prediction(request):
log = logging.getLogger('log_ux')
from appbe.trainresult import ParseResults
from appbe.dataIngestion import delimitedsetting
from appbe import service_url
from appbe.aion_config import settings
usecasetab = settings()
try:
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
#print(computeinfrastructure)
if ModelStatus != 'SUCCESS':
log.info('Prediction:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first or launch an existing trained model')
return render(request, 'prediction.html', {
'error': 'Please train the model first or launch an existing trained model',
'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecasetab':usecasetab,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION})
else:
if 'ModelVersion' not in request.session:
log.info('Prediction:' + str(selected_use_case) + ':' + str(
ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first')
return render(request, 'prediction.html',
{'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION})
elif request.session['ModelVersion'] == 0:
log.info('Prediction:' + str(selected_use_case) + ':' + str(
ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error: Please train the model first')
return render(request,'prediction.html',{'usecasetab':usecasetab,'error':'Please train the model first','selected':'prediction','version':AION_VERSION})
else:
from appbe.train_output import get_train_model_details
trainingStatus,modelType,bestmodel = get_train_model_details(DEPLOY_LOCATION,request)
imagedf = ''
model_count = Existusecases.objects.filter(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'],Status='SUCCESS').count()
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+")
training_output = f.read()
f.close()
result,survival_images = ParseResults(training_output)
if model_count >= 1:
updatedConfigFile = request.session['config_json']
#print(updatedConfigFile)
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
analysisType = configSettingsJson['basic']['analysisType']
problem_type = ""
for k in analysisType.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type.lower() == 'recommendersystem':
modelName = ""
recommender_models = configSettingsJson['basic']['algorithms']['recommenderSystem']
for k in recommender_models.keys():
if configSettingsJson['basic']['algorithms']['recommenderSystem'][k] == 'True':
modelName = k
break
if modelName.lower() == 'associationrules-apriori':
return render(request, 'prediction.html', {
'error': 'Prediction not supported for Association Rules (Apriori)',
'selected': 'prediction','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version':AION_VERSION})
delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier'])
#problemtypes = configSettingsJson['basic']['analysisType']
#print(problemtypes.keys())
from appfe.modelTraining.train_views import getMLModels
problem_type,dproblemtype,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
iterName = request.session['usecaseid'].replace(" ", "_")
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
if problem_type == 'timeSeriesForecasting': #task 11997
inputFieldsDict = {'noofforecasts': 10}
elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating':
inputFieldsDict = {"uid": 1, "numberOfRecommendation":10} #Task 11190
elif problem_type == 'stateTransition':
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
if inputFeatures != '':
inputFeaturesList = inputFeatures.split(',')
else:
inputFeaturesList = []
inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
else:
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
if inputFeatures != '':
inputFeaturesList = inputFeatures.split(',')
else:
inputFeaturesList = []
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
if configSettingsJson['basic']['contextFeature'] != '':
inputFeaturesList.append(configSettingsJson['basic']['contextFeature'])
if problem_type == 'llmFineTuning':
inputFeaturesList.append('Temperature')
inputFeaturesList.append('Max Tokens')
if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997
if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na':
inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature'])
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
if problem_type != 'llmFineTuning':
if os.path.isfile(dataFilePath):
df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace')
try:
inputFieldsDict = df.to_dict(orient='index')[0]
except:
inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict()
else:
inputFieldsDict = {"File":"EnterFileContent"}
else:
inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict()
inputFieldsDict['Temperature'] = '0.1'
from appbe.prediction import get_instance
hypervisor,instanceid,region,image = get_instance(iterName+'_'+str(ModelVersion))
if hypervisor.lower() == 'aws':
inputFieldsDict['Max Tokens'] = '1024'
else:
inputFieldsDict['Max Tokens'] = '4096'
inputFields = []
inputFields.append(inputFieldsDict)
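# inputFields now holds a single dict mapping each feature to a sample value
# (e.g. [{"age": 42, "salary": 50000}] -- an illustrative example) and is used
# to pre-fill the prediction form.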
settings_url = ''
if problem_type == 'llmFineTuning':
ser_url = get_instance_id(iterName+'_'+str(ModelVersion))
settings_url = ''
modelSize = ''
if 'modelSize' in configSettingsJson['basic']:
selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels]
for k in selectedModelSize.keys():
if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True':
modelSize = k
break
mlmodels = mlmodels+'-'+modelSize
elif problem_type == 'stateTransition':
ser_url = service_url.read_service_url_params(request)
settings_url = service_url.read_service_url_params(request)
ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+iterName+'&version='+str(ModelVersion)
settings_url = settings_url+'pattern_anomaly_settings?usecaseid='+iterName+'&version='+str(ModelVersion)
else:
ser_url = service_url.read_service_url_params(request)
ser_url = ser_url+'predict?usecaseid='+iterName+'&version='+str(ModelVersion)
onnx_runtime = False
analyticsTypes = problem_type
usecasename = request.session['usecaseid'].replace(" ", "_")
return render(request, 'prediction.html',
{'inputFields': inputFields,'usecasename':usecasename,'mlmodels':mlmodels,'configSettingsJson':configSettingsJson,'result':result,'imagedf':imagedf, 'selected_use_case': selected_use_case,'ser_url':ser_url,'analyticsType':analyticsTypes,'settings_url':settings_url,
'ModelStatus': ModelStatus,'onnx_edge':onnx_runtime,'ModelVersion': ModelVersion, 'selected': 'prediction','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'modelType':modelType,'bestmodel':bestmodel,'usecasetab':usecasetab})
else:
log.info('Prediction; Error: Please train the model first')
return render(request, 'prediction.html',
{'usecasetab':usecasetab,'error': 'Please train the model first', 'selected': 'prediction','version':AION_VERSION})
except Exception as e:
print(e)
log.info('Prediction:' + str(selected_use_case) + ':' + str(
ModelVersion) + ':' + '0' + 'sec' + ':' + 'Error:'+str(e))
return render(request, 'prediction.html',{'usecasetab':usecasetab,'error': 'Failed to perform prediction', 'selected': 'prediction','version':AION_VERSION})<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe import installPackage
import json
from appbe import service_url
from appbe import compute
import sys
import csv
import time
from appbe.training import checkModelUnderTraining
import logging
def Distribution(request):
from appbe import exploratory_Analysis as ea
log = logging.getLogger('log_ux')
from appbe.aion_config import settings
usecasetab = settings()
computeinfrastructure = compute.readComputeConfig()
try:
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes')
t1 = time.time()
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+")
training_output = f.read()
f.close()
training_output = json.loads(training_output)
featuresused = training_output['data']['featuresused']
feature = eval(featuresused)  # featuresused is a stringified list of feature names
dataFilePath = request.session['datalocation']
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
ser_url = service_url.read_monitoring_service_url_params(request)
iterName = request.session['usecaseid'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
pser_url = service_url.read_performance_service_url_params(request)
pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
if request.POST.get('inputdriftsubmit') == 'trainingdatadrift':
historicadata = request.session['datalocation']
trainingdf = pd.read_csv(historicadata)
trainingDrift = ea.getDriftDistribution(feature, trainingdf)
newDataDrift = ''
concatDataDrift = ''
drift_msg = ''
driftdata = 'NA'
else:
historicadata = request.session['datalocation']
trainingdf = pd.read_csv(historicadata)
trainingDrift = ''
type = request.POST.get("optradio")
if type == "url":
try:
url = request.POST.get('urlpathinput')
newdatadf = pd.read_csv(url)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv')
newdatadf.to_csv(dataFile, index=False)
request.session['drift_datalocations']= dataFile
driftdata = request.session['drift_datalocations']
except Exception as e:
request.session['currentstate'] = 0
e = str(e)
if e.find("tokenizing")!=-1:
error = "The URL does not point to openly accessible data"
elif e.find("connection")!=-1:
error = "Cannot access the URL through the HCL network; please try another network"
else:
error = 'Please provide a correct URL'
context = {'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','s3buckets': get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),
'kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab}
log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', ' + e)
return render(request, 'upload.html', context)
else:
if request.FILES:
Datapath = request.FILES['DataFilePath']
from io import StringIO
content = StringIO(Datapath.read().decode('utf-8'))
reader = csv.reader(content)
df = pd.DataFrame(reader)
df.columns = df.iloc[0]
df = df[1:]
ext = str(Datapath).split('.')[-1]
filetimestamp = str(int(time.time()))
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
destination.close()
if(os.path.isfile(dataFile) == False):
context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
' ModelStatus': ModelStatus, 'ModelVersion': ModelVersion}
log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Data file does not exist')
return render(request, 'inputdrif.html', context)
request.session['drift_datalocations'] = dataFile
driftdata = request.session['drift_datalocations']
newdatadf = pd.read_csv(driftdata)
newDataDrift = ea.getDriftDistribution(feature, trainingdf, newdatadf)
condf = pd.concat([trainingdf, newdatadf], ignore_index=True, sort=True)
concatDataDrift = ea.getDriftDistribution(feature,trainingdf,condf)
drift_msg,htmlPath = Drift(request,historicadata, dataFile, feature)
if htmlPath != 'NA':
file = open(htmlPath, "r",errors='ignore')
driftdata = file.read()
file.close()
else:
driftdata = 'NA'
t2 = time.time()
log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return render(request, 'inputdrif.html',
{'trainingDrift': trainingDrift, 'newDataDrift': newDataDrift, 'concatDataDrift': concatDataDrift,'usecasetab':usecasetab,
'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version' :AION_VERSION,
'selected': 'monitoring', 'drift_msg': drift_msg,'htmlPath':driftdata,'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure})
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
ser_url = service_url.read_monitoring_service_url_params(request)
iterName = request.session['usecaseid'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
pser_url = service_url.read_performance_service_url_params(request)
pser_url = pser_url+'performanceusecaseid='+iterName+'&version='+str(ModelVersion)
context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Failed to perform drift analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'version' : AION_VERSION}
log.info('Input Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to do drift analysis'+', '+str(inst))
log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request, 'inputdrif.html', context)
def Drift(request,trainingdatalocation, newdatalocation, features):
log = logging.getLogger('log_ux')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
try:
inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":newdatalocation}
inputFieldsJson = json.dumps(inputFieldsJson)
iterName = request.session['usecaseid'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = service_url.read_monitoring_service_url_params(request)
ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
import requests
try:
#print(inputFieldsJson)
#print(ser_url)
response = requests.post(ser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
if response.status_code != 200:
outputStr=response.content
return outputStr, 'NA'  # keep the (message, htmlPath) contract expected by the caller
except Exception as inst:
print(inst)
if 'Failed to establish a new connection' in str(inst):
Msg = 'AION Service needs to be started'
else:
Msg = 'Error during Drift Analysis'
log.info('Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + Msg+', '+str(inst))
return Msg, 'NA'  # keep the (message, htmlPath) contract expected by the caller
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
#print(decoded_data)
htmlPath = 'NA'
if decoded_data['status'] == 'SUCCESS':
data = decoded_data['data']
htmlPath = decoded_data['htmlPath']
if 'Message' in data:
Msg = []
Msg.append(data['Message'])
else:
Msg = data['Affected Columns']
log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Success')
else:
Msg = 'Error during Drift Analysis'
htmlPath = 'NA'
log.info('Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(Msg))
return Msg,htmlPath
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
log.info('Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + str(e))
log.info('Details : ' +str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return 'Error during Drift Analysis', 'NA'
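# Request/response shapes for the monitoring service, as implied by the code
# above (illustrative; actual field names come from the service):
#   request : {"trainingDataLocation": "<path>", "currentDataLocation": "<path>"}
#   response: {"status": "SUCCESS", "data": {"Affected Columns": [...]}, "htmlPath": "<report>"}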
def Evaluate(request):
from appbe.aion_config import settings
usecasetab = settings()
log = logging.getLogger('log_ux')
try:
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Drift','Yes')
t1 = time.time()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
type = request.POST.get("optradio")
ser_url = service_url.read_monitoring_service_url_params(request)
iterName = request.session['usecaseid'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
pser_url = service_url.read_performance_service_url_params(request)
pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
if type == "url":
try:
url = request.POST.get('urlpathinput')
newdatadf = pd.read_csv(url)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv')
newdatadf.to_csv(dataFile, index=False)
except Exception as e:
request.session['currentstate'] = 0
e = str(e)
if e.find("tokenizing")!=-1:
error = "The URL does not point to openly accessible data"
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+error+', '+str(e))
elif e.find("connection")!=-1:
error = "Cannot access the URL through the HCL network; please try another network"
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + error +', '+e)
else:
error = 'Please provide a correct URL'
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:' + error+', '+e)
context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': error,'ModelVersion': ModelVersion,'computeinfrastructure':computeinfrastructure,'emptycsv':'emptycsv','kafkaSetting':'kafkaSetting','ruuningSetting':'ruuningSetting','usecasetab':usecasetab,'version':AION_VERSION}
return render(request, 'upload.html', context)
else:
if request.FILES:
Datapath = request.FILES['DataFilePath']
from io import StringIO
content = StringIO(Datapath.read().decode('utf-8'))
reader = csv.reader(content)
df = pd.DataFrame(reader)
df.columns = df.iloc[0]
df = df[1:]
ext = str(Datapath).split('.')[-1]
filetimestamp = str(int(time.time()))
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
destination.close()
if(os.path.isfile(dataFile) == False):
context = {'error': 'Data file does not exist', 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'version':AION_VERSION}
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + ' Error : Data file does not exist')
return render(request, 'inputdrif.html', context)
trainingdatalocation = request.session['datalocation']
inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":dataFile}
inputFieldsJson = json.dumps(inputFieldsJson)
import requests
try:
#response = requests.post(pser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={"Content-Type":"application/json",})
response = requests.post(pser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
if response.status_code != 200:
outputStr=response.content
log.info('Performance Drift:' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error: Status code != 200')
return outputStr
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
Msg = 'AION Service needs to be started'
else:
Msg = 'Error during Drift Analysis'
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0 ' + 'sec' + ' : ' + 'Error : '+Msg+', ' + str(inst))
return Msg
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
#print(decoded_data)
if decoded_data['status'] == 'SUCCESS':
htmlPath = decoded_data['htmlPath']
#print(htmlPath)
if htmlPath != 'NA':
file = open(htmlPath, "r",errors='ignore')
driftdata = file.read()
file.close()
else:
driftdata = 'NA'
print(htmlPath)
context = {'status':'SUCCESS','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
t2 = time.time()
log.info('Performance Drift:' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2-t1)) + 'sec' + ' : ' + 'Success')
return render(request, 'inputdrif.html', context=context)
else:
driftdata = 'Error'
context = {'status':'ERROR','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'htmlPath': driftdata,'selected_use_case': selected_use_case,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : driftdata = Error')
return render(request, 'inputdrif.html', context=context)
except Exception as e:
print(e)
context = {'ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'error': 'Failed to perform Drift Analysis', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
log.info('Performance Drift : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to perform Drift Analysis' + ', ' + str(e))
return render(request, 'inputdrif.html', context=context) <s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getusercasestatus
import pandas as pd
import numpy as np
from appbe.pages import getversion
import logging
import json
import time
import os
import subprocess
import sys
import base64
from appbe import compute
import urllib
AION_VERSION = getversion()
def Sagemaker(request):
if request.method == "POST":
try:
datafile = request.POST['datap']
endpoint = request.POST['endpoint']
awsaccountid = request.POST['awsaccountid']
accesskeyid = request.POST['accesskeyid']
secretaccesskey = request.POST['secretaccesskey']
sessionToken = request.POST['sessionToken']
region = request.POST['region']
if (awsaccountid != "" and accesskeyid != "" and secretaccesskey != "" and sessionToken != "" and endpoint != "") :
awsSagemaker = {}
awsSagemaker['awsID'] = awsaccountid
awsSagemaker['accesskeyID'] = accesskeyid
awsSagemaker['secretAccesskey'] = secretaccesskey
awsSagemaker['sessionToken'] = sessionToken
awsSagemaker['region'] = region
configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
configSettingsJson['awsSagemaker'] = awsSagemaker
if(os.path.exists(datafile)):
inputDataType = datafile.rsplit('.', 1)[1]
if inputDataType.lower() == 'csv':
df = pd.read_csv(datafile)
# df1 = df.iloc[0, :]
df2 = df.head(1)
df3 =df2.to_dict(orient='records')[0]
inputFields = []
inputFields.append(df3)
# models = models.rsplit('.', 1)[1]
context = {'sagepredict':'sagepredict','endpoint':endpoint,'datafile':datafile,'inputFields':inputFields,'sagemaker':configSettingsJson,'version':AION_VERSION}
else:
context = {'exception':'exception','error':'Data File Error','version':AION_VERSION}
else:
context = {'error': 'Error: Please enter valid input','runtimeerror':'runtimeerror','version':AION_VERSION}
except Exception as e:
context = {'exception':'exception','error':'Exception :'+str(e),'sagepredict':'sagepredict','version':AION_VERSION}
return render(request, 'ConvertOnnx.html',context)
def Tfliteneural(request):
try:
if request.method == "POST":
try:
models = request.POST['model1']
datafile = request.POST['datafile1']
if(os.path.isfile(models)):
modelformat = models.rsplit('.', 1)[1]
if(os.path.isfile(models) and os.path.exists(datafile) and modelformat.lower()=='tflite'):
inputDataType = datafile.rsplit('.', 1)[1]
if inputDataType.lower() == 'csv':
df = pd.read_csv(datafile)
df2 = df.head(1)
df3 =df2.to_dict(orient='records')[0]
inputFields = []
inputFields.append(df3)
context = {'mlalgotf':'mlalgotf','models':models,'datafile':datafile,'inputFields':inputFields,'selected':'mllite','version':AION_VERSION}
elif inputDataType.lower() == 'jpg':
from PIL import Image
img = Image.open(datafile)
string = base64.b64encode(open(datafile, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
context = {'dlalgotf':'dlalgotf','models':models,'datafile':datafile,'im':image_64,'selected':'mllite','version':AION_VERSION}
else:
context={'error':'Either model path or data path does not exists','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION}
except Exception as e:
context={'error':'Exception i.e., '+str(e),'runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION}
return render(request, 'ConvertOnnx.html',context)
except:
context={'error':'Failed to perform TFlite Runtime Prediction','runtimeerror':'runtimeerror','selected':'mllite'}
return render(request, 'ConvertOnnx.html',context)
def openneural(request):
try:
if request.method == "POST":
models = request.POST['model']
datafile = request.POST['datafile']
if(os.path.isfile(models)):
modelformat = models.rsplit('.', 1)[1]
if(os.path.isfile(models) and os.path.exists(datafile)) and modelformat.lower()=='onnx':
inputDataType = datafile.rsplit('.', 1)[1]
if inputDataType.lower() == 'csv':
df = pd.read_csv(datafile)
df2 = df.head(1)
df3 =df2.to_dict(orient='records')[0]
inputFields = []
inputFields.append(df3)
# models = models.rsplit('.', 1)[1]
context = {'mlalgo':'mlalgo','models':models,'datafile':datafile,'selected':'mllite','inputFields':inputFields,'version':AION_VERSION}
elif inputDataType.lower() == 'jpg':
from PIL import Image
img = Image.open(datafile)
string = base64.b64encode(open(datafile, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
context = {'dlalgo':'dlalgo','models':models,'datafile':datafile,'im':image_64,'selected':'mllite','version':AION_VERSION}
else:
context={'error':'Either model path or data path does not exists','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION}
return render(request, 'ConvertOnnx.html',context)
except:
context={'error':'Failed to perform ONNX Runtime Prediction','runtimeerror':'runtimeerror','selected':'mllite','version':AION_VERSION}
return render(request, 'ConvertOnnx.html',context)
def ConvertOnnx(request):
try:
if request.method == "POST":
modelpath = request.POST['models']
deploypath = request.POST['deploy']
outputonnx = request.POST['outputonnx']
inputtonnx = request.POST['inputtonnx']
Features = request.POST['Features']
modelinput = inputtonnx
modeloutput = outputonnx
if (outputonnx !="sagemaker") and ((os.path.exists(modelpath) == False) or (os.path.exists(deploypath) == False)):
context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'error2':'error2','convert':'convert','logfile':'','selected':'mllite','version':AION_VERSION}
elif outputonnx !="sagemaker":
filetimestamp = str(int(time.time()))
convjson = os.path.join(DEFAULT_FILE_PATH, 'conversion.json')
with open(convjson, 'r+') as f:
conv = json.load(f)
f.close()
conv['basic']['modelName'] = 'conversion_'+ str(filetimestamp)
conv['basic']['modelVersion'] = "1"
conv['advance']['aionConversionUtility']['modelpath'] = modelpath
conv['advance']['aionConversionUtility']['deployedlocation'] = deploypath
conv['advance']['aionConversionUtility']['numberoffeatures'] = Features
temp = {}
temp['inputModelType'] = inputtonnx
temp['outputModelType'] = outputonnx
inputtype = conv['advance']['aionConversionUtility']['inputModelType']
outputtype = conv['advance']['aionConversionUtility']['outputModelType']
for i in list(inputtype.keys()):
conv['advance']['aionConversionUtility']['inputModelType'][i] = 'False'
for i in list(outputtype.keys()):
conv['advance']['aionConversionUtility']['outputModelType'][i] = 'False'
conv['advance']['aionConversionUtility']['inputModelType'][temp['inputModelType'][0].lower() + temp['inputModelType'][1:]] = 'True'
conv['advance']['aionConversionUtility']['outputModelType'][temp['outputModelType'][0].lower() + temp['outputModelType'][1:]] = 'True'
conv = json.dumps(conv)
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'conv' + filetimestamp + '.json')
with open(config_json_filename, "w") as fpWrite:
fpWrite.write(conv)
fpWrite.close()
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
try:
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','convertmodel','-c',config_json_filename])
outputStr = outputStr.decode('utf-8')
outputStr= outputStr.replace('\\'','\\"')
#print('ou',outputStr)
outputStr = outputStr.strip()
MLlite = json.loads(outputStr)
logsfile = MLlite['logfiles']
if MLlite['Convert'] == 'Success':
context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'convert1':'convert1','convert':'convert','logfile':MLlite['logfiles'],'selected':'mllite','version':AION_VERSION}
else:
logfile = logsfile.replace('\\\\','@')
context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'error1':'error1','convert':'convert','logfile':logfile,'selected':'mllite','version':AION_VERSION}
except Exception as e:
print(e)
context = {'modelpath':modelpath,'deploypath':deploypath,'inputtype':modelinput,'outputtype':modeloutput,'Features':Features,'Notconvert':'Notconvert','convert':'convert','version':AION_VERSION}
elif ( outputonnx =="sagemaker") :
configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
#print(configFile)
                with open(configFile, "r") as f:
                    configSettings = f.read()
configSettingsJson = json.loads(configSettings)
configSettingsJson['modelInput'] = request.POST.get('ModelInput')
#print('pushonly:',request.POST.get('sagemaker'))
if request.POST.get('sagemaker') == 'CreateDeploy':
configSettingsJson['sagemakerDeploy'] = 'True'
configSettingsJson['deployExistingModel']['status'] = 'False'
else:
configSettingsJson['sagemakerDeploy'] = 'False'
if request.POST.get('sagemaker') == 'DeployOnly':
configSettingsJson['deployExistingModel']['status'] = 'True'
else:
configSettingsJson['deployExistingModel']['status'] = 'False'
#configSettingsJson['deployExistingModel']['status'] = request.POST.get('Status')
configSettingsJson['deployExistingModel']['dockerImageName'] = request.POST.get('imagename')
configSettingsJson['deployExistingModel']['deployModeluri'] = request.POST.get('deploymodel')
configSettingsJson['modelOutput']['cloudInfrastructure'] = request.POST.get('problemname')
configSettingsJson['endpointName'] = request.POST.get('endpointname')
configSettingsJson['awsSagemaker']['awsID'] = request.POST.get('awskeyid1')
configSettingsJson['awsSagemaker']['accesskeyID'] = request.POST.get('accesskey1')
configSettingsJson['awsSagemaker']['secretAccesskey'] = request.POST.get('secretaccess1')
configSettingsJson['awsSagemaker']['sessionToken'] = request.POST.get('token1')
configSettingsJson['awsSagemaker']['region'] = request.POST.get('region1')
configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn'] = request.POST.get('fullaccess')
conv = json.dumps(configSettingsJson)
'''
filetimestamp = str(int(time.time()))
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'Sagemaker' + filetimestamp + '.json')
with open(config_json_filename, "w") as fpWrite:
fpWrite.write(conv)
fpWrite.close()
'''
from bin.aion_sagemaker import aion_sagemaker
aion_sagemaker(configSettingsJson)
#print(conv)
#scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_sagemaker.py'))
#outputStr = subprocess.check_output([sys.executable, scriptPath, conv])
#outputStr = outputStr.decode('utf-8')
#outputStr=outputStr.strip()
#print('kir',outputStr)
                outputStr = ''  # the run_sagemaker subprocess path above is commented out, so default this to avoid a NameError in the context below
                context = {'convert':'convert','sagemaker1':'sagemaker1','mlflow':'mlflow','inputtype':modelinput,'outputtype':modeloutput,'deploy':outputStr,'selected':'mllite','version':AION_VERSION}
else:
context={'exception':'exception','error':'Please Enter Valid Inputs','selected':'mllite','version':AION_VERSION}
except Exception as e:
print(e)
context={'exception':'exception','error':'Error during Conversion','selected':'mllite','version':AION_VERSION}
return render(request, 'ConvertOnnx.html',context)
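# sageprediction builds a single-record payload from parallel comma-separated
# 'keys'/'values' form fields (e.g. keys="age,salary", values="25,50000" ->
# {"age": "25", "salary": "50000"}), merges it with the AWS credentials from
# the form, and shells out to bin/run_sagemaker.py to query the named endpoint.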
def sageprediction(request):
#print("=========asdecdefefefefefefefef=======")
values = request.POST['value']
keys = request.POST['keys']
endpoint = request.POST['endpointname']
x = keys.split(",")
y = values.split(",")
dictionary = {key:value for key, value in zip(x,y)}
awsSagemaker={}
awsSagemaker['awsID'] = request.POST['awsaccountid']
awsSagemaker['accesskeyID'] = request.POST['accesskeyid']
awsSagemaker['secretAccesskey'] = request.POST['secretaccesskey']
awsSagemaker['sessionToken'] = request.POST['sessionToken']
awsSagemaker['region'] = request.POST['region']
configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
    with open(configFile, "r") as f:
        configSettings = f.read()
configSettingsJson = json.loads(configSettings)
awsSagemaker['IAMSagemakerRoleArn'] = configSettingsJson['awsSagemaker']['IAMSagemakerRoleArn']
configSettingsJson['awsSagemaker'] = awsSagemaker
configSettingsJson['data'] = dictionary
configSettingsJson['endpointName'] = endpoint
configSettingsJson['prediction']['status'] = 'True'
conv = json.dumps(configSettingsJson)
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_sagemaker.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath, conv])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr=outputStr.strip()
output = json.loads(outputStr)
if output['status'] == 'SUCCESS':
outputStr = output['data']
outputStr = pd.json_normalize(outputStr)
outputStr = outputStr.to_html()
else:
outputStr = output['msg']
return HttpResponse(outputStr)
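# runtimeutility routes runtime testing by input file type: for CSV input it
# first serializes the key/value form fields to DEFAULT_FILE_PATH/runtime.json
# and passes that file to runTimeTesting; otherwise the data file path is
# handed to runTimeTesting as-is.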
def runtimeutility(request):
if request.method == "POST":
models = request.POST['model']
datafile = request.POST['datafile']
inputDataType = datafile.rsplit('.', 1)[1]
if inputDataType.lower() == 'csv':
values = request.POST['value']
keys = request.POST['keys']
x = keys.split(",")
y = values.split(",")
dictionary = {key:value for key, value in zip(x,y)}
jsondata = json.dumps(dictionary, indent = 4)
#print(jsondata)
config_json_filename = os.path.join(DEFAULT_FILE_PATH, 'runtime.json')
#print(config_json_filename)
            with open(config_json_filename, "w") as fpWrite:
                fpWrite.write(jsondata)
from conversions.runtime_utility import runTimeTesting
#scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','conversions', 'runtime_utility.py'))
config_json_file = os.path.join(DEFAULT_FILE_PATH, 'runtime.json')
#outputStr = subprocess.check_output([sys.executable, scriptPath, models, config_json_file])
#outputStr = outputStr.decode('utf-8')
outputStr=runTimeTesting(models,config_json_file)
# context = {'outputStr':outputStr,'modeltype':modeltype}
else:
from conversions.runtime_utility import runTimeTesting
outputStr=runTimeTesting(models,datafile)
    return HttpResponse(outputStr)
# Generated by Django 3.2.8 on 2023-03-29 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0008_existusecases_publishtask'),
]
operations = [
migrations.RemoveField(
model_name='existusecases',
name='publishtask',
),
migrations.AddField(
model_name='existusecases',
name='publishPID',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='existusecases',
name='Version',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='existusecases',
name='portNo',
field=models.IntegerField(default=0),
),
]
# Generated by Django 3.0.8 on 2020-08-03 12:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='existusecases',
name='ModelName',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modelTraining.usecasedetails'),
),
migrations.AlterField(
model_name='existusecases',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
            model_name='usecasedetails',
name='Description',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='usecasedetails',
name='UsecaseName',
field=models.CharField(max_length=50),
),
]
# Generated by Django 3.0.8 on 2020-08-01 17:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Existusecases',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ModelName', models.CharField(max_length=200)),
('Version', models.IntegerField()),
('DataFilePath', models.FileField(upload_to=None)),
('ConfigPath', models.FileField(upload_to=None)),
('DeployPath', models.FileField(upload_to=None)),
('Status', models.CharField(max_length=200)),
],
options={
'db_table': 'Existusecases',
},
),
migrations.CreateModel(
name='usecasedetails',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('UsecaseName', models.CharField(max_length=20)),
('Description', models.CharField(max_length=100)),
],
options={
'db_table': 'usecasedetails',
},
),
]
# Generated by Django 3.2.8 on 2023-03-28 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0006_auto_20230206_1759'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='driftStatus',
field=models.CharField(default='', max_length=20),
),
migrations.AddField(
model_name='existusecases',
name='portNo',
field=models.CharField(default='', max_length=5),
),
migrations.AddField(
model_name='existusecases',
name='publishStatus',
field=models.CharField(default='', max_length=20),
),
migrations.AlterField(
model_name='existusecases',
name='ProblemType',
field=models.CharField(default='', max_length=20),
),
]
# Generated by Django 3.2.8 on 2023-02-06 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0004_existusecases_problemtype'),
]
operations = [
migrations.AddField(
model_name='usecasedetails',
name='UserDefinedName',
field=models.CharField(default=models.CharField(max_length=50), max_length=50),
),
]
# Generated by Django 3.2.8 on 2023-02-06 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0005_usecasedetails_userdefinedname'),
]
operations = [
migrations.RemoveField(
model_name='usecasedetails',
name='UserDefinedName',
),
migrations.AddField(
model_name='usecasedetails',
name='usecaseid',
field=models.CharField(default=models.CharField(max_length=50), max_length=10),
),
]
# Generated by Django 3.2.8 on 2023-03-29 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0009_auto_20230329_0541'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='modelType',
field=models.CharField(default='', max_length=40),
),
]
# Generated by Django 3.2.8 on 2022-10-28 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0003_existusecases_trainouputlocation'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='ProblemType',
field=models.CharField(default='', max_length=100),
),
]
# Generated by Django 3.0.8 on 2020-09-18 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0002_auto_20200803_1820'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='TrainOuputLocation',
field=models.CharField(default='', max_length=200),
),
]
# Generated by Django 3.2.8 on 2023-03-28 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0007_auto_20230328_1823'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='publishtask',
field=models.CharField(default='', max_length=500),
),
]
# Generated by Django 4.1.7 on 2023-05-17 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelTraining', '0010_existusecases_modeltype'),
]
operations = [
migrations.AddField(
model_name='existusecases',
name='trainingPID',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='ProblemType',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='TrainOuputLocation',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='driftStatus',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='modelType',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='portNo',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='existusecases',
name='publishPID',
field=models.IntegerField(blank=True, null=True),
),
]
from django.contrib.staticfiles.management.commands.runserver import Command as RunServer
class Command(RunServer):
def check(self, *args, **kwargs):
self.stdout.write(self.style.WARNING("SKIPPING SYSTEM CHECKS!\\n"))
def check_migrations(self, *args, **kwargs):
        self.stdout.write(self.style.WARNING("SKIPPING MIGRATION CHECKS!\\n"))
from django import forms
from modelTraining.models import usecasedetails, Existusecases
class usecasedetailsForm(forms.ModelForm):
    class Meta:
        model = usecasedetails
        fields = "__all__"
class ExistusecasesForm(forms.ModelForm):
class Meta:
model = Existusecases
        fields = "__all__"
"""
Django settings for mpgWebApp project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from os.path import expanduser
import platform
from appbe.dataPath import DATA_DIR
#from cloghandler import ConcurrentRotatingFileHandler
sql_database_path = os.path.join(DATA_DIR,'sqlite')
os.makedirs(sql_database_path, exist_ok=True)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
DATA_UPLOAD_MAX_MEMORY_SIZE = None
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath()))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y8d*&k0jv4c*zu^ykqz$=yyv@(lcmz495uj^()hthjs=x&&g0y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appfe.modelTraining',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'appfe.ux.error_handler.ErrorHandlerMiddleware'
]
ROOT_URLCONF = 'appfe.ux.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'appfe.ux.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(sql_database_path, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,'static')
from django.http import HttpResponse
from django.conf import settings
import traceback
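# Django middleware hook: __call__ wraps normal request handling, while
# process_exception is invoked only when a view raises, letting this class
# report the error (outside DEBUG) and return a generic 500 response.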
class ErrorHandlerMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_exception(self, request, exception):
if not settings.DEBUG:
if exception:
# Format your message here
message = "**{url}**\\n\\n{error}\\n\\n````{tb}````".format(
url=request.build_absolute_uri(),
error=repr(exception),
tb=traceback.format_exc()
)
# Do now whatever with this message
# e.g. requests.post(<slack channel/teams channel>, data=message)
                return HttpResponse("Error processing the request.", status=500)
"""mpgWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, re_path
from appfe.modelTraining import views
from appfe.modelTraining import upload_views
from appfe.modelTraining import bc_views
from appfe.modelTraining import mltest_views
from appfe.modelTraining import train_views
from appfe.modelTraining import dg_views
from appfe.modelTraining import settings_views
from appfe.modelTraining import drift_views
from appfe.modelTraining import landing_views
from appfe.modelTraining import mllite_views
from appfe.modelTraining import trustedAI_views
from appfe.modelTraining import llm_views
from appfe.modelTraining import visualizer_views as v
from appfe.modelTraining import prediction_views
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('appfe.api.urls')),
path('', views.index, name="index"),
re_path('^$',views.index,name='Homepage'),
re_path('prediction', prediction_views.Prediction, name="Prediction"),
path('edit/<int:id>', views.edit),
path('update/<int:id>', views.update),
path('opentraining/<int:id>/<int:currentVersion>',views.opentraining),
path('opentraininglogs/<int:id>/<int:currentVersion>',landing_views.opentraininglogs),
path('show',views.show,name="show"),
path('ucdetails/<int:id>',views.ucdetails,name='ucdetails'),
path('delete/<int:id>', views.destroy,name='DeleteUseCase'),
path('deleteversion/<int:id>',views.remove_version,name='RemoveVersion'),
path('deletes3Bucket/<str:name>', settings_views.removes3bucket,name='removes3bucket'),
path('deleteGcsBucket/<str:name>', settings_views.removegcsbucket,name='removegcsbucket'),
path('deleteAzureBucket/<str:name>', settings_views.removeazurebucket,name='removeazurebucket'),
path('publish/<int:id>',views.publish),
path('createpackagedocker/<int:id>/<int:version>',views.createpackagedocker),
path('stoptraining',train_views.stoptraining),
path('downloadPackage/<int:id>/<int:version>',views.downloadpackage),
re_path('startmodelservice',views.startmodelservice,name="startmodelservice"),
re_path('stopmodelservice',views.stopmodelservice,name="stopmodelservice"),
path('retrain/<int:id>/<int:currentVersion>', landing_views.retrain),
re_path('computetoAWS',settings_views.computetoAWS,name='computeInfrastructure'),
re_path('computetoLLaMMA7b',settings_views.computetoLLaMMA7b,name='computeInfrastructure'),
re_path('computetoGCPLLaMA13B',settings_views.computetoGCPLLaMA13B,name='computeInfrastructure'),
re_path('help',views.help,name = "help"),
re_path('mlac_userguide',views.mlac_userguide,name = "mlac_userguide"),
path('launchmodel/<int:id>/<int:version>', landing_views.launchmodel),
path('modxplain/<int:id>/<int:version>', landing_views.modxplain),
path('moddrift/<int:id>/<int:version>',landing_views.moddrift),
re_path('ConvertOnnx', mllite_views.ConvertOnnx, name="ConvertOnnx"),
re_path('runtimeutility', mllite_views.runtimeutility, name="runtimeutility"),
re_path('sagepredict', mllite_views.sageprediction, name="sageprediction"),
re_path('mlstyles', views.mlstyles, name="mlstyles"),
re_path('mltrain', views.mltrain, name="mltrain"),
re_path('usecasefilter', views.usecasefilter, name="usecasefilter"),
re_path('mlpredict', views.mlpredict, name="mlpredict"),
re_path('getdataclasses',views.getdataclasses,name="getdataclasses"),
re_path('usecases', views.AIusecases, name="AIusecases"),
re_path('modelkafka',views.modelkafka,name="ModelKafka"),
re_path('AionProblem', views.AionProblem, name="AionProblem"),
re_path('UQTesting', mltest_views.UQTesting, name="UQTesting"),
re_path('maaccommand',views.maaccommand,name='MAAC'),
re_path('GCSbucketAdd',settings_views.GCSbucketAdd,name="gcsbucket"),
re_path('adds3bucket',settings_views.adds3bucket,name="adds3bucket"),
re_path('azurestorageAdd',settings_views.azurestorageAdd,name="azurestorageAdd"),
re_path('features', views.features, name="features"),
re_path('downloadedareport',upload_views.downloadedareport,name="downloadedareport"),
re_path('downloadxplainreport',views.downloadxplainreport,name="downloadxplainreport"),
re_path('downlpredictreport',views.downlpredictreport,name="DownloadPrediction"),
re_path('LoadBasicConfiguration',views.LoadBasicConfiguration,name='LoadBasicConfiguration'),
re_path('LoadAdvanceConfiguration',views.LoadAdvanceConfiguration,name='LoadAdvanceConfiguration'),
re_path('uploaddatafromscript',upload_views.uploaddatafromscript,name='uploaddatafromscript'),
re_path('uploadDatafromSatandardDataset',upload_views.uploadDatafromSatandardDataset,name="uploadDatafromSatandardDataset"),
re_path('uploadDatafromunsupervisedmodel',views.uploadDatafromunsupervisedmodel,name="uploadDatafromunsupervisedmodel"),
re_path('mltesting',mltest_views.mltesting,name='mltesting'),
re_path('mllite',views.mllite,name="MLLite"),
re_path('settings',settings_views.settings_page,name="settings"),
re_path('openneural',mllite_views.openneural,name="openneural"),
re_path('Tfliteneural',mllite_views.Tfliteneural,name="Tfliteneural"),
re_path('encryptedpackage',views.encryptedpackage,name='encryptedpackage'),
re_path('ABtesting', mltest_views.ABtest, name="ABtesting"),
re_path('uploadedData', upload_views.uploadedData, name='uploadedData'),
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
re_path('uploadedTextData', llm_views.uploadedTextData, name='uploadedTextData'),
re_path('getTextLabel', llm_views.getTextLabel, name='getTextLabel'),
re_path('downloadTextLabelReport',llm_views.downloadTextLabelReport,name="downloadTopicReport"),
# --------------------------------------------------------
# QnA Generator using LLM related changes
# --------------------------------------------------------
re_path('genearateQA', llm_views.genearateQA, name='genearateQA'),
re_path('downloadQnAReport',llm_views.downloadQnAReport,name="downloadQnAReport"),
# --------------------------------------------------------
re_path('advanceconfig', bc_views.savebasicconfig, name='Advance'),
re_path('edaReport',upload_views.EDAReport,name='edareport'),
re_path('readlogfile',views.readlogfile,name="readlogfile"),
re_path('flcommand',views.flcommand,name="flcommand"),
re_path('openmlflow',views.mlflowtracking,name="MLflow"),
re_path('basicconfig',bc_views.basicconfig,name='basicConfig'),
re_path('Advance',views.Advance,name='Advance'),
re_path('uploaddata', views.uploaddata, name='uploaddata'),
re_path('dataupload', views.Dataupload, name='dataupload'),
re_path('trainmodel', train_views.trainmodel, name='next'),
#Sagemaker
re_path('Sagemaker',mllite_views.Sagemaker,name="Sagemaker"),
re_path('batchlearning',views.batchlearning,name="batchlearning"),
# EDA Reports changes
re_path('gotoreport', views.gotoreport, name='report'),
re_path('llmmodelevaluate',train_views.llmmodelevaluate, name='llmmodelevaluate'),
# EDA Visualization changes
re_path('getgraph',views.getgraph,name="getgraph"),
# Fairness Metrics changes
re_path('getmetrics',views.getmetrics,name="getmetrics"),
re_path('getDeepDiveData',views.getDeepDiveData,name="getDeepDiveData"),
# 12686:Data Distribution related Changes
re_path('getDataDistribution',views.getDataDistribution,name="getDataDistribution"),
re_path('licensekey',views.licensekey,name="licensekey"),
# -------------------------------- Graviton-Integration Changes S T A R T --------------------------------
re_path('getuserdata',views.getuserdata,name="getuserdata"),
re_path('getdataservice',views.getdataservice,name="getdataservice"),
# ------------------------------------------------ E N D -------------------------------------------------
re_path('getdataimbalance',views.getdataimbalance,name="getdataimbalance"),
re_path('trainresult',train_views.trainresult,name='trainresult'),
re_path('LoadDataForSingleInstance',views.LoadDataForSingleInstance,name='LoadDataForSingleInstance'),
re_path('PredictForSingleInstance',prediction_views.PredictForSingleInstance,name='PredictForSingleInstance'),
re_path('stateTransitionSettings',views.stateTransitionSettings,name='stateTransitionSettings'),
re_path('instancepredict',views.instancepredict,name='predict'),
re_path('onnxruntime',views.onnxruntime,name='onnxruntime'),
re_path('home',views.Dataupload,name='manage'),
re_path('show',views.show,name="show"),
re_path('delete',views.show,name="delete"),
re_path('inputdrift', landing_views.inputdrift, name='inputdrift'),
re_path('dotextSummarization',views.dotextSummarization,name='textSummarization'),
re_path('outputdrift', views.outputdrift, name='outputdrift'),
re_path('xplain', v.xplain, name='xplain'),
re_path('sensitivity', trustedAI_views.sensitivityAnalysis, name='sensitivity'),
re_path('fairnesmetrics', trustedAI_views.fairnesmetrics, name='fairnesmetrics'),
re_path('handlefairness', trustedAI_views.handlefairness, name='handlefairness'),
re_path('performance', trustedAI_views.performance_metrics, name='performance'),
re_path('uquncertainty', trustedAI_views.uquncertainty, name='uquncertainty'),
re_path('uqtransparency', trustedAI_views.uqtransparency, name='uqtransparency'),
re_path('RLpath',views.RLpath,name='RLpath'),
path('opendetailedlogs/<int:id>/<int:currentVersion>', views.opendetailedlogs, name='logfile'),
path('downloadlogfile/<int:id>/<int:currentVersion>',views.downloadlogfile),
path('openmodelevaluation/<int:id>',views.openmodelevaluation,name='openmodelevaluation'),
re_path('startPublishServices',settings_views.startPublishServices,name="PublishService"),
re_path('startKafka',settings_views.startKafka,name='startKafka'),
re_path('startService',views.startService,name='startService'),
re_path('startTracking',views.startTracking,name="startTracking"),
re_path('Drift', drift_views.Drift, name='Drift'),
re_path('Distribution', drift_views.Distribution, name='Distribution'),
re_path('newfile', views.newfile, name='newfile'),
re_path('Evaluate', drift_views.Evaluate, name='Evaluate'),
re_path('qlearning',views.qlearning,name='qlearning'),
re_path('listfiles',upload_views.listfiles,name='listfiles'),
#url('actionavalanche',views.actionavalanche,name='actionavalanche'),
re_path('sqlAlchemy',upload_views.sqlAlchemy,name='sqlAlchemy'),
re_path('submitquery',upload_views.submitquery,name='submitquery'),
re_path('validatecsv',upload_views.validatecsv,name='validatecsv'),
path('ObjLabelAdd/<int:id>',views.ObjLabelAdd),
path('objectlabel/<int:id>',views.objectlabel),
path('imagelabel/<int:id>',views.imagelabel),
path('ObjLabelRemove/<int:id>',views.ObjLabelRemove),
re_path('objectlabelling',views.objectlabelling,name='objectlabelling'),
re_path('imagelabelling',views.imagelabelling,name='imagelabelling'),
re_path('ObjLabelDiscard',views.ObjLabelDiscard,name='ObjLabelDiscard'),
re_path('ObjLabelNext',views.ObjLabelNext,name='ObjLabelNext'),
re_path('ObjLabelPrev',views.ObjLabelPrev,name="ObjLabelPrev"),
re_path('saveaionconfig',settings_views.saveaionconfig,name='saveaionconfig'),
re_path('savegravitonconfig',settings_views.savegravitonconfig,name='savegravitonconfig'),
re_path('saveopenaiconfig',settings_views.saveopenaiconfig,name="saveopenaiconfig"),
re_path('getvalidateddata',views.getvalidateddata,name="getvalidateddata"),
re_path('updateawsconfig',settings_views.amazonec2settings,name="amazonec2settings"),
re_path('updategcpconfig',settings_views.gcpcomputesettings,name="gcpcomputesettings"),
re_path('localsetings',views.localsetings,name="localsetings"),
re_path('ImgLabelNext',views.ImgLabelNext,name='ImgLabelNext'),
re_path('objectlabeldone',views.objectlabeldone,name='ObjectLabeling'),
re_path(r'^get_table_list', upload_views.get_table_list, name='get_table_list'),
re_path(r'^getdatasetname', views.getdatasetname, name='getdatasetname'),
    re_path(r'^get_tables_fields_list', upload_views.get_tables_fields_list, name='get_tables_fields_list'),
re_path(r'^validate_query', upload_views.validate_query, name='validate_query'),
re_path(r'^trigger_DAG', views.trigger_DAG, name = 'trigger_DAG'),
# The home page
path('dataoperations', views.dataoperations, name='dataoperations'),
path('summarization',views.summarization,name='summarization'),
path('datalabel', views.datalabel, name='datalabel'),
path('upload_and_read_file_data', views.upload_and_read_file_data, name='upload_and_read_file_data'),
path('apply_rule', views.apply_rule, name='apply_rule'),
path('apply_rule_ver2', views.apply_rule_ver2, name='apply_rule_ver2'),
path('download_result_dataset', views.download_result_dataset, name='download_result_dataset'),
path('get_sample_result_of_individual_rule', views.get_sample_result_of_individual_rule,
name='get_sample_result_of_individual_rule'),
path('get_sample_result_of_individual_rule_ver2', views.get_sample_result_of_individual_rule_ver2,
name='get_sample_result_of_individual_rule_ver2'),
path('upload_and_read_test_data', views.upload_and_read_test_data, name='upload_and_read_test_data'),
path('get_label_and_weightage', views.get_label_and_weightage, name='get_label_and_weightage'),
path('datagenrate', dg_views.datagenrate, name='datagenrate'),
path('generateconfig', dg_views.generateconfig, name='generateconfig'),
path('StationarySeasonalityTest', views.StationarySeasonalityTest, name='StationarySeasonalityTest'),
path('modelcompare', views.modelcompare, name='modelcompare'),
path('textsummarization', views.textsummarization, name='textsummarization'),
path('azureOpenAiDavinci', llm_views.azureOpenAiDavinci, name='azureOpenAiDavinci'),
path('azureOpenAiDavinciSumarization', llm_views.azureOpenAiDavinciSumarization, name='azureOpenAiDavinciSumarization'),
# LLM Testing
path('llmtesting', views.llmtesting, name='llmtesting'),
path('llmtestingresult', views.llmtestingresult, name='llmtestingresult'),
re_path('llmtestreport',views.llmtestreport,name="llmtestreport"),
# Code Clone Detection
path('codeclonedetectionresult', views.codeclonedetectionresult, name='codeclonedetectionresult'),
re_path('codeclonereport',views.codeclonereport,name="codeclonereport"),
re_path('evaluateprompt',views.evaluatepromptmetrics,name="evaluatepromptmetrics"),
path('libraries', views.libraries, name='libraries'), #To display libraries
]
#df=pd.read_csv("C:\\Project\\Analytics\\Deployment\\germancredit_9\\germancreditdata.csv")
#
#bool_cols = [col for col in df if np.isin(df[col].dropna().unique(), [0, 1]).all()]
#
#bool_cols
"""
WSGI config for ux project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings')
application = get_wsgi_application()
"""
ASGI config for ux project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ux.settings')
application = get_asgi_application()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#from sklearn.externals import joblib
import joblib
# import pyreadstat
# import sys
# import math
import time
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
import argparse
import json
import os
import pathlib
from tensorflow.keras.models import load_model
# from tensorflow.keras import backend as K
import tensorflow as tf
# from sklearn.decomposition import LatentDirichletAllocation
from pathlib import Path
#from aionUQ import aionUQ
from uq_main import aionUQ
import os
from datetime import datetime
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument('savFile')
parser.add_argument('csvFile')
parser.add_argument('features')
parser.add_argument('target')
args = parser.parse_args()
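# Illustrative invocation (the actual script file name is not shown in this dump):
#   python uq_testing.py <model.sav or keras-model-dir> data.csv "feat1,feat2" target_column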
from appbe.dataPath import DEPLOY_LOCATION
args.features = [x.strip() for x in args.features.split(',')]
models = args.savFile
if Path(models).is_file():
# if Path(args.savFile.is_file()):
model = joblib.load(args.savFile)
# print(model.__class__.__name__)
# print('class:',model.__class__)
# print(type(model).__name__)
# try:
# print('Classess=',model.classes_)
# except:
# print("Classess=N/A")
# print('params:',model.get_params())
# try:
# print('fea_imp =',model.feature_importances_)
# except:
# print("fea_imp =N/A")
ProblemName = model.__class__.__name__
Params = model.get_params()
# print("ProblemName: \\n",ProblemName)
# print("Params: \\n",Params)
# print('ProblemName:',model.__doc__)
# print(type(ProblemName))
    if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
Problemtype = 'Classification'
else :
Problemtype = 'Regression'
if Problemtype == 'Classification':
df = pd.read_csv(args.csvFile)
object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object']
df = df.drop(object_cols, axis=1)
df = df.dropna(axis=1)
df = df.reset_index(drop=True)
modelfeatures = args.features
# dfp = df[modelfeatures]
tar = args.target
# target = df[tar]
y=df[tar]
X = df.drop(tar, axis=1)
#for dummy test,train values pass
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar)
#accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,"uqtest")
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification()
# print("UQ Classification: \\n",output_jsonobject)
print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per)
print("End of UQ Classification.\\n")
else:
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
# print("modelfeatures: \\n",modelfeatures)
# print("type modelfeatures: \\n",type(modelfeatures))
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
#Not used, just dummy X,y split
y=df[tar]
X = df.drop(tar, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject)
print("End of UQ reg\\n")
elif Path(models).is_dir():
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
model = load_model(models)
ProblemName = model.__class__.__name__
Problemtype = 'Classification'
# print('class:',model.__class__)
# print('class1',model.__class__.__name__)
# print(model.summary())
# print('ProblemName1:',model.get_config())
    def get_model_summary(model: tf.keras.Model) -> str:
        # Capture the printed Keras model summary as a string
        lines = []
        model.summary(print_fn=lambda x: lines.append(x))
        return '\\n'.join(lines)
    Params = get_model_summary(model)
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
df3 = dfp.astype(np.float32)
predic = model.predict(df3)
if predic.shape[-1] > 1:
predic = np.argmax(predic, axis=-1)
else:
predic = (predic > 0.5).astype("int32")
    matrixconfusion = pd.DataFrame(confusion_matrix(target, predic))
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output = {}
output["Precision"] = "%.3f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.3f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.3f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
print(json.dumps(output))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from matplotlib import pyplot
import sys
import os
import json
import matplotlib.pyplot as plt
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.algorithms.ucc_recalibration import UCCRecalibration
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
from uq360.metrics.regression_metrics import compute_regression_metrics
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
# from math import sqrt
from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error
# from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by |
_feature
#Added libs from MLTest
import sys
import time
from sklearn.metrics import confusion_matrix
from pathlib import Path
import logging
# import json
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation):
try:
#print("Inside aionUQ init\\n ")
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
self.log = logging.getLogger('eion')
self.basemodel=model
self.model_name=ProblemName
self.Deployment = os.path.join(deployLocation,'log','UQ')
os.makedirs(self.Deployment,exist_ok=True)
self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph')
os.makedirs(self.uqgraphlocation,exist_ok=True)
except Exception as e:
self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
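    # totalUncertainty fits a BlackboxMetamodelRegression on a train/test split
    # and summarizes interval quality with PICP (prediction interval coverage
    # probability: the fraction of test targets inside [lb, ub]) and MPIW (mean
    # prediction interval width); uncertainty % is reported as 100 - PICP%.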
def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus):
from sklearn.model_selection import train_test_split
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest
# y_val = y_train.append(y_test)
else:
# y_val = self.y
df=self.data
y=df[self.targetFeature]
X = df.drop(self.targetFeature, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
        # Use the trained model's 'criterion' hyperparameter as the UQ scoring metric when present, else default to 'picp'
        uq_scoring_param = model_params.get('criterion') or 'picp'
# from sklearn.tree import DecisionTreeRegressor
# from sklearn.linear_model import LinearRegression,Lasso,Ridge
# from sklearn import linear_model
# from sklearn.ensemble import RandomForestRegressor
        if uq_scoring_param not in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
            uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
self.log.info('Model total picp_percentage : '+str(picp_percentage))
return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
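    # display_results plots observed vs. predicted values with upper/lower
    # uncertainty bounds sorted by the selected feature, and writes the
    # confidence-interval and per-feature PICP plots to the deployment and
    # UQ graph locations.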
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=''.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
pred_df_sorted = pred_df.sort_values(by=x_feature)
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature)
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'):
os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png')
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
except Exception as e:
# #print("display exception: \\n",e)
self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
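    # classUncertainty averages the predictor's selection score per predicted
    # class, giving a per-class confidence estimate (logged as a percentage).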
def classUncertainty(self,pred,score):
try:
outuq = {}
classes = np.unique(pred)
for c in classes:
ids = pred == c
class_score = score[ids]
predc = 'Class_'+str(c)
outuq[predc]=np.mean(class_score)
x = np.mean(class_score)
#Uncertaininty in percentage
x=x*100
self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x)))
return outuq
except Exception as e:
# #print("display exception: \\n",e)
self.log.info('<!------------- UQ classUncertainty Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
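    # uqMain_BBMClassification: rebuilds the trained estimator class from its
    # name, wraps it in CalibratedClassifierCV (sigmoid, cv=3) to obtain
    # calibrated probabilities, fits a BlackboxMetamodelClassification on top,
    # and derives ECE, model confidence/uncertainty, and a deployment
    # recommendation.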
def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus):
try:
# print("Inside uqMain_BBMClassification\\n")
# print("lenth of x_train {}, x_test {}, y_train {}, y_test {}".format(x_train, x_test, y_train, y_test))
aionstatus = str(aionstatus)
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.neighbors import KNeighborsClassifier
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
#print(model_name)
try:
#geting used features
model_used_features=self.basemodel.feature_names_in_
self.log.info("Base model used training features are (UQ Testing): \\n"+str(model_used_features))
except:
pass
model_params=self.basemodel.get_params()
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name.lower() == "logisticregression"):
basemodel=LogisticRegression
elif(model_name == "XGBClassifier"):
basemodel=XGBClassifier
elif(model_name == "LGBMClassifier"):
basemodel=LGBMClassifier
else:
basemodel=LogisticRegression
            calibrated_mdl=None
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
else:
from sklearn.calibration import CalibratedClassifierCV
base_mdl = basemodel(**model_params)
calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3)
basemodelfit = calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base=calibrated_mdl.predict_proba(X_test)[:, :]
cal_model_params=calibrated_mdl.get_params()
acc_score_base=accuracy_score(y_test, basepredict)
base_estimator_calibrate = cal_model_params['base_estimator']
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,
base_config=model_params, meta_config=model_params)
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
y_t_pred, y_t_score = uq_model.predict(X_test)
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
if(aionstatus == "aionuq"):
test_accuracy_perc=round(test_accuracy_perc,2)
                #uq_aurrrc is not used in any AION GUI configuration, so it is initialized to 0; calling area_under_risk_rejection_rate_curve() pops a plot in the command prompt and interrupts execution.
                uq_aurrrc=0
else:
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
# This done by kiran, need to uncomment for GUI integration.
# bbm_c_plot_sub = bbm_c_plot[4]
bbm_c_plot_sub = bbm_c_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'):
os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
# bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10 )
# This done by kiran, need to uncomment for GUI integration.
# re_plot_sub = re_plot[4]
re_plot_sub = re_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'):
os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png')
# re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png')
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
test_accuracy_perc=round(test_accuracy_perc)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False)
# uq_aurrrc=uq_aurrrc
confidence_score=acc_score_base-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(y_t_score)
model_confidence=mean_predprob_total
model_uncertainty = 1-mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
y_val = np.append(y_train,y_test)
else:
y_val = self.y
self.log.info('------------------> Model Confidence Score '+str(model_confidence))
outuq = self.classUncertainty(y_t_pred,y_t_score)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
model_confidence_per=round((model_confidence*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendation = ""
            if (uq_ece > 0.5):
                # RED text
                recommendation = 'Model has a high ECE (expected calibration error) score compared to the threshold (0.5), so it is not ready to deploy. Add more input data across all feature ranges to train the base model, and try different classification algorithms/ensembling to reduce the ECE (towards 0).'
            else:
                # self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.')
                if (uq_ece <= 0.1 and model_confidence >= 0.9):
                    # Green Text
                    recommendation = 'Model has an excellent calibration score (near 0) and a good confidence score; ready to deploy.'
                else:
                    # Orange
                    recommendation = 'Model has a good ECE score (between 0.1 and 0.5) but a lower confidence score than the threshold (90%). The model can be improved by adding more input data across all feature ranges and evaluating different algorithms/ensembling.'
#Adding each class uncertainty value
classoutput = {}
for k,v in outuq.items():
classoutput[k]=(str(round((v*100),2)))
output['classes'] = classoutput
output['ModelConfidenceScore']=(str(model_confidence_per))
output['ExpectedCalibrationError']=str(uq_ece_per)
output['ModelUncertainty']=str(model_uncertainty_per)
output['Recommendation']=recommendation
# output['user_msg']='Please check the plot for more understanding of model uncertainty'
#output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4)
output['Accuracy']=str(acc_score_per)
output['Problem']= 'Classification'
#self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %'))
# #print("Prediction mean for the given model:",np.mean(y_hat),"\\n")
#self.log.info(recommendation)
#self.log.info("Model_confidence_score: " +str(confidence_score))
#self.log.info("Model_uncertainty: " +str(round(model_uncertainty,2)))
#self.log.info('Please check the plot for more understanding of model uncertainty.\\n.')
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f:
json.dump(output, f)
return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per
except Exception as inst:
self.log.info('\\n < ---------- UQ Model Execution Failed Start--------->')
self.log.info('\\n<------Model Execution failed!!!.' + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
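    # aion_confidence_plot draws the observed points, the mean prediction, and
    # the 'best'/'good' confidence bands as filled regions over the selected
    # feature, saving the figure to the UQ graph and deployment folders.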
    def aion_confidence_plot(self,df):
        df = df.sort_values(by=self.selectedfeature)
best_values=df.Best_values.to_list()
best_upper=df.Best__upper.to_list()
best_lower=df.Best__lower.to_list()
Total_Upper_PI=df.Total_Upper_PI.to_list()
Total_Low_PI=df.Total_Low_PI.to_list()
Obseved = df.Observed.to_list()
plt.plot(df[x_feature], df['Observed'], 'o', label='Observed')
plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red')
plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5)
plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5)
plt.legend()
plt.xlabel(self.selectedfeature)
plt.ylabel(self.targetFeature)
plt.title('UQ Best & Good Area Plot')
if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png')
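# --- Illustrative sketch (not part of the original class) ---
# A minimal, self-contained example of the fill_between-based interval plot drawn by
# aion_confidence_plot above, using synthetic data. All names and band widths here are
# hypothetical; this only documents the plotting pattern.
def _interval_plot_sketch(outfile='interval_plot_demo.png'):
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(0, 10, 50)
    y_mean = 2.0 * x + 1.0                                    # mean prediction
    y_obs = y_mean + np.random.RandomState(0).normal(0, 2.0, size=x.shape)
    good_lo, good_hi = y_mean - 3.0, y_mean + 3.0             # wider "good confidence" band
    best_lo, best_hi = y_mean - 1.5, y_mean + 1.5             # narrower "best confidence" band
    plt.plot(x, y_obs, 'o', label='Observed')
    plt.plot(x, y_mean, 'r--', lw=2, label='MeanPrediction')
    plt.fill_between(x, good_hi, good_lo, label='Good Confidence', color='lightblue', alpha=.5)
    plt.fill_between(x, best_lo, best_hi, label='Best Confidence', color='orange', alpha=.5)
    plt.legend()
    plt.savefig(outfile)
    plt.close()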
def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus):
aionstatus = str(aionstatus)
# if (aionstatus.lower() == 'aionuq'):
# X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
# else:
# X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
# modelName = ""
self.log.info('<!------------- Inside BlackBox MetaModel Regression process. ---------------> ')
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
# #print("model_params['criterion']: \\n",model_params['criterion'])
key = 'criterion'
try:
# Use the base model's 'criterion' hyper-parameter as the UQ scoring metric when it is set; otherwise fall back to 'picp'.
uq_scoring_param = model_params.get(key) or 'picp'
except Exception as inst:
uq_scoring_param='picp'
# modelname='sklearn.linear_model'+'.'+model_name
# X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
#Getting the trained model name so the same model type can be used in BlackboxMetamodelRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor
if (model_name == "DecisionTreeRegressor"):
basemodel=DecisionTreeRegressor
elif (model_name == "LinearRegression"):
basemodel=LinearRegression
elif (model_name == "Lasso"):
basemodel=Lasso
elif (model_name == "Ridge"):
basemodel=Ridge
elif(model_name == "RandomForestRegressor"):
basemodel=RandomForestRegressor
else:
basemodel=LinearRegression
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus)
if uq_scoring_param not in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
# #print("X_train.shape: \\n",X_train.shape)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('<!------------- observed_picp: ---------------> '+str(observed_alphas_picp))
self.log.info('<!------------- observed_widths_mpiw: ---------------> '+str(observed_widths_mpiw))
# UQ metamodel regression supports the following metrics: 'rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2'
#metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss.
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None)
metric_used=''
for k,v in metric_all.items():
metric_used=str(round(v,2))
self.log.info('<!------------- Metric used for regression UQ: ---------------> '+str(metric_all))
# Determine the confidence level and recommendation for the tester
# test_data=y_test
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
#Calculate total uncertainty for all features
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data)
# df1=self.data
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
recommendation=""
output={}
if (observed_alphas_picp >= 0.95 and total_picp >= 0.75):
# Add GREEN text
self.log.info('Model has good confidence for the selected feature, ready to deploy.\\n.')
recommendation = "Model has a good confidence score, ready to deploy."
elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)):
# Orange
recommendation = "Model has average confidence compared to the threshold (95%). Add more input data across all feature ranges to train the base model, and try different regression algorithms/ensembling."
self.log.info('Model has an average confidence score compared to the threshold; add more input data for training the base model and try UQ again.')
else:
# RED text
recommendation = "Model has less confidence compared to the threshold (95%). Add more input data across all feature ranges to train the base model, and try different regression algorithms/ensembling."
self.log.info('Model has a lower confidence score compared to the threshold; add more input data for training the base model and try UQ again.')
#Build uq json info dict
output['ModelConfidenceScore']=(str(total_picp_percentage)+'%')
output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%')
output['SelectedFeatureConfidence']=(str(picp_percentage)+'%')
output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%')
output['PredictionIntervalCoverageProbability']=observed_alphas_picp
output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw)
output['DesirableMPIWRange']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range)))
output['Recommendation']=str(recommendation)
output['Metric']=uq_scoring_param
output['Score']=metric_used
output['Problemtype']= 'Regression'
self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %'))
self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %'))
#self.log.info('Please check the plot for more understanding of model uncertainty.\\n.')
#self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub)
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f:
json.dump(output, f)
#To get best and medium UQ range of values from total predict interval
y_hat_m=y_hat.tolist()
y_hat_lb=y_hat_lb.tolist()
upper_bound=y_hat_ub.tolist()
y_hat_ub=y_hat_ub.tolist()
for x in y_hat_lb:
y_hat_ub.append(x)
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
X_test = np.squeeze(X_test)
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}
print(uq_dict)
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.')
#Call confidence plot fn only for UQTest interface
if (aionstatus.lower() == 'aionuq'):
#No need to showcase confidence plot for aion main
pass
else:
self.aion_confidence_plot(df)
'''
return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject)
except Exception as inst:
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
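# --- Illustrative sketch (not part of the original pipeline) ---
# A from-scratch numpy sketch of what expected_calibration_error computes above: bin
# samples by predicted confidence and accumulate the |accuracy - confidence| gap per
# bin, weighted by bin size. uq360's implementation is the source of truth; this
# assumes integer class labels and a (n_samples, n_classes) probability matrix.
def _ece_sketch(y_true, y_prob, num_bins=10):
    import numpy as np
    y_true = np.asarray(y_true)
    conf = np.max(y_prob, axis=1)      # confidence of the predicted class
    pred = np.argmax(y_prob, axis=1)   # predicted class label
    edges = np.linspace(0.0, 1.0, num_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (conf > lo) & (conf <= hi)
        if mask.any():
            acc = np.mean(pred[mask] == y_true[mask])
            ece += mask.mean() * abs(acc - conf[mask].mean())
    return ece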
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from matplotlib import pyplot
import sys
import os
import matplotlib.pyplot as plt
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
from uq360.metrics.regression_metrics import compute_regression_metrics
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature
import sys
import time
from sklearn.metrics import confusion_matrix
from pathlib import Path
import logging
import logging.config
from os.path import expanduser
import platform
from sklearn.utils import shuffle
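# --- Illustrative sketch (not part of the original module) ---
# Minimal numpy re-implementations of the two interval metrics used throughout the
# class below: PICP (fraction of observations that fall inside the prediction
# interval) and MPIW (mean width of that interval). The real code uses
# uq360.metrics.picp/mpiw; this sketch only documents what they measure.
def _interval_metrics_sketch(y_true, y_lower, y_upper):
    y_true, y_lower, y_upper = np.asarray(y_true), np.asarray(y_lower), np.asarray(y_upper)
    picp_val = np.mean((y_true >= y_lower) & (y_true <= y_upper))  # coverage
    mpiw_val = np.mean(y_upper - y_lower)                          # average width
    return picp_val, mpiw_val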
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature):
try:
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.log = logging.getLogger('aionUQ')
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
from appbe.dataPath import DEPLOY_LOCATION
self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time()))))
os.makedirs(self.Deployment,exist_ok=True)
self.basemodel=model
self.model_name=ProblemName
# self.X, self.y = shuffle(self.X, self.y)
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0)
self.xtrain = X_train
self.xtest = X_test
self.ytrain = y_train
self.ytest = y_test
# self.deployLocation=deployLocation
except Exception as e:
# self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def totalUncertainty(self,df,basemodel,model_params):
try:
# from sklearn.model_selection import train_test_split
# df=self.data
# y=df[self.targetFeature]
# X = df.drop(self.targetFeature, axis=1)
if (isinstance(self.selectedfeature,list)):
selectedfeature=[self.selectedfeature[0]]
selectedfeature=' '.join(map(str,selectedfeature))
else:
selectedfeature=self.selectedfeature
if (isinstance(self.targetFeature,list)):
targetFeature=[self.targetFeature[0]]
targetFeature=' '.join(map(str,targetFeature))
else:
targetFeature=self.targetFeature
X = self.data[selectedfeature]
y = self.data[targetFeature]
X = X.values.reshape((-1,1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
key = 'criterion'
try:
# Use the base model's 'criterion' hyper-parameter as the UQ scoring metric when it is set; otherwise fall back to 'picp'.
uq_scoring_param = model_params.get(key) or 'picp'
except Exception as inst:
uq_scoring_param='picp'
# from sklearn.tree import DecisionTreeRegressor
# from sklearn.linear_model import LinearRegression,Lasso,Ridge
# from sklearn import linear_model
# from sklearn.ensemble import RandomForestRegressor
if uq_scoring_param not in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
# self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
# self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
# self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
# self.log.info('Model total picp_percentage : '+str(picp_percentage))
except Exception as e:
print("totalUncertainty fn error: \\n",e)
# Re-raise so the caller sees the real error instead of a NameError on undefined locals.
raise
return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=','.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
x_feature1 = x_feature.split(',')
pred_df_sorted = pred_df.sort_values(by=x_feature1)
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature1[0])
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
'''
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
#plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
'''
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
picpPlot = os.path.join(self.Deployment,'picp_per_feature.png')
#pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
except Exception as e:
print("display exception: \\n",e)
# self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
return confidencePlot,picpPlot
def classUncertainty(self,predprob_base):
# from collections import Counter
predc="Class_"
classes = np.unique(self.y)
total = len(self.y)
list_predprob=[]
counter = Counter(self.y)
#for loop for test class purpose
for k,v in counter.items():
n_samples = len(self.y[self.y==k])
per = ((v/total) * 100)
prob_c=predprob_base[:,int(k)]
list_predprob.append(prob_c)
# #print("Class_{} : {}/{} percentage={}% \\n".format(k,n_samples,total,per ))
outuq={}
for k in classes:
predc += str(k)
mean_predprob_class=np.mean(predprob_base[:,int(k)]) # index the probability column directly instead of relying on Counter iteration order
uncertainty=1-mean_predprob_class
predc+='_Uncertainty'
outuq[predc]=uncertainty
predc="Class_"
return outuq
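# --- Illustrative sketch (not part of the original class) ---
# What classUncertainty above computes, as a standalone function: per-class
# uncertainty = 1 - mean predicted probability of that class column. Like the method
# itself, this assumes integer class labels that match the probability-matrix columns.
def _class_uncertainty_sketch(y, predprob):
    import numpy as np
    out = {}
    for k in np.unique(y):
        mean_prob = np.mean(predprob[:, int(k)])
        out['Class_' + str(k) + '_Uncertainty'] = 1 - mean_prob
    return out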
def uqMain_BBMClassification(self):
# self.log.info('<!------------- Inside BlackBox MetaModel Classification process. ---------------> ')
# import matplotlib.pyplot as plt
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
except:
##In the latest UQ360, the class was renamed from BlackboxMetamodelClassification to MetamodelClassification.
from uq360.algorithms.blackbox_metamodel import MetamodelClassification
# from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
# from sklearn import datasets
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# from sklearn.linear_model import LogisticRegression
# import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
try:
#getting the features used by the trained model
model_used_features=self.basemodel.feature_names_in_
except:
pass
X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name == "LogisticRegression"):
basemodel=LogisticRegression
else:
basemodel=LogisticRegression
try:
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
except:
##In the latest version, BlackboxMetamodelClassification was renamed to MetamodelClassification
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
#Test data pred, score
y_t_pred, y_t_score = uq_model.predict(X_test)
#predict probability
# uq_pred_prob=uq_model.predict_proba(X_test)
# predprob_base=basemodel.predict_proba(X_test)[:, :]
#if (model_name == "SVC" or model_name == "SGDClassifier"):
# if model_name in ['SVC','SGDClassifier']:
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
# CalibratedClassifierCV clones its estimator, so fit the base model itself before predicting with it.
basemodel.fit(X_train, y_train)
basepredict = basemodel.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
basemodel.fit(X_train, y_train)
basepredict = basemodel.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
else:
base_mdl = basemodel(**model_params)
basemodelfit = base_mdl.fit(X_train, y_train)
basepredict = base_mdl.predict(X_test)
predprob_base=base_mdl.predict_proba(X_test)[:, :]
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
'''
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
# This done by kiran, need to uncomment for GUI integration.
try:
bbm_c_plot_sub = bbm_c_plot[4]
bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png')
riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png')
except Exception as e:
print(e)
pass
riskPlot = ''
'''
riskPlot = ''
'''
try:
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10)
# This done by kiran, need to uncomment for GUI integration.
re_plot_sub = re_plot[4]
# re_plot_sub = re_plot
re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png')
reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png')
except Exception as e:
print(e)
pass
reliability_plot = ''
'''
reliability_plot = ''
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False)
confidence_score=acc_score-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
# #print("model_uncertainty1: \\n",model_uncertainty_ece)
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(predprob_base)
model_uncertainty = 1-mean_predprob_total
model_confidence=mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
outuq = self.classUncertainty(predprob_base)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
# model_confidence_per=round((model_confidence*100),2)
model_confidence_per=round((ece_confidence_score*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendation = ""
if (uq_ece > 0.5):
# RED text
recommendation = 'Model has a high ECE (expected calibration error) score compared to the threshold (50%), so it is not good to deploy. Add more input data across all feature ranges to train the base model, and try different classification algorithms/ensembling to reduce ECE (ECE~0).'
msg = 'Bad'
else:
# self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.')
if (uq_ece <= 0.1 and model_confidence >= 0.9):
# Green Text
recommendation = 'Model has the best calibration score (near 0) and a good confidence score, ready to deploy.'
msg = 'Best'
else:
# Orange
recommendation = 'Model has an average confidence score (ideal is >90% confidence) and a good ECE score (ideal is <10% error). Model can be improved by adding more training data across all feature ranges and re-training the model.'
msg = 'Good'
#Adding each class uncertainty value
output['Problem']= 'Classification'
output['recommend']= 'recommend'
output['msg']= msg
output['UQ_Area_Under_Risk_Rejection_Rate_Curve']=round(uq_aurrrc,4)
output['Model_Total_Confidence']=(str(model_confidence_per)+str('%'))
output['Expected_Calibration_Error']=(str(uq_ece_per)+str('%'))
output['Model_Total_Uncertainty']=(str(model_uncertainty_per)+str('%'))
# output['Risk Plot'] = str(riskPlot)
# output['Reliability Plot'] = str(reliability_plot)
for k,v in outuq.items():
output[k]=(str(round((v*100),2))+str(' %'))
output['Recommendation']=recommendation
# output['user_msg']='Please check the plot for more understanding of model uncertainty'
output['Metric_Accuracy_Score']=(str(acc_score_per)+str(' %'))
outputs = json.dumps(output)
with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f:
json.dump(output, f)
return test_accuracy_perc,uq_ece,outputs
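# --- Illustrative sketch (not part of the original class) ---
# uqMain_BBMClassification above wraps SVC/SGDClassifier in CalibratedClassifierCV
# because those estimators do not expose calibrated predict_proba by default. A
# minimal, hedged example of that calibration pattern on synthetic data:
def _calibration_sketch():
    from sklearn.datasets import make_classification
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    calibrated = CalibratedClassifierCV(SVC(), method='sigmoid', cv=3)
    calibrated.fit(X, y)
    return calibrated.predict_proba(X)[:5]  # sigmoid-calibrated class probabilities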
def aion_confidence_plot(self,df):
try:
global x_feature
df = df.sort_values(by=self.selectedfeature)
best_values=df.Best_values.to_list()
best_upper=df.Best__upper.to_list()
best_lower=df.Best__lower.to_list()
Total_Upper_PI=df.Total_Upper_PI.to_list()
Total_Low_PI=df.Total_Low_PI.to_list()
Obseved = df.Observed.to_list()
x_feature1 = x_feature.split(',')
plt.plot(df[x_feature1[0]], df['Observed'], 'o', label='Observed')
plt.plot(df[x_feature1[0]], df['Best__upper'],'r--', lw=2, color='grey')
plt.plot(df[x_feature1[0]], df['Best__lower'],'r--', lw=2, color='grey')
plt.plot(df[x_feature1[0]], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red')
plt.fill_between(df[x_feature1[0]], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5)
plt.fill_between(df[x_feature1[0]],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5)
plt.legend()
plt.xlabel(x_feature1[0])
plt.ylabel(self.targetFeature)
plt.title('UQ Best & Good Area Plot')
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png')
plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png')
'''
plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png')
uq_confidence_plt = os.path.join(str(self.Deployment),'uq_confidence_plt.png')
except Exception as inst:
print('aion_confidence_plot failed: ',inst)
uq_confidence_plt = ''
return uq_confidence_plt
def uqMain_BBMRegression(self):
# modelName = ""
# self.log.info('<!------------- Inside BlackBox MetaModel Regression process. ---------------> ')
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
# #print("model_params['criterion']: \\n",model_params['criterion'])
key = 'criterion'
try:
# Use the base model's 'criterion' hyper-parameter as the UQ scoring metric when it is set; otherwise fall back to 'picp'.
uq_scoring_param = model_params.get(key) or 'picp'
except Exception as inst:
uq_scoring_param='picp'
# modelname='sklearn.linear_model'+'.'+model_name
# self.xtrain = self.xtrain.values.reshape((-1,1))
# self.xtest = self.xtest.values.reshape((-1,1))
if (isinstance(self.selectedfeature,list)):
selectedfeature=[self.selectedfeature[0]]
selectedfeature=' '.join(map(str,selectedfeature))
else:
selectedfeature=self.selectedfeature
if (isinstance(self.targetFeature,list)):
targetFeature=[self.targetFeature[0]]
targetFeature=' '.join(map(str,targetFeature))
else:
targetFeature=self.targetFeature
X = self.data[selectedfeature]
y = self.data[targetFeature]
X = X.values.reshape((-1,1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
#Getting the trained model name so the same model type can be used in BlackboxMetamodelRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor
if (model_name == "DecisionTreeRegressor"):
basemodel=DecisionTreeRegressor
elif (model_name == "LinearRegression"):
basemodel=LinearRegression
elif (model_name == "Lasso"):
basemodel=Lasso
elif (model_name == "Ridge"):
basemodel=Ridge
elif(model_name == "RandomForestRegressor"):
basemodel=RandomForestRegressor
else:
basemodel=LinearRegression
# Keep a recognized metric name as-is; map 'picp' (and anything unrecognized) to its descriptive label.
if uq_scoring_param not in ['rmse', 'nll','auucc_gain','mpiw','r2']:
uq_scoring_param='prediction interval coverage probability score (picp)'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
# UQ metamodel regression supports the following metrics: 'rmse', 'nll', 'auucc_gain', 'picp', 'mpiw', 'r2'
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None)
metric_used=''
for k,v in metric_all.items():
metric_used=str(round(v,2))
# Determine the confidence level and recommendation for the tester
# test_data=y_test
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
#Calculate total uncertainty for all features
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data)
# df1=self.data
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params)
recommendation=""
observed_widths_mpiw = round((observed_widths_mpiw/1000000)*100) # scale raw MPIW into a percentage against an assumed 1e6 width cap
if observed_widths_mpiw > 100:
observed_widths_mpiw = 100
output={}
if (observed_alphas_picp >= 0.90 and total_picp >= 0.75):
# GREEN text
recommendation = "Model has good confidence and MPIW scores, ready to deploy."
msg='Good'
elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.90) and (total_picp >= 0.50)):
# Orange
recommendation = "Model has average confidence compared to the threshold (ideally both model confidence and MPIW should be >90%). The model can be improved by adding more training data across all feature ranges and re-training the model."
msg = 'Average'
else:
# RED text
recommendation = "Model has less confidence compared to the threshold (ideally both model confidence and MPIW should be >90%). Add more input data across all feature ranges, retrain the base model, and try different regression algorithms/ensembling."
msg = 'Bad'
#Build uq json info dict
output['Model_total_confidence']=(str(total_picp_percentage)+'%')
output['Model_total_Uncertainty']=(str(total_Uncertainty_percentage)+'%')
output['Selected_feature_confidence']=(str(picp_percentage)+'%')
output['Selected_feature_Uncertainty']=(str(Uncertainty_percentage)+'%')
output['Prediction_Interval_Coverage_Probability']=observed_alphas_picp
output['Mean_Prediction_Interval_Width']=str(observed_widths_mpiw)+'%'
output['Desirable_MPIW_range']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range)))
output['Recommendation']=str(recommendation)
output['Metric_used']=uq_scoring_param
output['Metric_value']=metric_used
output['Problem']= 'Regression'
output['recommend']= 'recommend'
output['msg'] = msg
with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f:
json.dump(output, f)
#To get best and medium UQ range of values from total predict interval
y_hat_m=y_hat.tolist()
y_hat_lb=y_hat_lb.tolist()
upper_bound=y_hat_ub.tolist()
y_hat_ub=y_hat_ub.tolist()
for x in y_hat_lb:
y_hat_ub.append(x)
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
'''
print(X_test)
print(X_test)
X_test = np.squeeze(X_test)
print(x_feature)
'''
uq_dict = pd.DataFrame(X_test)
#print(uq_dict)
uq_dict['Observed'] = y_test
uq_dict['Best_values'] = y_hat_m
uq_dict['Best__upper'] = ymean_upper
uq_dict['Best__lower'] = ymean_lower
uq_dict['Total_Low_PI'] = y_hat_lb
uq_dict['Total_Upper_PI'] = upper_bound
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}'''
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
# self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.')
# confidenceplot = self.aion_confidence_plot(df)
# output['Confidence Plot']= confidenceplot
uq_jsonobject = json.dumps(output)
print("UQ regression problem training completed...\\n")
return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject
except Exception as inst:
print('uqMain_BBMRegression failed: ',inst)
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
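# --- Illustrative sketch (not part of the original class) ---
# End-to-end usage of the blackbox-metamodel pattern on synthetic data, mirroring the
# fit/predict/picp/mpiw sequence in uqMain_BBMRegression above. A minimal sketch
# assuming uq360 is installed; the empty config dicts are hypothetical placeholders.
def _bbm_regression_sketch():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LinearRegression
    from uq360.metrics import picp, mpiw
    X, y = make_regression(n_samples=300, n_features=1, noise=10.0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    uq_model = BlackboxMetamodelRegression(base_model=LinearRegression, meta_model=LinearRegression,
                                           base_config={}, meta_config={})
    uq_model.fit(X_train, y_train)
    y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
    return picp(y_test, y_hat_lb, y_hat_ub), mpiw(y_hat_lb, y_hat_ub)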
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import os
import datetime, time, timeit
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pickle
import logging
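# --- Illustrative sketch (not part of the original class) ---
# The core of recommender_model below: pivot ratings into an item x user matrix and
# take cosine similarity between item rows. Column names and ratings here are made up.
def _item_similarity_sketch():
    from sklearn.metrics.pairwise import cosine_similarity
    ratings = pd.DataFrame({'userId': [1, 1, 2, 2, 3],
                            'itemId': [1, 2, 1, 3, 2],
                            'rating': [5, 3, 4, 2, 4]})
    matrix = ratings.pivot_table(index='itemId', columns='userId', values='rating')
    sim = cosine_similarity(matrix.fillna(0))
    return pd.DataFrame(sim, index=matrix.index, columns=matrix.index)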
class recommendersystem():
def __init__(self,features,svd_params):
self.features = features
self.svd_input = svd_params
self.log = logging.getLogger('eion')
print ("recommendersystem starts \\n")
#To extract the first key,value pair of a dict
def extract_params(self,param_dict):
self.dict=param_dict
for k,v in self.dict.items():
return k,v
def recommender_model(self,df,outputfile):
from sklearn.metrics.pairwise import cosine_similarity
from utils.file_ops import save_csv
USER_ITEM_MATRIX = 'user_item_matrix'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix'
selectedColumns = self.features.split(',')
data = pd.DataFrame()
for i in range(0,len(selectedColumns)):
data[selectedColumns[i]] = df[selectedColumns[i]]
dataset = data
self.log.info('-------> Top(5) Rows')
self.log.info(data.head(5))
start = time.time()
self.log.info('\\n----------- Recommender System Training Starts -----------')
#--------------- Task 11190:recommender system changes Start ---Usnish------------------#
# selectedColumns = ['userId', 'movieId', 'rating']
df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns[2], 'count')).reset_index()
self.log.info('-------> Top 10 most rated Items:')
self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10))
matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2])
relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv')
matrix.to_csv(relative_file)
item_similarity_cosine = cosine_similarity(matrix.fillna(0))
item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'))
self.log.info('---------> Item-Item Similarity matrix created:')
self.log.info(item_similarity_cosine.head(5))
relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv')
save_csv(item_similarity_cosine,relative_file)
# --------------- recommender system changes End ---Usnish------------------#
executionTime=time.time() - start
self.log.info("------->Execution Time: "+str(executionTime))
self.log.info('----------- Recommender System Training End -----------\\n')
return "filename",matrix,"NA","",""<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pickle
import pandas as pd
import sys
import time
import os
from os.path import expanduser
import platform
from sklearn.preprocessing import binarize
import logging
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Input, Embedding, LSTM, Lambda
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D
from sklearn.metrics.pairwise import cosine_similarity, cosine_distances
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers
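# --- Illustrative sketch (not part of the original module) ---
# How a fitted tokenizer (the "pipe" argument of siamese_model below) turns sentences
# into equal-length integer sequences via texts_to_sequences + pad_sequences. The
# sample sentences are made up.
def _pair_encoding_sketch():
    from tensorflow.keras.preprocessing.text import Tokenizer
    sentences = ["the cat sat on the mat", "a cat sat there"]
    tok = Tokenizer()
    tok.fit_on_texts(sentences)
    seqs = tok.texts_to_sequences(sentences)
    max_len = max(len(s) for s in seqs)
    return pad_sequences(seqs, maxlen=max_len, padding='post')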
## Keras subclassing based siamese network
class siameseNetwork(Model):
def __init__(self, activation,inputShape, num_iterations):
# Call the Keras Model initializer before setting any attributes on self.
super(siameseNetwork, self).__init__()
self.activation=activation
self.log = logging.getLogger('eion')
i1 = layers.Input(shape=inputShape)
i2 = layers.Input(shape=inputShape)
featureExtractor = self.build_feature_extractor(inputShape, num_iterations)
f1 = featureExtractor(i1)
f2 = featureExtractor(i2)
#distance vect
distance = layers.Concatenate()([f1, f2])
cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
c_loss=cosine_loss(f1, f2)
similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
self.model = Model(inputs=[i1, i2], outputs=outputs)
##Build dense sequential layers
def build_feature_extractor(self, inputShape, num_iterations):
layers_config = [layers.Input(inputShape)]
for i, n_units in enumerate(num_iterations):
layers_config.append(layers.Dense(n_units))
layers_config.append(layers.Dropout(0.2))
layers_config.append(layers.BatchNormalization())
layers_config.append(layers.Activation(self.activation))
model = Sequential(layers_config, name='feature_extractor')
return model
def call(self, x):
return self.model(x)
def euclidean_distance(vectors):
(f1, f2) = vectors
sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True)
return K.sqrt(K.maximum(sumSquared, K.epsilon()))
def cosine_similarity(vectors):
(f1, f2) = vectors
f1 = K.l2_normalize(f1, axis=-1)
f2 = K.l2_normalize(f2, axis=-1)
return K.mean(f1 * f2, axis=-1, keepdims=True)
def cos_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0],1)
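# --- Illustrative usage sketch (not part of the original module) ---
# Exercising the euclidean_distance and cosine_similarity helpers above on small
# constant tensors (assumes TF2 eager execution).
def _distance_helpers_demo():
    f1 = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    f2 = tf.constant([[1.0, 0.0], [1.0, 0.0]])
    d = euclidean_distance((f1, f2))    # per-row L2 distance, shape (2, 1)
    s = cosine_similarity((f1, f2))     # per-row cosine similarity, shape (2, 1)
    return d, s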
class eion_similarity_siamese:
def __init__(self):
self.log = logging.getLogger('eion')
def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file):
try:
self.log.info('-------> Read Embedded File')
home = expanduser("~")
if platform.system() == 'Windows':
modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity')
else:
modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity')
if os.path.isdir(modelsPath) == False:
os.makedirs(modelsPath)
embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt')
if not os.path.exists(embedding_file_path):
from pathlib import Path
import urllib.request
import zipfile
location = modelsPath
local_file_path = os.path.join(location,"glove.6B.zip")
file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path)
with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
zip_ref.extractall(location)
os.unlink(os.path.join(location,"glove.6B.zip"))
if os.path.isfile(os.path.join(location,"glove.6B.50d.txt")):
os.unlink(os.path.join(location,"glove.6B.50d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.300d.txt")):
os.unlink(os.path.join(location,"glove.6B.300d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.200d.txt")):
os.unlink(os.path.join(location,"glove.6B.200d.txt"))
X = df[[col1,col2]]
Y = df[targetColumn]
self.log.info('\\n-------------- Test Train Split ----------------')
if testPercentage == 0:
xtrain=X
ytrain=Y
xtest=X
ytest=Y
else:
testSize=testPercentage/100
self.log.info('-------> Split Type: Random Split')
self.log.info('-------> Test Size (fraction): '+str(testSize))
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize)
self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->')
self.log.info('-------------- Test Train Split End ----------------\\n')
self.log.info('\\n-------------- Train Validate Split ----------------')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->')
self.log.info('-------------- Train Validate Split End----------------\\n')
self.log.info('Status:- |... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test')
train_sentence1 = pipe.texts_to_sequences(X_train[col1].values)
train_sentence2 = pipe.texts_to_sequences(X_train[col2].values)
val_sentence1 = pipe.texts_to_sequences(X_val[col1].values)
val_sentence2 = pipe.texts_to_sequences(X_val[col2].values)
len_vec = [len(sent_vec) for sent_vec in train_sentence1]
max_len = np.max(len_vec)
len_vec = [len(sent_vec) for sent_vec in train_sentence2]
if (max_len < np.max(len_vec)):
max_len = np.max(len_vec)
train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post')
train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post')
val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post')
val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post')
y_train = y_train.values
y_val = y_val.values
activation = str(conf['activation'])
model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10])
model.compile(
loss="binary_crossentropy",
optimizer=optimizers.Adam(learning_rate=0.0001),
metrics=["accuracy"])
es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
rlp = callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1
)
x_valid=X_val
y_valid=y_val
n_epoch = int(conf['num_epochs'])
batch_size = int(conf['batch_size'])
similarityIndex = conf['similarityIndex']
model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size,
validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp])
scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0)
self.log.info('-------> Model Score Matrix: Accuracy')
self.log.info('-------> Model Score (Validate Data) : '+str(scores[1]))
self.log.info('Status:- |... Algorithm applied: SIAMESE')
test_sentence1 = pipe.texts_to_sequences(X_test[col1].values)
test_sentence2 = pipe.texts_to_sequences(X_test[col2].values)
test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post')
test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post')
prediction = model.predict([test_sentence1, test_sentence2 ])
self.log.info('-------> similarityIndex : '+str(similarityIndex))
prediction = np.where(prediction > similarityIndex,1,0)
rocauc_sco = roc_auc_score(y_test,prediction)
acc_sco = accuracy_score(y_test, prediction)
predict_df = pd.DataFrame()
predict_df['actual'] = y_test
predict_df['predict'] = prediction
predict_df.to_csv(predicted_data_file)
self.log.info("predict_df: \\n"+str(predict_df))
sco = acc_sco
self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco))
self.log.info('Status:- |... Testing Score: '+str(acc_sco))
self.log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco))
matrix = '"Accuracy":'+str(acc_sco)+',"ROC AUC":'+str(rocauc_sco)
prediction = model.predict([train_sentence1, train_sentence2])
prediction = np.where(prediction > similarityIndex,1,0)
train_rocauc_sco = roc_auc_score(y_train,prediction)
train_acc_sco = accuracy_score(y_train, prediction)
self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco))
self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco))
trainmatrix = '"Accuracy":'+str(train_acc_sco)+',"ROC AUC":'+str(train_rocauc_sco)
model_tried = '{"Model":"SIAMESE","Score":'+str(sco)+'}'
saved_model = 'textsimilarity_'+iterName+'_'+iterVersion
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.sav')
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.h5')
## Because we are using subclassing layer api, please use dir (as below) to store deep learn model instead of .h5 model.
filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion)
model.save(filename)
# model.save_weights(filename)
model_name = 'SIAMESE MODEL'
return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex)
except Exception as inst:
self.log.info("SIAMESE failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |