file | content
---|---
__init__.py | from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
|
__init__.py | from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
|
uncertainty_characteristics_curve.py | from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
try:
    from scipy.integrate import simps, trapz
except ImportError:  # SciPy >= 1.14 removed these aliases in favor of simpson/trapezoid
    from scipy.integrate import simpson as simps, trapezoid as trapz
from sklearn.isotonic import IsotonicRegression
DEFAULT_X_AXIS_NAME = 'excess'
DEFAULT_Y_AXIS_NAME = 'missrate'
class UncertaintyCharacteristicsCurve:
"""
Class with main functions of the Uncertainty Characteristics Curve (UCC).
"""
def __init__(self, normalize=True, precompute_bias_data=True):
"""
:param normalize: set initial axes normalization flag (can be changed via set_coordinates())
:param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based
UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call
if bias-based UCC is not needed.
"""
self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4}
self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"}
self.x_axis_idx = None
self.y_axis_idx = None
self.norm_x_axis = False
self.norm_y_axis = False
self.std_unit = None
self.normalize = normalize
self.d = None
self.gt = None
self.lb = None
self.ub = None
self.precompute_bias_data = precompute_bias_data
self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize)
def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None):
"""
Assigns user-specified types to the axes and sets the normalization behavior (sticky).
:param x_axis_name: None -> unchanged, or a name from self.axes_name2idx
:param y_axis_name: ditto
:param normalize: True/False will activate/deactivate norming for the specified axes.
    Axes whose name argument is None are left unchanged.
    A value of None leaves the norm status unchanged.
    Note: the 'missrate' axis is never normalized, even with normalize == True.
:return: None
"""
normalize = self.normalize if normalize is None else normalize
if x_axis_name is None and self.x_axis_idx is None:
raise ValueError("ERROR(UCC): x-axis has not been defined.")
if y_axis_name is None and self.y_axis_idx is None:
raise ValueError("ERROR(UCC): y-axis has not been defined.")
if x_axis_name is None and y_axis_name is None and normalize is not None:
    # just toggle normalization for the currently configured axes and return
    # (compare against the stored axis indices, since no axis names were passed in)
    self.norm_x_axis = False if self.x_axis_idx == self.axes_name2idx['missrate'] else normalize
    self.norm_y_axis = False if self.y_axis_idx == self.axes_name2idx['missrate'] else normalize
    return
if x_axis_name is not None:
self.x_axis_idx = self.axes_name2idx[x_axis_name]
self.norm_x_axis = False if x_axis_name == 'missrate' else normalize
if y_axis_name is not None:
self.y_axis_idx = self.axes_name2idx[y_axis_name]
self.norm_y_axis = False if y_axis_name == 'missrate' else normalize
def set_std_unit(self, std_unit=None):
"""
Sets the UCC's unit to be used when displaying normalized axes.
:param std_unit: if None, the unit will be calculated as stddev of the ground truth data
(ValueError raised if data has not been set at this point)
or set to the user-specified value.
:return:
"""
if std_unit is None: # set it to stddev of data
if self.gt is None:
raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.")
self.std_unit = np.std(self.gt)
if np.isclose(self.std_unit, 0.):
print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.")
self.std_unit = 1.
else:
self.std_unit = float(std_unit)
def fit(self, X, gt):
"""
Calculates internal arrays necessary for other methods (plotting, auc, cost minimization).
Re-entrant.
:param X: [numsamples, 3] numpy matrix, or list of numpy matrices.
Col 1: predicted values
Col 2: lower band (deviate) wrt predicted value (always positive)
Col 3: upper band wrt predicted value (always positive)
If list is provided, all methods will output corresponding metrics as lists as well!
:param gt: Ground truth array (i.e., the 'actual' values corresponding to the predictions in X)
:return: self
"""
if not isinstance(X, list):
X = [X]
newX = []
for x in X:
assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt))
newX.append(self._sanitize_input(x))
self.d = [gt - x[:, 0] for x in newX]
self.lb = [x[:, 1] for x in newX]
self.ub = [x[:, 2] for x in newX]
self.gt = gt
self.set_std_unit()
self.plotdata_for_scale = []
self.plotdata_for_bias = []
# precompute plotdata:
for i in range(len(self.d)):
self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False))
if self.precompute_bias_data:
self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True))
return self
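# Usage sketch (editor's note, synthetic data): fit() expects column 0 to hold the
# predictions and columns 1/2 the positive lower/upper band widths, e.g.
#   gt = np.random.randn(100)
#   pred = gt + 0.1 * np.random.randn(100)
#   band = 0.5 * np.ones(100)
#   X = np.stack([pred, band, band], axis=1)      # shape (100, 3)
#   ucc = UncertaintyCharacteristicsCurve().fit(X, gt)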
def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True,
search=('scale', 'bias')):
"""
Find minima of a linear cost function for each component.
Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value.
A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg).
The function returns a 'recipe' describing how to achieve the corresponding minimum, for each component.
:param x_axis_cost: weight of one unit on x_axis
:param y_axis_cost: weight of one unit on y_axis
:param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be
pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes.
:param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'.
:return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are -
'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to
add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that
minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point).
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if augment_cost_by_normfactor:
if self.norm_x_axis:
x_axis_cost /= self.std_unit
if self.norm_y_axis:
y_axis_cost /= self.std_unit
print("INFO(UCC): Pre-norming costs by corresp. std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" %
(x_axis_cost, y_axis_cost))
if isinstance(search, tuple):
search = list(search)
if not isinstance(search, list):
search = [search]
min_costs = []
for d in range(len(self.d)):
# original OP cost
m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d])
original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][
self.y_axis_idx]
plotdata = self.plotdata_for_scale[d]
cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_scale_multiplier = plotdata[minidx_scale][0]
mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx]
mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx]
if 'bias' in search:
if not self.precompute_bias_data:
raise ValueError(
"ERROR(UCC): Cannot perform minimization - instantiated without bias data computation")
plotdata = self.plotdata_for_bias[d]
cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_bias_add = plotdata[minidx_bias][0]
mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx]
mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx]
if 'bias' in search and 'scale' in search:
if cost_bias < cost_scale:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'scale' in search:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'bias' in search:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search))
if len(min_costs) < 2:
return min_costs[0]
else:
return min_costs
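# Usage sketch (editor's note; assumes a fitted `ucc` object as in the note after fit() above):
#   recipe = ucc.minimize_cost(x_axis_cost=1.0, y_axis_cost=1.0, search=('scale', 'bias'))
# returns e.g. {'operation': 'scale', 'modvalue': ..., 'new_x': ..., 'new_y': ...,
#               'cost': ..., 'original_cost': ...}; multiplying (or, for 'bias', shifting)
# the error bars by 'modvalue' moves the operating point to ('new_x', 'new_y').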
def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None,
req_critical_value=None, vary_bias=False):
"""
Finds the corresponding operating point on the current UCC, given a point on either the x or y axis, or a
critical value. Returns a list of recipes describing how to achieve the point (x, y), one per component.
If there is only one component, a single recipe dict is returned.
:param req_x_axis_value: requested x value on the UCC (normalization status is taken from the current display)
:param req_y_axis_value: requested y value on the UCC (normalization status is taken from the current display)
:param req_critical_value: requested critical value (the scale multiplier or additive bias itself)
:param vary_bias: set to True when referring to the bias-induced UCC (the scale-based UCC is the default)
:return: list of dicts (recipes), or a single dict
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1:
raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
recipe = []
for dc in range(len(self.d)):
plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc]
if req_x_axis_value is not None:
tgtidx = self.x_axis_idx
req_value = req_x_axis_value * xnorm
elif req_y_axis_value is not None:
tgtidx = self.y_axis_idx
req_value = req_y_axis_value * ynorm
elif req_critical_value is not None:
req_value = req_critical_value
tgtidx = 0  # first element in plotdata is always the critical value (scale or bias)
else:
raise RuntimeError("Unhandled case")
closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata]))
recipe.append({'operation': ('bias' if vary_bias else 'scale'),
'modvalue': plotdata[closestidx][0],
'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm,
'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm})
if len(recipe) < 2:
return recipe[0]
else:
return recipe
def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2):
"""
Finds the minimum cost function value and the corresponding position index in plotdata.
:param plotdata: list of tuples
:param idx1: index of the x-axis item within each tuple
:param idx2: index of the y-axis item within each tuple
:param cost1: cost factor for one x-axis unit
:param cost2: cost factor for one y-axis unit
:return: minimum cost value, index within plotdata where the minimum occurs
"""
raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata]
minidx = np.argmin(raw)
return raw[minidx], minidx
def _sanitize_input(self, x):
"""
Replaces problematic values in the input data (e.g., zero error bars).
:param x: single matrix of input data [n, 3]
:return: sanitized version of x
"""
if np.isclose(np.sum(x[:, 1]), 0.):
raise ValueError("ERROR(UCC): Provided lower bands are all zero.")
if np.isclose(np.sum(x[:, 2]), 0.):
raise ValueError("ERROR(UCC): Provided upper bands are all zero.")
for i in [1, 2]:
if any(np.isclose(x[:, i], 0.)):
print("WARN(UCC): some band values are 0. - REPLACING with positive minimum")
m = np.min(x[x[:, i] > 0, i])
# replace zeros only within the offending band column, leaving the prediction column untouched
x = x.copy()
x[:, i] = np.where(np.isclose(x[:, i], 0.), m, x[:, i])
return x
def _calc_avg_excess(self, d, lb, ub):
"""
Excess is the amount by which an error bar overshoots the actual value.
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average excess over array
"""
excess = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx])
negidx = np.where(d < 0)[0]
excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx])
return np.mean(excess)
def _calc_avg_deficit(self, d, lb, ub):
"""
Deficit is the error bar's shortfall: the amount by which the bar falls short of the actual value.
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average deficit over array
"""
deficit = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx])
negidx = np.where(d < 0)[0]
deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx])
return np.mean(deficit)
def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0):
"""
Calculates the miss rate, average bandwidth, average excess and average deficit at a given scale/bias.
:param d: delta (predicted - actual)
:param lb: lower band
:param ub: upper band
:param scale: multiplicative scale applied to the (bias-shifted) bands, i.e. scale * (band + bias)
:param bias: additive bias applied to the bands before scaling
:return: miss rate, average bandwidth, average excess, average deficit
"""
abslband = scale * np.where((lb + bias) < 0., 0., lb + bias)
absuband = scale * np.where((ub + bias) < 0., 0., ub + bias)
recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d)
avgbandwidth = np.mean([absuband, abslband])
avgexcess = self._calc_avg_excess(d, abslband, absuband)
avgdeficit = self._calc_avg_deficit(d, abslband, absuband)
return 1 - recall, avgbandwidth, avgexcess, avgdeficit
def _calc_plotdata(self, d, lb, ub, vary_bias=False):
"""
Generates data necessary for various UCC metrics.
:param d: delta (predicted - actual) vector
:param ub: upper uncertainty bandwidth (above predicted)
:param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth)
:param vary_bias: True will switch to additive bias instead of scale
:return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit)
"""
# step 1: collect critical scale or bias values
critval = []
for i in range(len(d)):
if not vary_bias:
if d[i] >= 0:
critval.append(d[i] / ub[i])
else:
critval.append(-d[i] / lb[i])
else:
if d[i] >= 0:
critval.append(d[i] - ub[i])
else:
critval.append(-lb[i] - d[i])
critval = sorted(critval)
plotdata = []
for i in range(len(critval)):
if not vary_bias:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
scale=critval[i])
else:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
bias=critval[i])
plotdata.append((critval[i], missrate, bandwidth, excess, deficit))
return plotdata
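# Worked example (editor's note): for a residual d = +2.0 with upper band ub = 0.5, the
# critical scale is d / ub = 4.0 -- the smallest multiplier at which the scaled band reaches
# the residual, i.e. where the sample flips from miss to hit. With vary_bias=True the critical
# value is additive instead: d - ub = 1.5.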
def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None):
"""
Returns the approximate area under the curve on the current coordinates, for each component.
:param vary_bias: False == vary scale, True == vary bias
:param aucfct: specifies the AUC integrator (can be "trapz" or "simps")
:param partial_x: tuple (x_min, x_max) defining the interval on x over which to compute a partial AUC.
    The interval bounds refer to the axes as visualized (i.e., potentially normed).
:param partial_y: tuple (y_min, y_max) defining the interval on y over which to compute a partial AUC. partial_x must be None.
:return: list of floats with the AUUCC for each input component, or a single float if there is only one component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if partial_x is not None and partial_y is not None:
raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.")
assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2))
assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2))
# find starting point (where the x axis value starts to actually change)
rv = []
# do this for individual streams
xind = self.x_axis_idx
aucfct = simps if aucfct == "simps" else trapz
for s in range(len(self.d)):
plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s]
prev = plotdata[0][xind]
t = 1
cval = plotdata[t][xind]
while cval == prev and t < len(plotdata) - 1:
t += 1
prev = cval
cval = plotdata[t][xind]
startt = t - 1 # from here, it's a valid function
endtt = len(plotdata)
if startt >= endtt - 2:
rvs = 0. # no area
else:
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)]
x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)]
if partial_x is not None:
from_i = self._find_closest_index(partial_x[0], x)
to_i = self._find_closest_index(partial_x[1], x) + 1
elif partial_y is not None:
from_i = self._find_closest_index(partial_y[0], y)
to_i = self._find_closest_index(partial_y[1], y)
if from_i > to_i: # y is in reverse order
from_i, to_i = to_i, from_i
to_i += 1 # as upper bound in array indexing
else:
from_i = 0
to_i = len(x)
to_i = min(to_i, len(x))
if to_i < from_i:
raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.")
if to_i - from_i < 2:
raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified")
rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i])
rv.append(rvs)
if len(rv) < 2:
return rv[0]
else:
return rv
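# Usage sketch (editor's note; assumes a fitted `ucc` object):
#   full_auc = ucc.get_AUUCC()                         # area under the scale-based UCC
#   partial_auc = ucc.get_AUUCC(partial_x=(0.0, 0.5))  # area restricted to an x-interval
# The partial bounds are interpreted on the axes as currently displayed (possibly normed).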
@staticmethod
def _find_closest_index(value, array):
"""
Returns an index of the 'array' element closest in value to 'value'
:param value:
:param array:
:return:
"""
return np.argmin(np.abs(np.asarray(array)-value))
def _get_single_OP(self, d, lb, ub, scale=1., bias=0.):
"""
Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: single tuple (x point, y point, unit of x, unit of y)
"""
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias)
op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here)
return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm)
def get_OP(self, scale=1., bias=0.):
"""
Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only
1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
op = []
for dc in range(len(self.d)):
op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias))
if len(op) < 2:
return op[0]
else:
return op
def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None,
xlim=None, ylim=None, **kwargs):
""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown
if there are multiple data components (via fit())
:param titlestr: Plot title string
:param syslabel: list of label strings to appear in the plot legend. Can be a single string if there is one component.
:param outfn: base name of an image file to be created (will append .png before creating)
:param vary_bias: True will switch to varying additive bias (default is multiplicative scale)
:param markers: None or a list of marker styles to be used for each curve.
List must be same or longer than number of components.
Markers can be one among these ['o', 's', 'v', 'D', '+'].
:param xlim: tuple or list specifying the range for the x axis, or None (auto)
:param ylim: tuple or list specifying the range for the y axis, or None (auto)
:param `**kwargs`: Additional arguments passed to the main plot call.
:return: list of areas under the curve (or single area, if one data component)
list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit)
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if not isinstance(syslabel, list):
syslabel = [syslabel]
assert (len(syslabel) == len(self.d))
assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d)))
# main plot of (possibly multiple) datasets
plt.figure()
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
op_info = []
auucc = self.get_AUUCC(vary_bias=vary_bias)
auucc = [auucc] if not isinstance(auucc, list) else auucc
for s in range(len(self.d)):
# original operating point
x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s])
op_info.append((x_op, y_op, x_unit, y_unit))
# display chart
plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s]
axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata]
axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata]
marker = None
if markers is not None: marker = markers[s]
p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + (" (AUC=%.3f)" % auucc[s]), marker=marker, **kwargs)
if s + 1 == len(self.d):
oplab = 'OP'
else:
oplab = None
plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w',
markeredgewidth=1.5, markeredgecolor=p[0].get_color())
axisX_label = self.axes_idx2descr[self.x_axis_idx]
axisY_label = self.axes_idx2descr[self.y_axis_idx]
axisX_units = "(raw)" if np.isclose(xnorm, 1.0) else "[in std deviations]"
axisY_units = "(raw)" if np.isclose(ynorm, 1.0) else "[in std deviations]"
axisX_label += ' ' + axisX_units
axisY_label += ' ' + axisY_units
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
plt.xlabel(axisX_label)
plt.ylabel(axisY_label)
plt.legend()
plt.title(titlestr)
plt.grid()
if outfn is None:
plt.show()
else:
plt.savefig(outfn)
if len(auucc) < 2:
auucc = auucc[0]
op_info = op_info[0]
return auucc, op_info
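# --- Usage sketch (editor's illustration, not part of the original module; synthetic data) ---
# End-to-end example: fit the UCC, switch coordinates, plot, and read off the area under the
# curve together with the original operating point.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _gt = _rng.randn(200)
    _pred = _gt + 0.3 * _rng.randn(200)
    _band = 0.5 * np.ones(200)
    _X = np.stack([_pred, _band, _band], axis=1)
    _ucc = UncertaintyCharacteristicsCurve().fit(_X, _gt)
    _ucc.set_coordinates(x_axis_name='bandwidth', y_axis_name='missrate', normalize=True)
    _auc, _op = _ucc.plot_UCC(titlestr='UCC demo', syslabel='demo model')
    print(_auc, _op)   # _op is (x value, y value, x unit, y unit)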
|
heteroscedastic_mlp.py | import torch
import torch.nn.functional as F
from uq360.models.noise_models.heteroscedastic_noise_models import GaussianNoise
class GaussianNoiseMLPNet(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(GaussianNoiseMLPNet, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
self.noise_layer = GaussianNoise()
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
def loss(self, y_true=None, mu_pred=None, log_var_pred=None):
return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True) |
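# --- Usage sketch for GaussianNoiseMLPNet above (editor's illustration, synthetic data) ---
if __name__ == "__main__":
    net = GaussianNoiseMLPNet(num_features=4, num_outputs=1, num_hidden=16)
    x = torch.randn(8, 4)
    y = torch.randn(8, 1)
    mu, log_var = net(x)                                     # heteroscedastic mean / log-variance heads
    nll = net.loss(y_true=y, mu_pred=mu, log_var_pred=log_var)
    nll.sum().backward()                                     # a standard optimizer step would follow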
layer_utils.py | """
Contains implementations of various utilities used by Horseshoe Bayesian layers
"""
import numpy as np
import torch
from torch.nn import Parameter
td = torch.distributions
gammaln = torch.lgamma
def diag_gaussian_entropy(log_std, D):
    # entropy of a D-dimensional diagonal Gaussian: 0.5 * D * (1 + ln(2*pi)) + sum(log_std)
    return 0.5 * D * (1.0 + np.log(2 * np.pi)) + torch.sum(log_std)
def inv_gamma_entropy(a, b):
return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a))
def log_normal_entropy(log_std, mu, D):
return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi)
class InvGammaHalfCauchyLayer(torch.nn.Module):
"""
Uses the inverse Gamma parameterization of the half-Cauchy distribution.
a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an
auxiliary latent variable.
Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat).
This layer places a half Cauchy prior on the scales of each output node of the layer.
"""
def __init__(self, out_features, b):
"""
:param out_features: number of output nodes in the layer.
:param b: scale of the half Cauchy
"""
super(InvGammaHalfCauchyLayer, self).__init__()
self.b = b
self.out_features = out_features
# variational parameters for q(ln a^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
# self.log_sigma = torch.FloatTensor(out_features)
# variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters.
self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1.
self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2)
self.const = torch.FloatTensor([0.5])
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
# sample from half cauchy and log to initialize the mean of the log normal
sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features)))
self.mu.data = torch.FloatTensor(np.log(sample))
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)]
"""
expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + (
-0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat)
expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * (
torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat)
return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda)
def entropy(self):
"""
Computes entropy of q(ln a^2) and q(lambda)
"""
return self.entropy_lambda() + self.entropy_a2()
def entropy_lambda(self):
return inv_gamma_entropy(self.ahat, self.bhat)
def entropy_a2(self):
return log_normal_entropy(self.log_sigma, self.mu, self.out_features)
def kl(self):
"""
Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2))
"""
return -self.expectation_wrt_prior() - self.entropy()
def fixed_point_updates(self):
# update lambda moments
self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. / self.b ** 2)
class InvGammaLayer(torch.nn.Module):
"""
Approximates the posterior of c^2 with prior IGamma(c^2 | a , b)
using a log Normal approximation q(ln c^2) = N(mu, sigma^2)
"""
def __init__(self, a, b, out_features=1):
super(InvGammaLayer, self).__init__()
self.a = torch.FloatTensor([a])
self.b = torch.FloatTensor([b])
# variational parameters for q(ln c^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
self.out_features = out_features
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(c^2 | a, b)]
"""
# return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + (
# - self.c_a - 1) * c_mu - self.c_b * Ecinv
return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \
* self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2)
def entropy(self):
return log_normal_entropy(self.log_sigma, self.mu, 1)
def kl(self):
"""
Computes KL(q(ln(c^2) || IG(c^2 | a, b))
"""
return -self.expectation_wrt_prior().sum() - self.entropy()
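# --- Usage sketch (editor's illustration) ---
# The scale layers are trained variationally: kl() contributes to the negative ELBO and
# fixed_point_updates() refreshes the auxiliary inverse-Gamma posterior q(lambda).
if __name__ == "__main__":
    hc = InvGammaHalfCauchyLayer(out_features=10, b=1.0)
    print(hc.kl())              # KL term added to the variational objective
    hc.fixed_point_updates()    # closed-form update after a gradient step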
|
layers.py | """
Contains implementations of various Bayesian layers
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer
td = torch.distributions
def reparam(mu, logvar, do_sample=True, mc_samples=1):
    if do_sample:
        std = torch.exp(0.5 * logvar)
        eps = torch.FloatTensor(std.size()).normal_()
        sample = mu + eps * std
        for _ in np.arange(1, mc_samples):
            # draw fresh noise for each additional Monte Carlo sample
            eps = torch.FloatTensor(std.size()).normal_()
            sample += mu + eps * std
        return sample / mc_samples
    else:
        return mu
class BayesianLinearLayer(torch.nn.Module):
"""
Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and
fully factorized variational Gaussian approximation
"""
def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None):
super(BayesianLinearLayer, self).__init__()
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
# weight mean params
self.weights = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(out_features))
# weight variance params
self.weights_logvar = Parameter(torch.Tensor(out_features, in_features))
self.bias_logvar = Parameter(torch.Tensor(out_features))
# numerical stability
self.fudge_factor = 1e-8
if not prior_stdv:
# We will use a N(0, 1/num_inputs) prior over weights
self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))])
else:
self.prior_stdv = torch.FloatTensor([prior_stdv])
# self.prior_stdv = torch.Tensor([1. / np.sqrt(1e+3)])
self.prior_mean = torch.FloatTensor([0.])
# for Bias use a prior of N(0, 1)
self.prior_bias_stdv = torch.FloatTensor([1.])
self.prior_bias_mean = torch.FloatTensor([0.])
# init params either random or with pretrained net
self.init_parameters(init_weight, init_bias)
def init_parameters(self, init_weight, init_bias):
# init means
if init_weight is not None:
self.weights.data = torch.Tensor(init_weight)
else:
self.weights.data.normal_(0, float(self.prior_stdv.numpy()[0]))  # built-in float (np.float was removed in NumPy 1.24)
if init_bias is not None:
self.bias.data = torch.Tensor(init_bias)
else:
self.bias.data.normal_(0, 1)
# init variances
self.weights_logvar.data.normal_(-9, 1e-2)
self.bias_logvar.data.normal_(-9, 1e-2)
def forward(self, x, do_sample=True, scale_variances=False):
# local reparameterization trick
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
if scale_variances:
activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample)
else:
activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return activ
def kl(self):
"""
KL divergence (q(W) || p(W))
:return:
"""
weights_logvar = self.weights_logvar
# closed-form KL between factorized Gaussians: KL(N(mu, s^2) || N(m, p^2)) = ln(p/s) + (s^2 + (mu - m)^2) / (2 p^2) - 1/2
kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \
              (weights_logvar.exp() + (self.weights - self.prior_mean).pow(2)) / (
                      2 * self.prior_stdv.pow(2)) - 0.5
kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \
           (self.bias_logvar.exp() + (self.bias - self.prior_bias_mean).pow(2)) / (
                   2 * self.prior_bias_stdv.pow(2)) \
           - 0.5
return kld_weights.sum() + kld_bias.sum()
class HorseshoeLayer(BayesianLinearLayer):
"""
Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k
are vectors of all weights incident into the unit
"""
def __init__(self, in_features, out_features, cuda=False, scale=1.):
super(HorseshoeLayer, self).__init__(in_features, out_features)
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.)
self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale)
# prior on beta is N(0, I) when employing non centered parameterization
self.prior_stdv = torch.Tensor([1])
self.prior_mean = torch.Tensor([0.])
def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample scales
scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu)
scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2)
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return scale_sample * activ_sample
def kl(self):
return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl()
def fixed_point_updates(self):
self.nodescales.fixed_point_updates()
self.layerscale.fixed_point_updates()
class RegularizedHorseshoeLayer(HorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, c_b).
c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b)
def forward(self, x, do_sample=True, **kwargs):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample regularized scales
scale_mean = self.nodescales.mu + self.layerscale.mu
scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp()
regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample)
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return torch.sqrt(regularized_scale_sample) * activ_sample
def kl(self):
return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl()
class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b).
c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe
Note that we now have a per-node c_k.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
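# --- Usage sketch (editor's illustration) ---
# A Horseshoe layer is used like a stochastic linear layer; a training loop adds its kl()
# term to the negative ELBO and calls fixed_point_updates() after each optimizer step.
if __name__ == "__main__":
    hs = HorseshoeLayer(in_features=4, out_features=8, scale=1e-1)
    out = hs(torch.randn(16, 4))     # stochastic forward pass (do_sample=True)
    print(out.shape, hs.kl())
    hs.fixed_point_updates()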
|
misc.py | import numpy as np
import torch
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision
def compute_test_ll(y_test, y_pred_samples, std_y=1.):
"""
Computes the average test log likelihood = (1 / Ntest) * \sum_n \log p(y_n | x_n, D_train)
:param y_test: True y
:param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
:param std_y: True std of y (assumed known)
"""
S, _ = y_pred_samples.shape
noise = GaussianNoiseFixedPrecision(std_y=std_y)
nll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)
ll = torch.logsumexp(-nll, dim=0) - np.log(S)  # log of the Monte Carlo average over the S posterior samples
return torch.mean(ll) # mean over test points
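# --- Usage sketch (editor's illustration, synthetic posterior predictive samples) ---
if __name__ == "__main__":
    y_test = torch.randn(50)                 # 50 test targets
    y_pred_samples = torch.randn(20, 50)     # S = 20 posterior draws, one row per draw
    print(compute_test_ll(y_test, y_pred_samples, std_y=1.0))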
|
horseshoe_mlp.py | from abc import ABC
import numpy as np
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
td = torch.distributions
class HshoeBNN(nn.Module, ABC):
"""
Bayesian neural network with Horseshoe layers.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1,
hshoe_scale=1e-1, use_reg_hshoe=False):
if use_reg_hshoe:
layer = RegularizedHorseshoeLayer
else:
layer = HorseshoeLayer
super(HshoeBNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = nn.ModuleList()  # ModuleList so hidden-layer parameters are registered with the optimizer
self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes))
self.fc_out = BayesianLinearLayer(num_nodes, op_dim)
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def fixed_point_updates(self):
if hasattr(self.fc1, 'fixed_point_updates'):
self.fc1.fixed_point_updates()
if hasattr(self.fc_out, 'fixed_point_updates'):
self.fc_out.fixed_point_updates()
for layer in self.fc_hidden:
if hasattr(layer, 'fixed_point_updates'):
layer.fixed_point_updates()
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class HshoeRegressionNet(HshoeBNN, ABC):
"""
Horseshoe net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
# scale the KL terms by the number of batches so that the minibatch ELBO is an unbiased estimate of the true ELBO.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
Scaled sum of squared errors (scaled by 0.5 * E[noise precision]).
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class HshoeClassificationNet(HshoeBNN, ABC):
"""
Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
# scale the KL terms by the number of batches so that the minibatch ELBO is an unbiased estimate of the true ELBO.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w()) / num_batches - Elik
return neg_elbo
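# --- Training sketch (editor's illustration, synthetic 1-D regression data) ---
if __name__ == "__main__":
    x = torch.linspace(-1, 1, 64).unsqueeze(1)
    y = torch.sin(3 * x) + 0.1 * torch.randn_like(x)
    net = HshoeRegressionNet(ip_dim=1, op_dim=1, num_nodes=32, num_layers=1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    for _ in range(10):                      # a real run would use many more steps
        opt.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=x, y=y)
        loss.backward()
        opt.step()
        net.fixed_point_updates()            # closed-form updates of the Horseshoe scale posteriors
    print(float(net.get_noise_var()))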
|
bayesian_mlp.py | from abc import ABC
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class BayesianNN(nn.Module, ABC):
"""
Bayesian neural network with zero mean Gaussian priors over weights.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50,
activation_type='relu', num_layers=1):
super(BayesianNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = nn.ModuleList()  # ModuleList so hidden-layer parameters are registered with the optimizer
self.fc1 = layer(ip_dim, num_nodes,)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes, ))
self.fc_out = layer(num_nodes, op_dim, )
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class BayesianRegressionNet(BayesianNN, ABC):
"""
Bayesian neural net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
# scale the KL terms by the number of batches so that the minibatch ELBO is an unbiased estimate of the true ELBO.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
Scaled sum of squared errors (scaled by 0.5 * E[noise precision]).
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class BayesianClassificationNet(BayesianNN, ABC):
"""
Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
# scale the KL terms by the number of batches so that the minibatch ELBO is an unbiased estimate of the true ELBO.
Elik = self.likelihood(x, y)
neg_elbo = self.kl_divergence_w() / num_batches - Elik
return neg_elbo
|
homoscedastic_noise_models.py | import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b).
Uses a variational approximation; q(lambda) = Gamma(ahat, bhat)
"""
def __init__(self, a0=6, b0=6, cuda=False):
super(GaussianNoiseGammaPrecision, self).__init__()
self.cuda = cuda
self.a0 = a0
self.b0 = b0
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
# variational parameters
self.ahat = Parameter(torch.FloatTensor([10.]))
self.bhat = Parameter(torch.FloatTensor([3.]))
def loss(self, y_pred=None, y_true=None):
"""
computes -1 * E_q(\lambda)[ln N (y_pred | y_true, \lambda^-1)], where q(lambda) = Gamma(ahat, bhat)
:param y_pred:
:param y_true:
:return:
"""
n = y_pred.shape[0]
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \
- 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())
def kl(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \
self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat
def get_noise_var(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (bhat / ahat).data.numpy()[0]
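# (Editor's note) Under the rate parameterization q(lambda) = Gamma(ahat, bhat), E[lambda] = ahat / bhat,
# so bhat / ahat is the implied estimate of the noise variance returned here.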
class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), sigma_y**2); known sigma_y
"""
def __init__(self, std_y=1., cuda=False):
super(GaussianNoiseFixedPrecision, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
self.sigma_y = std_y
def loss(self, y_pred=None, y_true=None, reduce_sum=True):
    """
    Computes -1 * ln N(y_pred | y_true, sigma_y**2).
    :param y_pred:
    :param y_true:
    :param reduce_sum: if True, sum the negative log likelihood over the first dimension;
        if False, return it elementwise (as expected by compute_test_ll in misc.py).
    :return:
    """
    ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)
    return -ll.sum(dim=0) if reduce_sum else -ll
def get_noise_var(self):
return self.sigma_y ** 2 |
heteroscedastic_noise_models.py | import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoise(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f_\mu(x, w), f_\sigma^2(x, w))
"""
def __init__(self, cuda=False):
super(GaussianNoise, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True):
"""
computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred))
:param y_true:
:param mu_pred:
:param log_var_pred:
:return:
"""
var_pred = transform(log_var_pred)
ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2)
if reduce_mean:
return -ll.mean(dim=0)
else:
return -ll.sum(dim=0)
def get_noise_var(self, log_var_pred):
return transform(log_var_pred)
|
noisemodel.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class AbstractNoiseModel(ABC):
""" Abstract class. All noise models inherit from here.
"""
def __init__(self, *argv, **kwargs):
""" Initialize an AbstractNoiseModel object.
"""
@abc.abstractmethod
def loss(self, *argv, **kwargs):
""" Compute loss given predictions and groundtruth labels
"""
raise NotImplementedError
@abc.abstractmethod
def get_noise_var(self, *argv, **kwargs):
"""
Return the current estimate of noise variance
"""
raise NotImplementedError
|
builtinuq.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class BuiltinUQ(ABC):
""" BuiltinUQ is the base class for any algorithm that has UQ built into it.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def fit(self, *argv, **kwargs):
""" Learn the UQ related parameters..
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
|
posthocuq.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class PostHocUQ(ABC):
""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def _process_pretrained_model(self, *argv, **kwargs):
""" Method to process the pretrained model that requires UQ.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self):
"""
This method should not take any arguments and returns a dict of the __init__ parameters.
"""
raise NotImplementedError
|
__init__.py | from .ucc_recalibration import UCCRecalibration
|
ucc_recalibration.py | from collections import namedtuple
from uq360.algorithms.posthocuq import PostHocUQ
from uq360.utils.misc import form_D_for_auucc
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
class UCCRecalibration(PostHocUQ):
""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve.
"""
def __init__(self, base_model):
"""
Args:
base_model: pretrained model to be recalibrated.
"""
super().__init__()
self.base_model = self._process_pretrained_model(base_model)
self.ucc = None
def get_params(self, deep=True):
return {"base_model": self.base_model}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
"""
Fit the Uncertainty Characteristics Curve.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
self.ucc = UncertaintyCharacteristicsCurve()
self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze())
return self
def predict(self, X, missrate=0.05):
"""
Generate prediction and uncertainty bounds for data X.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
missrate: desired missrate of the new operating point, set to 0.05 by default.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False)
new_scale = C['modvalue']
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
if C['operation'] == 'bias':
    calib_y_pred_upper = y_pred_mean + (new_scale + bwu)  # additive bias widens the upper band
    calib_y_pred_lower = y_pred_mean - (new_scale + bwl)  # and the lower band
else:
    calib_y_pred_upper = y_pred_mean + (new_scale * bwu)  # multiplicative scale applied to the upper band width
    calib_y_pred_lower = y_pred_mean - (new_scale * bwl)  # and to the lower band width
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper)
return res
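# Usage sketch (editor's note; `model`, `X_calib`, `y_calib` and `X_test` are hypothetical placeholders
# for a pretrained regressor whose predict(X) returns (mean, lower, upper) and for held-out data):
#   recal = UCCRecalibration(base_model=model).fit(X_calib, y_calib)
#   res = recal.predict(X_test, missrate=0.05)     # rescaled bands targeting a 5% missrate
#   y_mean, y_lower, y_upper = res.y_mean, res.y_lower, res.y_upper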
|
__init__.py | from .classification_calibration import ClassificationCalibration
|
classification_calibration.py | from collections import namedtuple
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder
from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ
class ClassificationCalibration(PostHocUQ):
"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows
non-sklearn models to be calibrated.
"""
def __init__(self, num_classes, fit_mode="features", method='isotonic', base_model_prediction_func=None):
"""
Args:
num_classes: number of classes.
fit_mode: 'features' or 'probs'. If 'probs', `fit` and `predict` operate on the base model's probability scores;
    useful when these are precomputed.
method: isotonic or sigmoid.
base_model_prediction_func: the function that takes in the input features and produces base model's
probability scores. This is ignored when operating in `probs` mode.
"""
super().__init__()
self.num_classes = num_classes
self.fit_mode = fit_mode
self.method = method
self.base_model_prediction_func = base_model_prediction_func
if fit_mode == "probs":
    # In this case, fit() receives the probability scores of the base model directly,
    # so create a dummy estimator that simply passes them through.
    self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
else:
    self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
def get_params(self, deep=True):
return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
"base_model_prediction_func": self.base_model_prediction_func}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
""" Fits calibration model using the provided calibration set.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.base_model.label_encoder_ = LabelEncoder().fit(y)
self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
cv="prefit",
method=self.method)
self.calib_model.fit(X, y)
return self
def predict(self, X):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Returns:
namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
"""
y_prob = self.calib_model.predict_proba(X)
if len(np.shape(y_prob)) == 1:
y_pred_labels = y_prob > 0.5
else:
y_pred_labels = np.argmax(y_prob, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob'])
res = Result(y_pred_labels, y_prob)
return res
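# --- Usage sketch (editorial addition, not part of the original file) ---
# A minimal example of the fit/predict API in "probs" mode, where the calibrator is
# trained directly on precomputed base-model probability scores. The data below is
# synthetic and purely illustrative, and the sketch assumes the dependency versions
# this repository targets.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    probs = rng.dirichlet(alpha=[2.0, 2.0], size=500)   # stand-in for base-model scores
    labels = rng.binomial(n=1, p=probs[:, 1])           # synthetic labels consistent with the scores
    calibrator = ClassificationCalibration(num_classes=2, fit_mode="probs", method="isotonic")
    calibrator.fit(probs, labels)
    out = calibrator.predict(probs)
    print(out.y_pred[:5], out.y_prob[:5])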
|
auxiliary_interval_predictor.py | from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class _MLPNet_Main(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Main, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
class _MLPNet_Aux(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Aux, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
log_var = self.fc_log_var(x)
return log_var
class AuxiliaryIntervalPredictor(BuiltinUQ):
""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.
References:
.. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep
models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on
Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
"""
def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The model type used to build the main model and the auxiliary model. Currently supported values
are [mlp, custom]. `mlp` model type learns an MLP neural network using the pytorch framework. For `custom` the user
provides `main_model` and `aux_model`.
main_model: (optional) The main prediction model. Currently supports pytorch models that return mean and log variance.
aux_model: (optional) The auxiliary prediction model. Currently supports pytorch models that return calibrated log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models; ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
super(AuxiliaryIntervalPredictor, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.main_model = _MLPNet_Main(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
self.aux_model = _MLPNet_Aux(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.main_model = main_model
self.aux_model = aux_model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
"aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}
def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
r = torch.abs(y_true - y_pred_mu)
# + 0.5 * y_pred_log_var +
loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \
self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux)))
return loss
def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
upper = y_pred_mu + deltau
lower = y_pred_mu - deltal
width = upper - lower
r = torch.abs(y_true - y_pred_mu)
emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))
loss_emce = torch.abs(self.config["calibration_alpha"]-emce)
loss_noise = torch.mean(torch.abs(0.5 * width - r))
loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
#print(emce)
return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness
def fit(self, X, y):
""" Fit the Auxiliary Interval Predictor model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])
for it in range(self.config["num_outer_iters"]):
# Train the main model
for epoch in range(self.config["num_main_iters"]):
avg_mean_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.main_model.train()
self.aux_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux)
optimizer_main_model.zero_grad()
main_loss.backward()
optimizer_main_model.step()
avg_mean_model_loss += main_loss.item()/len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))
# Train the auxiliary model
for epoch in range(self.config["num_aux_iters"]):
avg_aux_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.aux_model.train()
self.main_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)
optimizer_aux_model.zero_grad()
aux_loss.backward()
optimizer_aux_model.step()
avg_aux_model_loss += aux_loss.item() / len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.main_model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
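# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal end-to-end run of the built-in "mlp" networks on synthetic 1-D data.
# Every config value below is an illustrative assumption, not a recommended default.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-2, 2, size=(256, 1)).astype('float32')
    y_demo = (np.sin(2 * X_demo) + 0.2 * rng.randn(256, 1)).astype('float32')
    demo_config = {"num_features": 1, "num_outputs": 1, "num_hidden": 32,
                   "batch_size": 32, "lr": 1e-2,
                   "num_outer_iters": 2, "num_main_iters": 5, "num_aux_iters": 5,
                   "lambda_match": 0.1, "calibration_alpha": 0.9,
                   "lambda_noise": 0.1, "lambda_sharpness": 0.1}
    aip = AuxiliaryIntervalPredictor(model_type="mlp", config=demo_config, device='cpu', verbose=False)
    aip.fit(X_demo, y_demo)
    res = aip.predict(X_demo[:5])
    print(res.y_mean.ravel(), res.y_lower.ravel(), res.y_upper.ravel())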
|
__init__.py | from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
|
bnn.py | import copy
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp
class BnnRegression(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.
References:
.. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
"""
def __init__(self, config, prior="Gaussian"):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnRegression, self).__init__()
self.config = config
self.prior = prior
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config}
def fit(self, X, y):
""" Fit the BNN regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
neg_elbo[epoch] = loss.item()
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0],
self.net.get_noise_var()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
epistemic_out = np.zeros([mc_samples, X.shape[0]])
total_out = np.zeros([mc_samples, X.shape[0]])
for s in np.arange(mc_samples):
pred = self.net(X).data.numpy().ravel()
epistemic_out[s] = pred
total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
y_total_std = np.std(total_out, axis=0)
y_epi_std = np.std(epistemic_out, axis=0)
y_mean = np.mean(total_out, axis=0)
y_lower = y_mean - 2 * y_total_std
y_upper = y_mean + 2 * y_total_std
y_epi_lower = y_mean - 2 * y_epi_std
y_epi_upper = y_mean + 2 * y_epi_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',))
res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
class BnnClassification(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
"""
def __init__(self, config, prior="Gaussian", device=None):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnClassification, self).__init__()
self.config = config
self.prior = prior
self.device = device
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
if "batch_size" not in self.config:
self.config["batch_size"] = 50
self.net = self.net.to(device)
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config, "device": self.device}
def fit(self, X=None, y=None, train_loader=None):
""" Fits BNN regression model.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Ignored if train_loader is not None.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Ignored if train_loader is not None.
train_loader: pytorch train_loader object.
Returns:
self
"""
if train_loader is None:
train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long())
train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
avg_loss = 0.0
for batch_x, batch_y in train_loader:
loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
avg_loss += loss.item()
neg_elbo[epoch] = avg_loss / len(train_loader)
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100):
"""
Obtain predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
mc_samples: Number of Monte-Carlo samples.
Returns:
namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
y_prob_var: ndarray of shape (n_samples,)
Variance of the prediction on the test points.
y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
Samples from the predictive distribution.
"""
X = torch.Tensor(X)
y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]
y_prob_samples_stacked = np.stack(y_prob_samples)
prob_mean = np.mean(y_prob_samples_stacked, 0)
prob_var = np.std(y_prob_samples_stacked, 0) ** 2
if len(np.shape(prob_mean)) == 1:
y_pred_labels = prob_mean > 0.5
else:
y_pred_labels = np.argmax(prob_mean, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)
return res
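# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal BnnClassification run on synthetic two-class data, passing a pytorch
# train_loader so the (X, y) branch of fit() is bypassed. The config values are
# illustrative assumptions about reasonable network and learning settings.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(400, 4).astype('float32')
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 0).astype('int64')
    demo_train = data_utils.TensorDataset(torch.Tensor(X_demo), torch.Tensor(y_demo).long())
    demo_loader = data_utils.DataLoader(demo_train, batch_size=64, shuffle=True)
    demo_config = {"ip_dim": 4, "op_dim": 2, "num_nodes": 32, "num_layers": 1,
                   "step_size": 1e-2, "num_epochs": 20}
    bnn = BnnClassification(config=demo_config, prior="Gaussian", device='cpu')
    bnn.fit(train_loader=demo_loader)
    res = bnn.predict(X_demo[:5], mc_samples=20)
    print(res.y_pred, res.y_prob)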
|
homoscedastic_gaussian_process_regression.py | from collections import namedtuple
import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class HomoscedasticGPRegression(BuiltinUQ):
""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise.
References:
.. [1] https://botorch.org/api/models.html#singletaskgp
"""
def __init__(self,
kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
likelihood=None,
config=None):
"""
Args:
kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
config: dictionary containing the config parameters for the model.
"""
super(HomoscedasticGPRegression, self).__init__()
self.config = config
self.kernel = kernel
self.likelihood = likelihood
self.model = None
self.scaler = StandardScaler()
self.X_bounds = None
def get_params(self, deep=True):
return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}
def fit(self, X, y, **kwargs):
"""
Fit the GP Regression model.
Additional arguments relevant for SingleTaskGP fitting can be passed to this function.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
**kwargs: Additional arguments relevant for SingleTaskGP fitting.
Returns:
self
"""
y = self.scaler.fit_transform(y)
X, y = torch.tensor(X), torch.tensor(y)
self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
X.max() * torch.ones(X.shape[1])])
X = normalize(X, X_bounds)
model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs)
model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
model_homo_marginal_log_lik.eval()
self.model = model_homo_marginal_log_lik
self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze()
return self
def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
X = torch.tensor(X)
X_test_norm = normalize(X, self.X_bounds)
self.model.eval()
with torch.no_grad():
posterior = self.model.model.posterior(X_test_norm)
y_mean = posterior.mean
#y_epi_std = torch.sqrt(posterior.variance)
y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()
predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
#y_std = torch.sqrt(predictive_posterior.variance)
y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()
y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \
self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(),\
self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(),\
self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(),\
self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()
y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
y_std = (y_upper_total - y_lower_total) / 4.0
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
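# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal fit/predict run on synthetic 1-D data. fit() standardizes y internally, so y
# is passed as a 2-D column array here. Data and sizes are illustrative, and the sketch
# assumes the botorch/gpytorch/sklearn versions this repository targets.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-3, 3, size=(40, 1))
    y_demo = np.sin(X_demo) + 0.1 * rng.randn(40, 1)
    gp = HomoscedasticGPRegression()
    gp.fit(X_demo, y_demo)
    res = gp.predict(X_demo[:5], return_epistemic=True)
    print(res.y_mean, res.y_lower, res.y_upper)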
|
__init__.py | from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression |
quantile_regression.py | from collections import namedtuple
from sklearn.ensemble import GradientBoostingRegressor
from uq360.algorithms.builtinuq import BuiltinUQ
class QuantileRegression(BuiltinUQ):
"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile
to obtain the prediction intervals.
"""
def __init__(self, model_type="gbr", config=None):
"""
Args:
model_type: The base model used for predicting a quantile. Currently supported values are [gbr].
gbr is sklearn GradientBoostingRegressor.
config: dictionary containing the config parameters for the model.
"""
super(QuantileRegression, self).__init__()
if config is not None:
self.config = config
else:
self.config = {}
if "alpha" not in self.config:
self.config["alpha"] = 0.95
if model_type == "gbr":
self.model_type = model_type
self.model_mean = GradientBoostingRegressor(
loss='ls',
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=1.0 - self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"])
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config}
def fit(self, X, y):
""" Fit the Quantile Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.model_mean.fit(X, y)
self.model_lower.fit(X, y)
self.model_upper.fit(X, y)
return self
def predict(self, X):
"""
Obtain predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_mean = self.model_mean.predict(X)
y_lower = self.model_lower.predict(X)
y_upper = self.model_upper.predict(X)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
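# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal fit/predict run with the "gbr" backend on synthetic data. The config keys
# mirror the GradientBoostingRegressor arguments read above; the values are
# illustrative, and the sketch assumes the sklearn version this repository targets.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-3, 3, size=(500, 1))
    y_demo = np.sin(X_demo[:, 0]) + 0.3 * rng.randn(500)
    demo_config = {"alpha": 0.9, "n_estimators": 100, "max_depth": 3,
                   "learning_rate": 0.05, "min_samples_leaf": 5, "min_samples_split": 10}
    qr = QuantileRegression(model_type="gbr", config=demo_config)
    qr.fit(X_demo, y_demo)
    res = qr.predict(X_demo[:5])
    print(res.y_mean, res.y_lower, res.y_upper)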
|
__init__.py | from .quantile_regression import QuantileRegression
|
__init__.py | from .infinitesimal_jackknife import InfinitesimalJackknife
|
infinitesimal_jackknife.py | from collections import namedtuple
import numpy as np
from uq360.algorithms.posthocuq import PostHocUQ
class InfinitesimalJackknife(PostHocUQ):
"""
Performs a first order Taylor series expansion around MLE / MAP fit.
Requires the model being probed to be twice differentiable.
"""
def __init__(self, params, gradients, hessian, config):
""" Initialize IJ.
Args:
params: MLE / MAP fit around which uncertainty is sought. d*1
gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n
hessian: Hessian evaluated at the MLE / MAP fit. d*d
config: dict of configuration options; this class reads 'resampling_strategy' (one of jackknife,
jackknife+, or bootstrap) and 'alpha' (the miscoverage level used to form the prediction quantiles).
"""
super(InfinitesimalJackknife, self).__init__()
self.params_one = params
self.gradients = gradients
self.hessian = hessian
self.d, self.n = gradients.shape
self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients)
self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients)
self.w_one = np.ones([self.n])
self.config = config
def get_params(self, deep=True):
return {"params": self.params, "config": self.config, "gradients": self.gradients,
"hessian": self.hessian}
def _process_pretrained_model(self, *argv, **kwargs):
pass
def get_parameter_uncertainty(self):
if (self.config['resampling_strategy'] == "jackknife") or (self.config['resampling_strategy'] == "jackknife+"):
w_query = np.ones_like(self.w_one)
resampled_params = np.zeros([self.n, self.d])
for i in np.arange(self.n):
w_query[i] = 0
resampled_params[i] = self.ij(w_query)
w_query[i] = 1
return np.cov(resampled_params), resampled_params
elif self.config['resampling_strategy'] == "bootstrap":
pass
else:
raise NotImplementedError("Only jackknife, jackknife+, and bootstrap resampling strategies are supported")
def predict(self, X, model):
"""
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
model: model object, must implement a set_parameters function
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
n, _ = X.shape
y_all = model.predict(X)
_, d_out = y_all.shape
params_cov, params = self.get_parameter_uncertainty()
if d_out > 1:
print("Quantiles are computed independently for each dimension. May not be accurate.")
y = np.zeros([params.shape[0], n, d_out])
for i in np.arange(params.shape[0]):
model.set_parameters(params[i])
y[i] = model.predict(X)
y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0)
y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0)
y_mean = y.mean(axis=0)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
def ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T
def approx_ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T |
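# --- Usage sketch for infinitesimal_jackknife.py (editorial addition, not part of the original file) ---
# Illustrates the IJ API on a toy least-squares problem. The per-point gradients and
# Hessian below come from a squared loss, and _ToyLinearModel is a hypothetical stand-in
# for any model exposing set_parameters() and a 2-D predict(); all values are synthetic.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_demo, d_demo = 50, 3
    X_demo = rng.randn(n_demo, d_demo)
    beta_true = np.array([1.0, -2.0, 0.5])
    y_demo = X_demo @ beta_true + 0.1 * rng.randn(n_demo)
    params_mle = np.linalg.solve(X_demo.T @ X_demo, X_demo.T @ y_demo)  # MLE for the squared loss
    residuals = X_demo @ params_mle - y_demo
    grads = (X_demo * residuals[:, None]).T                             # per-point gradients, d*n
    hess = X_demo.T @ X_demo                                            # Hessian of the squared loss
    class _ToyLinearModel:
        def __init__(self, p): self.p = p
        def set_parameters(self, p): self.p = p
        def predict(self, X): return (X @ self.p).reshape(-1, 1)
    ij = InfinitesimalJackknife(params_mle, grads, hess,
                                config={"resampling_strategy": "jackknife", "alpha": 0.05})
    res = ij.predict(X_demo, _ToyLinearModel(params_mle))
    print(res.y_mean.shape, res.y_lower.shape, res.y_upper.shape)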
blackbox_metamodel_classification.py | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelClassification(PostHocUQ):
""" Extracts confidence scores from black-box classification models using a meta-model [4]_ .
References:
.. [4] Chen, Tongfei, et al. "Confidence scoring using whitebox meta-models with linear classifier probes."
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
""" Instantiates a model by name passed in 'mdltype'.
Args:
mdltype: string with name (must be supported)
config: dict with args passed in the instantiation call
Returns:
mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'lr':
mdl = LogisticRegression(**config)
elif mdltype == 'gbm':
mdl = GradientBoostingClassifier(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance.
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'),
(3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
super(BlackboxMetamodelClassification, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbm'
self.meta_model_default = 'lr'
self.base_config_default = {'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def _process_pretrained_model(self, X, y_hat_proba):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat_proba: [nsamples, nclasses]
:return: array with new features [nsamples, newdim]
"""
assert (len(y_hat_proba.shape) == 2)
assert (X.shape[0] == y_hat_proba.shape[0])
# sort the probs sample by sample
faux1 = np.sort(y_hat_proba, axis=-1)
# add delta between top and second candidate
faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1)
return np.hstack([X, faux1, faux2])
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model,
array-like of shape (n_samples, n_features).
Features vectors of the training data.
:param y: ground truth for the base model,
array-like of shape (n_samples,)
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert (len(meta_train_data) == 2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta_proba = self.base_model.predict_proba(X_meta)
# determine correct-incorrect outcome - these are targets for the meta model trainer
# y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8)
y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# get input features for meta training
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba)
# train meta model to predict 'correct' vs. 'incorrect' of the base
self.meta_model.fit(X_meta_in, y_hat_meta_targets)
return self
def predict(self, X):
"""
Generate a base prediction along with uncertainty/confidence for data X.
:param X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
:return: namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_score: ndarray of shape (n_samples,)
Confidence score of the test points.
"""
y_hat_proba = self.base_model.predict_proba(X)
y_hat = np.argmax(y_hat_proba, axis=-1)
X_meta_in = self._process_pretrained_model(X, y_hat_proba)
z_hat = self.meta_model.predict_proba(X_meta_in)
index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0]  # class 1 corresponds to the probability of a positive/correct outcome
Result = namedtuple('res', ['y_pred', 'y_score'])
res = Result(y_hat, z_hat[:, index_of_class_1])
return res
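# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal run with the default gbm base model and lr meta model on noisy synthetic
# two-class data; the noise keeps the base model imperfect so the meta model sees both
# correct and incorrect outcomes. Data and sizes are illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(600, 5)
    y_demo = ((X_demo[:, 0] + X_demo[:, 1] + 1.5 * rng.randn(600)) > 0).astype(int)
    uq = BlackboxMetamodelClassification()
    uq.fit(X_demo, y_demo, meta_fraction=0.25)
    res = uq.predict(X_demo[:10])
    print(res.y_pred, res.y_score)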
|
__init__.py | from .blackbox_metamodel_regression import BlackboxMetamodelRegression
from .blackbox_metamodel_classification import BlackboxMetamodelClassification
|
blackbox_metamodel_regression.py | import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
:param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
super(BlackboxMetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
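# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal run with the default gbr base model and quantile-loss gbr meta model on
# synthetic heteroscedastic data; all values are illustrative, and the sketch assumes
# the sklearn version this repository targets.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-3, 3, size=(600, 1))
    y_demo = np.sin(X_demo[:, 0]) + (0.1 + 0.2 * np.abs(X_demo[:, 0])) * rng.randn(600)
    uq = BlackboxMetamodelRegression()
    uq.fit(X_demo, y_demo, meta_fraction=0.25)
    res = uq.predict(X_demo[:5])
    print(res.y_mean, res.y_lower, res.y_upper)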
|
__init__.py | from .heteroscedastic_regression import HeteroscedasticRegression |
heteroscedastic_regression.py | from collections import namedtuple
import numpy as np
import torch
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet
np.random.seed(42)
torch.manual_seed(42)
class HeteroscedasticRegression(BuiltinUQ):
""" Wrapper for heteroscedastic regression. We learn to predict targets given features,
assuming that the targets are noisy and that the amount of noise varies between data points.
https://en.wikipedia.org/wiki/Heteroscedasticity
"""
def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The base model architecture. Currently supported values are [mlp].
mlp model type learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the
mean and variance of the Gaussian are functions of the data point -> N(y_n | mlp_mu(x_n), mlp_var(x_n))
model: (optional) The prediction model. Currently supports pytorch models that return mean and log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models; ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
super(HeteroscedasticRegression, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.model = _MLPNet(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.model = model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "model": self.model,
"device": self.device, "verbose": self.verbose}
def _loss(self, y_true, y_pred_mu, y_pred_log_var):
return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 +
0.5 * y_pred_log_var)
def fit(self, X, y):
""" Fit the Heteroscedastic Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
for epoch in range(self.config["num_epochs"]):
avg_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.model.train()
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
loss = self._loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()/len(dataset_loader)
if self.verbose:
print("Epoch: {}, loss = {}".format(epoch, avg_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns the full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
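# --- Usage sketch (editorial addition, not part of the original file) ---
# Minimal fit/predict run of the built-in "mlp" model on synthetic heteroscedastic
# data. Targets are passed as a 2-D column array to match num_outputs=1; the config
# values are illustrative assumptions.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-2, 2, size=(256, 4)).astype('float32')
    noise_scale = 0.1 + 0.5 * np.abs(X_demo[:, 1:2])
    y_demo = (X_demo[:, :1] + noise_scale * rng.randn(256, 1)).astype('float32')
    demo_config = {"num_features": 4, "num_outputs": 1, "num_hidden": 32,
                   "batch_size": 32, "lr": 1e-2, "num_epochs": 10}
    hsr = HeteroscedasticRegression(model_type="mlp", config=demo_config, device='cpu', verbose=False)
    hsr.fit(X_demo, y_demo)
    res = hsr.predict(X_demo[:5])
    print(res.y_mean.ravel(), res.y_lower.ravel(), res.y_upper.ravel())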
|
__init__.py | from .meps_dataset import MEPSDataset
|
meps_dataset.py | # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py
# Utilization target is kept as a continuous target.
import os
import pandas as pd
def default_preprocessing(df):
"""
1. Create a new column, RACE, that is 'White' if RACEV2X = 1 and HISPANX = 2 (i.e., non-Hispanic White)
and 'non-White' otherwise
2. Restrict to Panel 19
3. RENAME all columns that are PANEL/ROUND SPECIFIC
4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1
5. Compute UTILIZATION.
"""
def race(row):
if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
return 'White'
return 'Non-White'
df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
df = df.rename(columns = {'RACEV2X' : 'RACE'})
df = df[df['PANEL'] == 19]
# RENAME COLUMNS
df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
df = df[df['REGION'] >= 0] # remove values -1
df = df[df['AGE'] >= 0] # remove values -1
df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
def utilization(row):
return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'})
df = df[['REGION','AGE','SEX','RACE','MARRY',
'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']]
return df
class MEPSDataset():
"""
The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals,
medical providers, and employers, and collects data on health services used, costs & frequency of services,
demographics, health status and conditions, etc., of the respondents.
This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19,
and rounds 1, 2, and 3 of Panel 20.
See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data.
References:
.. [#] `Medical Expenditure Panel Survey data <https://meps.ahrq.gov/mepsweb/>`_
"""
def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None):
self._dirpath = dirpath
if not self._dirpath:
self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data')
self._filepath = os.path.join(self._dirpath, 'h181.csv')
try:
df = pd.read_csv(self._filepath, sep=',', na_values=[])
except IOError as err:
print("IOError: {}".format(err))
print("To use this class, please place the heloc_dataset.csv:")
print("file, as-is, in the folder:")
print("\n\t{}\n".format(os.path.abspath(os.path.join(
os.path.abspath(__file__), 'data', 'meps_data'))))
import sys
sys.exit(1)
if custom_preprocessing:
self._data = custom_preprocessing(df)
def data(self):
return self._data |
logistic_regression.py | import autograd
import autograd.numpy as np
import numpy.random as npr
import scipy.optimize
sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1)
get_num_train = lambda inputs: inputs.shape[0]
logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params))
class LogisticRegression:
def __init__(self):
self.params = None
def set_parameters(self, params):
self.params = params
def predict(self, X):
if self.params is not None:
# Outputs probability of a label being true according to logistic model
return np.atleast_2d(sigmoid(np.dot(X, self.params))).T
else:
raise RuntimeError("Params need to be fit before predictions can be made.")
def loss(self, params, weights, inputs, targets):
# Training loss is the negative log-likelihood of the training labels.
preds = logistic_predictions(params, inputs)
label_probabilities = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(weights * np.log(label_probabilities + 1e-16))
def fit(self, weights, init_params, inputs, targets, verbose=True):
training_loss_fun = lambda params: self.loss(params, weights, inputs, targets)
# Define a function that returns gradients of training loss using Autograd.
training_gradient_fun = autograd.grad(training_loss_fun, 0)
# optimize params
if verbose:
print("Initial loss:", self.loss(init_params, weights, inputs, targets))
# opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1)
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options={'disp': verbose})
opt_params = res.x
if verbose:
print("Trained loss:", self.loss(opt_params, weights, inputs, targets))
self.params = opt_params
return opt_params
def get_test_acc(self, params, test_targets, test_inputs):
preds = np.round(self.predict(test_inputs).T).astype(int)
err = np.abs(test_targets - preds).sum()
return 1 - err/ test_targets.shape[1]
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one, inputs, targets):
return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets)
def compute_jacobian(self, params_one, weights_one, inputs, targets):
return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\
(params_one, weights_one, inputs, targets).squeeze()
###################################################
@staticmethod
def synthetic_lr_data(N=10000, D=10):
x = 1. * npr.randn(N, D)
x_test = 1. * npr.randn(int(0.3 * N), D)
w = npr.randn(D, 1)
y = sigmoid((x @ w)).ravel()
y = npr.binomial(n=1, p=y) # corrupt labels
y_test = sigmoid(x_test @ w).ravel()
# y_test = np.round(y_test)
y_test = npr.binomial(n=1, p=y_test)
return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test)
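# Illustrative usage sketch (not part of the original module): fit the model on the
# synthetic data generated above and report held-out accuracy. The variable names
# below are placeholders chosen for this example.
if __name__ == '__main__':
    x, y, x_test, y_test = LogisticRegression.synthetic_lr_data()
    model = LogisticRegression()
    init_params = 1e-2 * npr.randn(x.shape[1])
    weights = np.ones(x.shape[0])  # uniform per-example weights
    model.fit(weights, init_params, x, y.ravel(), verbose=False)
    print("test accuracy:", model.get_test_acc(model.params, y_test, x_test))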
|
hidden_markov_model.py | import autograd
import autograd.numpy as np
import scipy.optimize
from autograd import grad
from autograd.scipy.special import logsumexp
from sklearn.cluster import KMeans
class HMM:
"""
A Hidden Markov Model with Gaussian observations whose
means are unknown and whose precisions are known.
"""
def __init__(self, X, config_dict=None):
self.N, self.T, self.D = X.shape
self.K = config_dict['K'] # number of HMM states
self.I = np.eye(self.K)
self.Precision = np.zeros([self.D, self.D, self.K])
self.X = X
if config_dict['precision'] is None:
for k in np.arange(self.K):
self.Precision[:, :, k] = np.eye(self.D)
else:
self.Precision = config_dict['precision']
self.dParams_dWeights = None
self.alphaT = None # Store the final beliefs.
self.beta1 = None # store the first timestep beliefs from the beta recursion.
self.forward_trellis = {} # stores \alpha
self.backward_trellis = {} # stores \beta
def initialize_params(self, seed=1234):
np.random.seed(seed)
param_dict = {}
A = np.random.randn(self.K, self.K)
# use k-means to initialize the mean parameters
X = self.X.reshape([-1, self.D])
kmeans = KMeans(n_clusters=self.K, random_state=seed,
n_init=15).fit(X)
labels = kmeans.labels_
_, counts = np.unique(labels, return_counts=True)
pi = counts
phi = kmeans.cluster_centers_
param_dict['A'] = np.exp(A)
param_dict['pi0'] = pi
param_dict['phi'] = phi
return self.pack_params(param_dict)
def unpack_params(self, params):
param_dict = dict()
K = self.K
# For unpacking simplex parameters: have packed them as
# log(pi[:-1]) - log(pi[-1]).
unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1),
np.zeros((K, 1)),
axis=1)
)
Z = np.sum(unnorm_A[:, :-1], axis=1)
unnorm_A /= Z[:, np.newaxis]
norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True)
param_dict['A'] = norm_A
unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0))
Z = np.sum(unnorm_pi[:-1])
unnorm_pi /= Z
param_dict['pi0'] = unnorm_pi / unnorm_pi.sum()
param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K)
return param_dict
def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Computes the weighted marginal probability of the sequence xseq given parameters;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param Sigma: D * D * K, per-state precision matrices
:param A: K * K transition matrix
:return: weighted log marginal likelihood, log p(x_1, ..., x_T)
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
alpha = np.log(pi.ravel()) + wseq[0] * ll[0]
if wseq[0] == 0:
self.forward_trellis[0] = alpha[:, np.newaxis]
for t in np.arange(1, self.T):
alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t]
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.forward_trellis[t] = alpha[:, np.newaxis]
if store_belief:
# store the final belief
self.alphaT = alpha
return logsumexp(alpha)
def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Runs beta recursion;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param Sigma: D * D * K, per-state precision matrices
:param A: K * K transition matrix
:return: weighted log marginal likelihood, log p(x_1, ..., x_T)
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
beta = np.zeros_like(pi.ravel()) # log(\beta) of all ones.
max_t = ll.shape[0]
if wseq[max_t - 1] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[max_t - 1] = beta[:, np.newaxis]
for i in np.arange(1, max_t):
t = max_t - i - 1
beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1)
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[t] = beta[:, np.newaxis]
# account for the init prob
beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel())
if store_belief:
# store the final belief
self.beta1 = beta
return logsumexp(beta)
def weighted_loss(self, params, weights):
"""
For LOOCV / IF computation within a single sequence. Uses weighted alpha recursion
:param params:
:param weights:
:return:
"""
param_dict = self.unpack_params(params)
logp = self.get_prior_contrib(param_dict)
logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights)
return -logp
def loss_at_missing_timesteps(self, weights, params):
"""
:param weights: zeroed out weights indicate missing values
:param params: packed parameters
:return:
"""
# empty forward and backward trellis
self.clear_trellis()
param_dict = self.unpack_params(params)
# populate forward and backward trellis
lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True )
lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True)
assert np.allclose(lpx, lpx_alt) # sanity check
test_ll = []
# compute loo likelihood
ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision)
# compute posterior p(z_t | x_1,...t-1, t+1,...T) \forall missing t
tsteps = []
for t in self.forward_trellis.keys():
lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx
test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel()))
tsteps.append(t)
# empty forward and backward trellis
self.clear_trellis()
return -np.array(test_ll)
def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None):
if maxiter:
options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter}
else:
options_dict = {'disp': verbose, 'gtol': 1e-10}
# Define a function that returns gradients of training loss using Autograd.
training_loss_fun = lambda params: self.weighted_loss(params, weights)
training_gradient_fun = grad(training_loss_fun, 0)
if init_params is None:
init_params = self.initialize_params()
if verbose:
print("Initial loss: ", training_loss_fun(init_params))
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options=options_dict)
if verbose:
print('grad norm =', np.linalg.norm(res.jac))
return res.x
def clear_trellis(self):
self.forward_trellis = {}
self.backward_trellis = {}
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one):
return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one)
def compute_jacobian(self, params_one, weights_one):
return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\
(params_one, weights_one).squeeze()
###################################################
@staticmethod
def log_obs_lik(x, phi, Sigma):
"""
:param x: T*D*1
:param phi: 1*D*K
:param Sigma: D*D*K --- precision matrices per state
:return: ll
"""
centered_x = x - phi
ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma )
return ll
@staticmethod
def pack_params(params_dict):
param_list = [(np.log(params_dict['A'][:, :-1]) -
np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(),
np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]),
params_dict['phi'].ravel()]
return np.concatenate(param_list)
@staticmethod
def get_prior_contrib(param_dict):
logp = 0.0
# Prior
logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum()
logp += (1.1 - 1) * np.log(param_dict['A']).sum()
logp += (1.1 - 1) * np.log(param_dict['pi0']).sum()
return logp
@staticmethod
def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False):
"""
:param T: length of the sequence
:param pct_to_drop: % of T in the held out fold
:param contiguous: if True generate a block of indices to drop else generate indices by iid sampling
:return: o (the set of indices in the fold)
"""
if contiguous:
l = np.floor(pct_to_drop / 100. * T)
anchor = np.random.choice(np.arange(l + 1, T))
o = np.arange(anchor - l, anchor).astype(int)
else:
# i.i.d LWCV
o = np.random.choice(T - 2, size=int(pct_to_drop / 100. * T), replace=False) + 1
return o
@staticmethod
def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, variances_of_mean=1.0,
diagonal_upweight=False):
"""
:param K: Number of HMM states
:param T: length of the sequence
:param D: dimensionality of the observations
:param sigma0: observation covariance (identity if None)
:param seed: random number seed
:param variances_of_mean: variance of the Gaussian used to draw the state means
:param diagonal_upweight: if True, add mass to the diagonal of A to encourage self transitions
"""
N = 1 # For structured IJ we will remove data / time steps from a single sequence
np.random.seed(seed)
if sigma0 is None:
sigma0 = np.eye(D)
A = np.random.dirichlet(alpha=np.ones(K), size=K)
if diagonal_upweight:
A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions
A = A / A.sum(axis=1)
pi0 = np.random.dirichlet(alpha=np.ones(K))
mus = np.random.normal(size=(K, D), scale=np.sqrt(variances_of_mean))
zs = np.empty((N, T), dtype=int)
X = np.empty((N, T, D))
for n in range(N):
zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0))
X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0)
for t in range(1, T):
zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :]))
X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0)
return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus}
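# Illustrative usage sketch (not part of the original module): generate a short synthetic
# sequence with the helper above and fit the HMM with all timesteps observed. The config
# keys ('K', 'precision') mirror those read in __init__; the sizes are placeholders.
if __name__ == '__main__':
    data = HMM.synthetic_hmm_data(K=3, T=50, D=2)
    model = HMM(data['X'], config_dict={'K': 3, 'precision': None})
    weights = np.ones(model.T)  # weight of 1 keeps every emission term switched on
    opt_params = model.fit(weights, verbose=False, maxiter=50)
    print(model.unpack_params(opt_params)['A'])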
|
misc.py | import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
from copy import deepcopy
import numpy as np
import numpy.random as npr
def make_batches(n_data, batch_size):
return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)]
def generate_regression_data(seed, data_count=500):
"""
Generate data from a noisy sine wave.
:param seed: random number seed
:param data_count: number of data points.
:return:
"""
np.random.seed(seed)
noise_var = 0.1
x = np.linspace(-4, 4, data_count)
y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
x_train = x[idx[:train_count], np.newaxis ]
x_test = x[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(x_train, 0)
std = np.std(x_train, 0)
x_train = (x_train - mu) / std
x_test = (x_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
y_train = (y_train - mu) / std
train_stats = dict()
train_stats['mu'] = mu
train_stats['sigma'] = std
return x_train, y_train, x_test, y_test, train_stats
def form_D_for_auucc(yhat, zhatl, zhatu):
# a handy routine to format data as needed by the UCC fit() method
D = np.zeros([yhat.shape[0], 3])
D[:, 0] = yhat.squeeze()
D[:, 1] = zhatl.squeeze()
D[:, 2] = zhatu.squeeze()
return D
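def _example_form_D_for_auucc():
    # Illustrative sketch (not part of the original module): build the 3-column matrix
    # [prediction, lower band width, upper band width] that fitted_ucc_w_nullref() below
    # passes to the UCC fit() call. The arrays here are placeholder values.
    yhat = np.array([1.0, 2.0, 3.0])    # point predictions
    lower = np.array([0.5, 1.4, 2.2])   # lower interval bounds
    upper = np.array([1.6, 2.5, 3.9])   # upper interval bounds
    return form_D_for_auucc(yhat, yhat - lower, upper - yhat)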
def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper):
"""
Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference
:param y_pred_lower:
:param y_pred_mean:
:param y_pred_upper:
:param y_true:
:return: ucc object fitted for two systems: target + null reference
"""
# form matrix for ucc:
X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(),
y_pred_mean.squeeze() - y_pred_lower.squeeze(),
y_pred_upper.squeeze() - y_pred_mean.squeeze())
# form matrix for a 'null' system (constant band)
X_null = deepcopy(X_for_ucc)
X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC)
# create an instance of ucc and fit data
from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
u = ucc()
u.fit([X_for_ucc, X_null], y_true.squeeze())
return u
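def _example_fitted_ucc_w_nullref():
    # Illustrative sketch with placeholder data (not part of the original module): fit a UCC
    # for a toy predictor whose prediction interval is mean +/- 1, together with the
    # constant-band null reference added by the helper above.
    rng = np.random.RandomState(0)
    y_true = rng.randn(50)
    y_pred_mean = y_true + 0.1 * rng.randn(50)
    return fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_mean - 1.0, y_pred_mean + 1.0)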
def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs):
"""
Args:
task_type: (str) regression or classification.
metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for
classification and ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] for regression.
greater_is_better: if False, the scores are negated before returning.
**kwargs: additional arguments specific to some metrics.
Returns:
sklearn compatible scorer function.
"""
from uq360.metrics.classification_metrics import compute_classification_metrics
from uq360.metrics.regression_metrics import compute_regression_metrics
def sklearn_compatible_score(model, X, y_true):
"""
Args:
model: The model being scored. Currently uq360 and sklearn models are supported.
X: Input features.
y_true: ground truth values for the target.
Returns:
Computed score of the model.
"""
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.algorithms.posthocuq import PostHocUQ
if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ):
# uq360 models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict(X).y_prob,
option=metric,
**kwargs
)[metric]
elif task_type == "regression":
y_mean, y_lower, y_upper = model.predict(X)
score = compute_regression_metrics(
y_true=y_true,
y_mean=y_mean,
y_lower=y_lower,
y_upper=y_upper,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError
else:
# sklearn models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict_proba(X),
option=metric,
**kwargs
)[metric]
else:
if metric in ["rmse", "r2"]:
score = compute_regression_metrics(
y_true=y_true,
y_mean=model.predict(X),
y_lower=None,
y_upper=None,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError("{} is not supported for sklearn regression models".format(metric))
if not greater_is_better:
score = -score
return score
return sklearn_compatible_score
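def _example_sklearn_scorer():
    # Illustrative sketch (not part of the original module): the callable returned above has
    # the (model, X, y_true) signature scikit-learn expects, so it can be passed as the
    # `scoring` argument of cross_val_score. The dataset and estimator are placeholders.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_score
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    scorer = make_sklearn_compatible_scorer("classification", "accuracy")
    return cross_val_score(LogisticRegression(max_iter=1000), X, y, scoring=scorer, cv=3)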
class DummySklearnEstimator(ABC):
def __init__(self, num_classes, base_model_prediction_fn):
self.base_model_prediction_fn = base_model_prediction_fn
self.classes_ = [i for i in range(num_classes)]
def fit(self):
pass
def predict_proba(self, X):
return self.base_model_prediction_fn(X)
|
optimizers.py | from builtins import range
import autograd.numpy as np
def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False):
"""Adapted from autograd.misc.optimizers"""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g, polyak)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size*mhat/(np.sqrt(vhat) + eps)
return x |
generate_1D_regression_data.py | import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch as torch
def make_data_gap(seed, data_count=100):
import GPy
npr.seed(seed)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \sigma^2). \sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
X_train = X[idx[:train_count], np.newaxis ]
X_test = X[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(X_train, 0)
std = np.std(X_train, 0)
X_train = (X_train - mu) / std
X_test = (X_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
# mu = 0
# std = 1
y_train = (y_train - mu) / std
y_test = (y_test -mu) / std
train_stats = dict()
train_stats['mu'] = torch.FloatTensor([mu])
train_stats['sigma'] = torch.FloatTensor([std])
return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\
train_stats |
dataTransformer.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
import numpy as np
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
import logging
class dataTransformer():
def __init__(self):
self.log = logging.getLogger('eion')
def startTransformer(self,df,features,target,transType):
scaler ='None'
if target in features:
features.remove(target)
transFeatures=features
transDfColumns=[]
dataframe=df[transFeatures]
#targetArray=np.array(df[target])
#targetArray.shape = (len(targetArray), 1)
self.log.info("Data Normalization has started")
if transType.lower() =='standardscaler':
scaler = StandardScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='minmax':
scaler=MinMaxScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='lognormal':
print(dataframe)
scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe)
transDf = scaler.transform(dataframe)
else:
self.log.info("Need to implement")
#features.append(target)
#scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features)
return transDf,features,scaler |
preprocess.py | import pandas as pd
tab = ' '
VALID_AGGREGATION_METHODS = ['mean','sum']
VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year']
VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}}
VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys())
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
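def _example_option_helpers():
    # Illustrative self-check of the two helpers above (not part of the original file);
    # the dictionary and string values are placeholders.
    assert get_one_true_option({'linear': 'False', 'spline': 'True'}, 'linear') == 'spline'
    assert get_boolean('True') is True and get_boolean('false') is False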
def get_source_delta( data: pd.DataFrame):
MAX_SAMPLE_TRY = 20
if len( data) <= 1:
return None
time_delta = data.index[-1] - data.index[-2]
count = {}
for i in range(len(data)):
if i == MAX_SAMPLE_TRY or i == len(data) - 1: # stop before indexing past the last row
break
delta = data.index[i+1] - data.index[i]
if delta not in count.keys():
count[delta] = 1
else:
count[delta] += 1
if count:
return max(count, key=count.get)
else:
return None
class timeSeries():
def __init__( self, config, datetime, log=None):
self.datetime = datetime
self.validate_config(config)
self.log = log
def validate_config( self, config):
if not self.datetime or self.datetime.lower() == 'na':
raise ValueError('date time feature is not provided')
self.config = {}
method = get_one_true_option(config.get('interpolation',None))
self.config['interpolate'] = {}
self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS
self.config['interpolate']['method'] = method
self.config['rolling'] = {}
self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False))
self.config['rolling']['size'] = int( config.get('rollingWindowSize',1))
if self.config['rolling']['size'] < 1:
raise ValueError('Rolling window size should be greater than 0.')
self.config['aggregation'] = {}
aggregation = config.get('aggregation',{})
agg_method = get_one_true_option(aggregation['type'])
self.config['aggregation'] = {}
self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS
self.config['aggregation']['method'] = agg_method
granularity = aggregation.get('granularity',{})
granularity_unit = get_one_true_option( granularity.get('unit',None))
if granularity_unit in VALID_GRANULARITY_UNITS:
granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'}
size = int(granularity.get('size',10))
granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit)
self.config['aggregation']['granularity'] = {}
self.config['aggregation']['granularity']['unit'] = granularity_unit
self.config['aggregation']['granularity']['size'] = size
def log_info(self, msg, type='info'):
if self.log:
if type == 'error':
self.log.error( msg)
else:
self.log.info( msg)
else:
print( msg)
def is_down_sampling(self, data, size, granularity_unit):
down_sampling = False
if granularity_unit in ['M', 'Y']:
return True
else:
target_delta = pd.Timedelta(size , granularity_unit)
source_delta = get_source_delta(data)
if not source_delta:
raise ValueError('Could not find the data frame time frequency')
return source_delta < target_delta
def run( self, data):
if self.datetime not in data.columns:
raise ValueError(f"Date time feature '{self.datetime}' is not present in data")
try:
# data[self.datetime] = pd.to_datetime( data[self.datetime])
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime])
except:
#for utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True)
data.set_index( self.datetime, inplace=True)
except:
raise ValueError(f"can not convert '{self.datetime}' to dateTime")
if self.config.get('interpolate',{}).get('enabled',False):
method = self.config['interpolate']['method']
self.log_info(f"Applying Interpolation using {method}")
methods_mapping = {'timebased': 'time'}
self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method)
data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method])
if self.config.get('rolling',{}).get('enabled',False):
if self.config['rolling']['size'] > len( data):
raise ValueError('Rolling window size is greater than dataset size')
self.log_info(f"Applying rolling window( moving avg) with size {self.config['rolling']['size']}")
data = data.rolling( self.config['rolling']['size']).mean()
data = data.iloc[self.config['rolling']['size'] - 1:]
aggregation = self.config.get('aggregation',{})
if aggregation.get('enabled',False):
method = aggregation.get('method','mean')
self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit']
if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']):
self.log_info(f"Applying down sampling( {self.rule})")
if method == 'mean':
data = data.resample( self.rule).mean()
elif method == 'sum':
data = data.resample( self.rule).sum()
else:
self.log_info(f"Applying up sampling using forward fill method( {self.rule})")
data = data.resample( self.rule).ffill()
data.reset_index( inplace=True, names=self.datetime)
return data
def get_code(self, indent=0):
tab = ' '
code = ''
code += f"""
def preprocess( data):
try:
#for non utc timestamp
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'])
except:
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True)
data.set_index( '{self.datetime}', inplace=True)
"""
if self.config.get('interpolate',{}).get('enabled',False):
code += tab + f"data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\n"
if self.config.get('rolling',{}).get('enabled',False):
code += tab + f"data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\n"
if self.config.get('aggregation',{}).get('enabled',False):
code += tab + f"data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\n"
code += tab + f"data.reset_index( inplace=True, names='{self.datetime}')\n"
code += tab + "return data\n"
return code
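# Illustrative usage sketch (not part of the original file). The config keys mirror the ones
# read in validate_config(); the column names, frequency and sizes are placeholders.
if __name__ == '__main__':
    cfg = {
        'interpolation': {'linear': 'True'},
        'rollingWindow': 'False',
        'aggregation': {
            'type': {'mean': 'True'},
            'granularity': {'unit': {'hour': 'True'}, 'size': 1},
        },
    }
    raw = pd.DataFrame({
        'timestamp': pd.date_range('2021-01-01', periods=96, freq='15Min'),
        'value': range(96),
    })
    ts = timeSeries(cfg, datetime='timestamp')
    print(ts.run(raw).head())  # 15-minute data down-sampled to hourly means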
|
textDataProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import os
import sys
import string
import spacy
#import en_core_web_sm
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
try:
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
except:
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.base import TransformerMixin
from nltk.stem import WordNetLemmatizer
import re
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
class textDataProfiler():
def __init__(self):
self.data=None
#self.nlp=en_core_web_sm.load()
self.punctuations = string.punctuation
self.stopwords = list(STOP_WORDS)
def startTextProfiler(self,df,target):
try:
dataColumns = list(df.columns)
print(' \n No of rows and columns in dataFrame',df.shape)
print('\n features in dataFrame',dataColumns)
dataFDtypes=self.dataFramecolType(df)
print('\n feature types in dataFrame',dataFDtypes)
trainX=df['text']
trainY=df[target]
return trainX,trainY
except Exception as inst:
print('startTextProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except Exception as e:
print("error in dataFramecolyType",e)
return dataFDtypes
def textTokenizer(self,text):
try:
parser = English()
tokens = parser(text)
tokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in tokens ]
tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ]
return tokens
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def cleanText(self,text):
try:
text=str(text).strip().lower()
for punctuation in string.punctuation:
text = text.replace(punctuation, '')
return text
except Exception as inst:
print('cleanText code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def textTokenization(self,text):
try:
tokenizedText=word_tokenize(text)
return tokenizedText
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def textLemmitizer(self,text):
try:
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
Final_words = []
word_Lemmatized = WordNetLemmatizer()
for word, tag in pos_tag(text):
if word not in stopwords.words('english') and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
return str(Final_words)
except Exception as inst:
print('textLemmitizer code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
class TextCleaner(TransformerMixin):
def clean_text(self,text):
try:
text=str(text).strip().lower()
text = text.replace("isn't", "is not")
text = text.replace("aren't", "are not")
text = text.replace("ain't", "am not")
text = text.replace("won't", "will not")
text = text.replace("didn't", "did not")
text = text.replace("shan't", "shall not")
text = text.replace("haven't", "have not")
text = text.replace("hadn't", "had not")
text = text.replace("hasn't", "has not")
text = text.replace("don't", "do not")
text = text.replace("wasn't", "was not")
text = text.replace("weren't", "were not")
text = text.replace("doesn't", "does not")
text = text.replace("'s", " is")
text = text.replace("'re", " are")
text = text.replace("'m", " am")
text = text.replace("'d", " would")
text = text.replace("'ll", " will")
text = re.sub(r'^https?:\/\/.*[\r\n]*', ' ', text, flags=re.MULTILINE)
text = re.sub(r'[\w\.-]+@[\w\.-]+', ' ', text, flags=re.MULTILINE)
for punctuation in string.punctuation:
text = text.replace(punctuation,' ')
text = re.sub(r'[^A-Za-z0-9\s]',r' ',text)
text = re.sub(r'\n',r' ',text)
text = re.sub(r'[0-9]',r' ',text)
wordnet_lemmatizer = WordNetLemmatizer()
text = " ".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()])
return text
except Exception as inst:
print('TextCleaner clean_text code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def text_cleaner(self,text):
text = self.clean_text(text)
stop_words = set(stopwords.words('english'))
text_tokens = word_tokenize(text)
out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1))
return(out)
def transform(self, X, **transform_params):
# Cleaning Text
return [self.clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {} |
imageAug.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import random
from matplotlib import pyplot as plt
import cv2
import albumentations as A
import os
import pandas as pd
from pathlib import Path
class ImageAugmentation():
def __init__(self, dataLocation, csvFile):
self.AugmentationOptions = {"Flip": {"operation": A.HorizontalFlip, "suffix":"_flip"},
"Rotate": {"operation": A.Rotate, "suffix":"_rotate"},
"Shift": {"operation": A.RGBShift, "suffix":"_shift"},
"Crop": {"operation": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], "suffix":"_crop"},
"Contrast": {"operation": A.RandomContrast, "suffix":"_cont"},
"Brightness": {"operation": A.RandomBrightness, "suffix":"_bright"},
"Blur": {"operation": A.GaussianBlur, "suffix":"_blur"}
}
self.dataLocation = dataLocation
self.csvFile = csvFile
def __applyAugmentationClass(self, image, augmentation,limit):
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transform = self.AugmentationOptions[augmentation]["operation"][0](height=int(height*crop_percentage), width=int(width*crop_percentage) )
elif augmentation == "Blur":
transform = self.AugmentationOptions[augmentation]["operation"](blur_limit = limit)
elif augmentation in ["Contrast","Brightness"]:
transform = self.AugmentationOptions[augmentation]["operation"](limit = limit)
else:
transform = self.AugmentationOptions[augmentation]["operation"]()
return transform(image=image)
def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7):
transformOptions = []
if bboxes:
bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids'])
else:
bbox_params = None
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transformOptions.append(self.AugmentationOptions[augmentation]["operation"][1](height=int(height*crop_percentage), width=int(width*crop_percentage) ))
elif augmentation == "Blur":
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](blur_limit = limit))
elif augmentation in ["Contrast","Brightness"]:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](limit = limit))
else:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"]())
transform = A.Compose(
transformOptions,
bbox_params=bbox_params,
)
random.seed(seed)
return transform(image=image, bboxes=bboxes, category_ids=category_ids)
else:
return None
def getBBox(self, df, imageLoc, category_name_to_id):
subDf = df[df['loc']==imageLoc]
boxes = []
category = []
for index, row in subDf.iterrows():
boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']])
category.append(category_name_to_id[row['Label']])
return boxes, category
def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op):
for x in range(numberofImages):
bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id)
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
count = 1
row = df[df['loc']==imageLoc].iloc[0]
filename = (Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
newImage = str(Path(imageLoc).parent/filename)
for index,bbox in enumerate(transformed['bboxes']):
data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True}
count += 1
df=df.append(data, ignore_index=True)
cv2.imwrite(newImage, transformed['image'])
return df
def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}):
category_id_to_name = {v+1:k for v,k in enumerate(classes_names)}
category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)}
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key)
return df
def __augClassificationImage(self, imageLoc, df,limit,imageindex,op):
data = {}
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentationClass(image, op,limit)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
location = Path(imageLoc).parent
filename = (Path(imageLoc).stem +'_'+ str(imageindex) + self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image'])
data['File'] = filename
data['Label'] = df[df['File']==Path(imageLoc).name]["Label"].iloc[0]
data['AugmentedImage'] = True
data['loc'] = str(location/filename)
return data
def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None):
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
for x in range(numberofImages):
rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key)
df=df.append(rows, ignore_index=True)
return df
def removeAugmentedImages(self, df):
removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist()
#df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink())
for file in removeDf:
if file:
Path(file).unlink()
def augment(self, modelType="imageclassification",params=None,csvSavePath = None,augConf={}):
if isinstance(params, dict) and any(params.values()):
df = pd.read_csv(self.csvFile)
if not self.dataLocation.endswith('/'):
images = self.dataLocation+'/'
else:
images = self.dataLocation
if modelType == "imageclassification":
images = images + df['File']
else:
images = images + df['File']
df['loc'] = images
images = set(images.tolist())
option = {}
for key in list(self.AugmentationOptions.keys()):
option[key] = params.get(key, False)
optionDf = pd.DataFrame(columns=list(option.keys()))
for i in range(len(images)):
optionDf = optionDf.append(option, ignore_index=True)
if modelType == "imageclassification":
df = self.__classification(images, df, optionDf,augConf)
else:
classes_names = sorted(df['Label'].unique().tolist())
df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf)
df.to_csv(self.csvFile, index=False)
return self.csvFile |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
textProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import logging
from distutils.util import strtobool
import pandas as pd
from text import TextProcessing
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def textProfiler(self, textCorpus, conf_json, pipeList, max_features):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', [])
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
if cleaning_kwargs['fExpandContractions']:
cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
textCorpus = textProcessor.transform(textCorpus)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib)
textCorpus = posTagger.transform(textCorpus)
pipeList.append(("posTagger",posTagger))
ngram_min = 1
ngram_max = 1
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
conversion_method = get_one_true_option(textConversionMethod, '') # empty default so the .lower() checks below fall through to the 'NA' branch
if conversion_method.lower() == "countvectors":
X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["word2vec","fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method)
wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel()
X = wordEmbeddingVecotrizer.transform(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",wordEmbeddingVecotrizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer":
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2')
X = model.encode(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",model))
self.log.info('----------> Conversion Method: SentenceTransformer')
elif conversion_method.lower() == 'tf_idf':
X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: TF_IDF')
else:
df1 = pd.DataFrame()
df1['tokenize'] = textCorpus
self.log.info('----------> Conversion Method: NA')
return df1, pipeList,conversion_method
|
generate_tfrecord.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import glob
import pandas as pd
import io
import xml.etree.ElementTree as ET
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util, label_map_util
from collections import namedtuple
from pathlib import Path
def class_text_to_int(row_label, label_map_dict):
return label_map_dict[row_label]
def split(df, group):
data = namedtuple('data', ['File', 'object'])
gb = df.groupby(group)
return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path, label_map_dict):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
File = group.File.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmin_n = min(row['xmin'], row['xmax'])
xmax_n = max(row['xmin'], row['xmax'])
ymin_n = min(row['ymin'], row['ymax'])
ymax_n = max(row['ymin'], row['ymax'])
xmin_new = min(xmin_n, width)
xmax_new = min(xmax_n, width)
ymin_new = min(ymin_n, height)
ymax_new = min(ymax_n, height)
xmn = xmin_new / width
xmins.append(xmn)
xmx = xmax_new / width
xmaxs.append(xmx)
ymn = ymin_new / height
ymins.append(ymn)
ymx = ymax_new / height
ymaxs.append(ymx)
classes_text.append(row['Label'].encode('utf8'))
classes.append(class_text_to_int(row['Label'], label_map_dict))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(File),
'image/source_id': dataset_util.bytes_feature(File),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def labelFile(classes_names, label_map_path):
pbtxt_content = ""
for i, class_name in enumerate(classes_names):
pbtxt_content = (
pbtxt_content
+ "item {{\n id: {0}\n name: '{1}'\n}}\n\n".format(i + 1, class_name)
)
pbtxt_content = pbtxt_content.strip()
with open(label_map_path, "w") as f:
f.write(pbtxt_content)
def createLabelFile(train_df, save_path):
labelmap_path = str(Path(save_path)/ 'label_map.pbtxt')
classes_names = sorted(train_df['Label'].unique().tolist())
labelFile(classes_names, labelmap_path)
return labelmap_path, len(classes_names)
def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path):
outputPath = str(Path(output_dir)/ 'train.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(train_df, 'File')
label_map = label_map_util.load_labelmap(labelmap_path )
label_map_dict = label_map_util.get_label_map_dict(label_map)
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
if len(test_df):
outputPath = str(Path(output_dir)/ 'test.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(test_df, 'File')
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
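# Illustrative usage sketch (not part of the original script). The annotation columns
# ('File', 'Label', 'xmin', 'ymin', 'xmax', 'ymax') mirror those consumed above; the image
# directory, output directory and box values are placeholders, and the listed images are
# assumed to exist on disk.
if __name__ == '__main__':
    annotations = pd.DataFrame([
        {'File': 'img_001.jpg', 'Label': 'car', 'xmin': 10, 'ymin': 20, 'xmax': 110, 'ymax': 120},
        {'File': 'img_002.jpg', 'Label': 'person', 'xmin': 5, 'ymin': 5, 'xmax': 60, 'ymax': 150},
    ])
    labelmap_path, n_classes = createLabelFile(annotations, 'output')
    generate_TF_record('images', 'output', annotations, pd.DataFrame(), labelmap_path)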
|
dataProfiler.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
try:
import transformations.data_profiler_functions as cs
except:
import data_profiler_functions as cs
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = False
log_suffix = f'[{Path(__file__).stem}] '
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None):
if not isinstance(xtrain, pd.DataFrame):
raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
elif ytrain is not None:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.data_path = data_path
self.encode_target = encode_target
self.label_encoder = None
self.data = xtrain
self.keep_unprocessed = keep_unprocessed
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
self.force_numeric_conv = []
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
self.log_input_feat_info()
def log_input_feat_info(self):
if self.featureDict:
feature_df = pd.DataFrame(self.featureDict)
log_text = '\nPreprocessing options:'
log_text += '\n\t'+str(feature_df.head( len(self.featureDict))).replace('\n','\n\t')
self.log.info(log_text)
def log_dataframe(self, msg=None):
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\n\t'+str(self.data.head(2)).replace('\n','\n\t')
log_text += ('\n\t' + buffer.getvalue().replace('\n','\n\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
print(self.data.info())
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
try:
if self.text_feature:
from text.textProfiler import set_pretrained_model
set_pretrained_model(pipe)
conversion_method = self.get_conversion_method()
process_data = pipe.fit_transform(self.data, y=self.target)
# save for testing
if DEBUG_ENABLED:
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data)
df.to_csv('debug_preprocessed.csv', index=False)
if self.text_feature and conversion_method == 'latentsemanticanalysis':
n_size = self.get_tf_idf_output_size( pipe)
dimensions = self.get_tf_idf_dimensions()
if n_size != dimensions:
dimensions = n_size
from sklearn.decomposition import TruncatedSVD
reducer = TruncatedSVD( n_components = dimensions)
reduced_data = reducer.fit_transform( process_data[:,-n_size:])
text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer))
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1)
last_step = self.feature_expender.pop()
self.feature_expender.append({'feature_reducer':list(last_step.values())[0]})
except EOFError as e:
if "Compressed file ended before the end-of-stream marker was reached" in str(e):
raise EOFError('Pretrained model is not downloaded properly')
self.update_output_features_names(pipe)
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if self.keep_unprocessed:
df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.dropna()
if self.config['removeDuplicate']:
self.drop_duplicate()
#self.check_categorical_features()
#self.string_to_numeric()
self.process_target()
self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)}
self.parse_process_step_config()
self.process_drop_fillna()
self.log_type_conversion()
self.update_num_fill_dict()
if DEBUG_ENABLED:
print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
if DEBUG_ENABLED:
self.log.info(self.process_method)
self.log.info(self.pipeline)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.is_target_available():
# drop null values
self.__update_index( self.target.notna(), 'target')
if self.encode_target:
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{}))
normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none')
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{}))
cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{}))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def get_tf_idf_dimensions(self):
dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default')
return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim]
def get_tf_idf_output_size(self, pipe):
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
return len(v)
return 0
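# After fitting, rebuild the flat output column list: for every expander step
# recorded in self.feature_expender (one-hot encoder, text vectorizer or
# feature reducer), look up the generated feature names on the fitted pipeline
# and splice them into self.output_columns at the recorded position, shifting
# later positions by the number of names already inserted.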
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
index_shifter = 0
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
for key,value in start_index.items():
for k,v in value.items():
index_shifter += len(v)
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
#print(start_index)
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
self.output_columns[key:key] = v
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
from text.textProfiler import textProfiler
from text.textProfiler import textCombine
pipeList = []
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.cleaner(conf_json, pipeList, self.data_path)
pipeList = obj.embedding(conf_json, pipeList)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
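# Group numeric columns by their (imputer, scaler) combination and categorical
# columns by their (imputer, encoder) combination; each group becomes a small
# Pipeline whose first step is a passthrough ColumnTransformer selecting the
# group's columns, and all groups are later combined with FeatureUnion in
# transform(). One-hot encoded groups are recorded in self.feature_expender so
# their expanded column names can be spliced in afterwards.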
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
# Fill method 'drop': drop the rows with missing values during training, but replace them with zero during prediction
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in cs.supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
self.num_fill_method_dict[f][en].append(col)
if not self.num_fill_method_dict[f][en] :
del self.num_fill_method_dict[f][en]
if not self.num_fill_method_dict[f]:
del self.num_fill_method_dict[f]
def update_cat_fill_dict(self):
self.cat_fill_method_dict = {}
if 'catFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['categorical']:
self.cat_fill_method_dict[f] = {}
for en in cs.supported_method['categoryEncoding']:
self.cat_fill_method_dict[f][en] = []
for col in self.cat_feature:
catFillDict = self.process_method.get('catFill',{})
catEncoderDict = self.process_method.get('catEncoder',{})
if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
self.cat_fill_method_dict[f][en].append(col)
if not self.cat_fill_method_dict[f][en] :
del self.cat_fill_method_dict[f][en]
if not self.cat_fill_method_dict[f]:
del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed))
self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed))
self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed))
self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed))
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text']
skipped_types = ['date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items() if v in allowed_types}
skipped_features = [k for k,v in data_types.items() if v in skipped_types]
if skipped_features:
self.keep_unprocessed.extend( skipped_features)
self.keep_unprocessed = list(set(self.keep_unprocessed))
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
return list(self.colm_type.values())
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
else:
valid_feat_type = data_types
for k,v in valid_feat_type.items():
if v != self.colm_type[k].name:
try:
self.data.astype({k:v})
self.colm_type.update({k:self.data[k].dtype})
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
except:
self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason)
if v == np.dtype('float64') and self.colm_type[k].name == 'object':
if self.check_numeric( k):
self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce')
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
self.force_numeric_conv.append( k)
else:
raise ValueError(f"Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%")
self.data = self.data.astype(valid_feat_type)
self.__update_type()
def check_numeric(self, feature):
col_values = self.data[feature].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
return True
return False
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
self.data[columns] = self.data[columns].applymap(to_number)
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
def check_categorical_features(self):
num_data = self.data.select_dtypes(include='number')
num_data_unique = num_data.nunique()
num_to_cat_col = {}
for i, value in enumerate(num_data_unique):
if value < self.config['categoryMaxLabel']:
num_to_cat_col[num_data_unique.index[i]] = 'category'
if num_to_cat_col:
self.update_type(num_to_cat_col, 'numerical to categorical')
str_to_cat_col = {}
str_data = self.data.select_dtypes(include='object')
str_data_unique = str_data.nunique()
for i, value in enumerate(str_data_unique):
if value < self.config['categoryMaxLabel']:
str_to_cat_col[str_data_unique.index[i]] = 'category'
for colm in str_data.columns:
if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']:
str_to_cat_col[colm] = 'category'
if str_to_cat_col:
self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
if feat_to_remove:
self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def __update_index(self, indices, reason=''):
if isinstance(indices, (pd.Series, np.ndarray)) and len(indices) == len(self.data):
if not indices.all():
self.data = self.data[indices]
if self.is_target_available():
self.target = self.target[indices]
self.log_update_index((indices == False).sum(), reason)
def dropna(self):
self.data.dropna(how='all',inplace=True)
if self.is_target_available():
self.target = self.target[self.data.index]
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
self.__update_index( ~index, reason='duplicate')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_update_index(self,count, reason):
if count:
if reason == 'target':
self.log.info('-------> Null Target Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'duplicate':
self.log.info('-------> Duplicate Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'outlier':
self.log.info(f'-------> Dropped rows count: {count}')
self.log.info('Status:- |... Outlier treatment done')
self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\nStatus:- !... Normalization treatment done')
for method in cs.supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\nStatus:- !... Fillna for numeric feature done')
for method in cs.supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\nStatus:- !... FillNa for categorical feature done')
for method in cs.supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
#if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
# index feature can be time based
count = (self.data[feat] - self.data[feat].shift() == 1).sum()
if len(self.data) - count == 1:
index_feature.append(feat)
self.drop_features(index_feature, "index")
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in cs.supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]):
method = cs.target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in cs.supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}")
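# Column-wise outlier handling: detect outliers per numeric feature (IQR or
# z-score), then either drop those rows, replace them with the column mean, or
# just log the count, depending on the configured outlier operation; optionally
# an IsolationForest over all numeric features drops the rows it flags as
# outliers.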
def apply_outlier(self):
inlier_indice = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = cs.findiqrOutlier(self.data[k])
elif v == 'zscore':
index = cs.findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
if k in self.process_method['outlierOperation'].keys():
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indice = np.logical_and(inlier_indice, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
if self.numeric_feature:
index = cs.findiforestOutlier(self.data[self.numeric_feature])
inlier_indice = np.logical_and(inlier_indice, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indice.sum() != len(self.data):
self.__update_index(inlier_indice, 'outlier')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method not in ['disable', 'na']:
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlierOperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder()
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="ignore")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder()
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
def recommenderStartProfiler(self,modelFeatures):
return cs.recommenderStartProfiler(self,modelFeatures)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation)
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2)
def get_conversion_method(self):
return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower()
def set_features(features,profiler=None):
return cs.set_features(features,profiler)
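# --- Illustrative sketch (not part of the original module) --------------------
# A minimal, self-contained example of the FeatureUnion-of-ColumnTransformer
# pattern that profiler.create_pipeline() assembles; the frame and column names
# are hypothetical, and Pipeline/ColumnTransformer/FeatureUnion/SimpleImputer/
# MinMaxScaler are assumed to be importable here as they are used elsewhere in
# this module.
if __name__ == '__main__':
    _demo = pd.DataFrame({'a': [1.0, None, 3.0], 'b': [10.0, 20.0, None]})
    _union = FeatureUnion([
        ('median_minmax', Pipeline([
            ('selector', ColumnTransformer([("selector", "passthrough", ['a', 'b'])], remainder="drop")),
            ('median', SimpleImputer(strategy='median')),
            ('minmax', MinMaxScaler())
        ]))
    ])
    # Missing values are imputed with the column median, then scaled to [0, 1].
    print(_union.fit_transform(_demo))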
|
data_profiler_functions.py | import os
import sys
import numpy as np
import scipy
import pandas as pd
from pathlib import Path
default_config = {
'misValueRatio': '1.0',
'numericFeatureRatio': '1.0',
'categoryMaxLabel': '20',
'str_to_cat_len_max': 10
}
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable', 'na'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
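# Outlier helpers used by profiler.apply_outlier(): findiqrOutlier flags values
# outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR], findzscoreOutlier keeps |z-score| < 3,
# and findiforestOutlier keeps rows that IsolationForest labels as inliers
# (prediction == 1); each returns a boolean mask where True means "keep".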
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
from sklearn.ensemble import IsolationForest
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
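# Example behaviour (illustrative inputs, not taken from the original code):
#   get_one_true_option({'median': 'True', 'mean': 'False'})   -> 'median'
#   get_one_true_option({}, default_value='none')              -> 'none'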
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def recommenderStartProfiler(self,modelFeatures):
try:
self.log.info('----------> FillNA:0')
self.data = self.data.fillna(value=0)
self.log.info('Status:- !... Missing value treatment done')
self.log.info('----------> Remove Empty Row')
self.data = self.data.dropna(axis=0,how='all')
self.log.info('Status:- !... Empty feature treatment done')
userId,itemId,rating = modelFeatures.split(',')
self.data[itemId] = self.data[itemId].astype(np.int32)
self.data[userId] = self.data[userId].astype(np.int32)
self.data[rating] = self.data[rating].astype(np.float32)
return self.data
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
return(self.data)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
try:
dataset_directory = Path(folderlocation)
dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name']
tfrecord_directory = Path(deployLocation)/'Video_TFRecord'
from savp import PreprocessSAVP
import csv
csvfile = open(dataset_csv_file, newline='')
csv_reader = csv.DictReader(csvfile)
PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory)
dataColumns = list(self.data.columns)
VideoProcessing = True
return dataColumns,VideoProcessing,tfrecord_directory
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
import os
try:
features = [doc_col_1, doc_col_2]
pipe = None
dataColumns = list(self.data.columns)
self.numofCols = self.data.shape[1]
self.numOfRows = self.data.shape[0]
from transformations.textProfiler import textProfiler
self.log.info('-------> Execute Fill NA With Empty String')
self.data = self.data.fillna(value=" ")
self.log.info('Status:- |... Missing value treatment done')
self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
from tensorflow.keras.preprocessing.text import Tokenizer
pipe = Tokenizer()
pipe.fit_on_texts(self.data['text'].values)
self.log.info('-------> Tokenizer: Fit on Concatenate Field')
self.log.info('Status:- |... Tokenizer the text')
self.data[doc_col_1] = self.data[doc_col_1].astype(str)
self.data[doc_col_2] = self.data[doc_col_2].astype(str)
return (self.data, pipe, self.target_name, features)
except Exception as inst:
self.log.info("StartProfiler failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
def set_features(features,profiler=None):
if profiler:
features = [x for x in features if x not in profiler.added_features]
return features + profiler.text_feature
return features |
dataReader.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import sys
import os
import warnings
import logging
from pathlib import Path
import random
from sklearn.model_selection import train_test_split
import operator
import re
import pdfplumber
class dataReader():
def __init__(self):
self.dataDf =None
self.log = logging.getLogger('eion')
def readCsv(self,dataPath,featureList,targetColumn):
data=pd.read_csv(dataPath)
dataDf=data[featureList]
predictDf=data[targetColumn]
return dataDf,predictDf
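# Build a pandas DataFrame.query() expression from a list of filter dicts, each
# holding 'feature', 'condition' (equals/notequals/lessthan/...), 'value' and a
# 'combineOperator' ('and'/'or'); values are quoted for non-numeric columns.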
def rowsfilter(self,filters,dataframe):
self.log.info('\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479
filterexpression=''
firstexpressiondone = False
for x in filters:
if firstexpressiondone:
filterexpression += ' '
if x['combineOperator'].lower() == 'and':
filterexpression += '&'
elif x['combineOperator'].lower() == 'or':
filterexpression += '|'
filterexpression += ' '
firstexpressiondone = True
filterexpression += x['feature']
filterexpression += ' '
if x['condition'].lower() == 'equals':
filterexpression += '=='
elif x['condition'].lower() == 'notequals':
filterexpression += '!='
elif x['condition'].lower() == 'lessthan':
filterexpression += '<'
elif x['condition'].lower() == 'lessthanequalto':
filterexpression += '<='
elif x['condition'].lower() == 'greaterthan':
filterexpression += '>'
elif x['condition'].lower() == 'greaterthanequalto':
filterexpression += '>='
filterexpression += ' '
if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
filterexpression += x['value']
else:
filterexpression += '\''+x['value']+'\''
dataframe = dataframe.query(filterexpression)
self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479
self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0]))
return dataframe,filterexpression
def grouping(self,grouper,dataframe):
grouperbyjson= {}
groupbyfeatures = grouper['groupby']
dataframe = dataframe.reset_index()
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_sum'] = 'sum'
temp[feature+'_max'] = 'max'
temp[feature+'_min'] = 'min'
temp[feature+'_mean'] = 'mean'
aggjson[feature] = temp
else:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_unique'] = 'nunique'
aggjson[feature] = temp
groupbystring = 'groupby([\''+groupbyfeatures+'\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson)
dataframe.columns = dataframe.columns.droplevel(0)
dataframe = dataframe.reset_index()
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
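# Resample on a datetime column. If the column is numeric it is assumed to be
# an epoch timestamp, and the digit count of the first value is used as a
# heuristic (13 digits -> milliseconds, 10 digits -> seconds); rows are then
# grouped with pd.Grouper(freq=...) and aggregated (size/sum/max/min/mean for
# numeric columns, size/nunique for the rest).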
def timeGrouping(self,timegrouper,dataframe):
grouperbyjson= {}
dateTime = timegrouper['dateTime']
frequency = timegrouper['freq']
groupbyfeatures = timegrouper['groupby']
grouperbyjson['datetime'] = dateTime
if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
dt_length = dataframe[dateTime].iloc[0]
dt_length = np.int64(dt_length)
dt_length = len(str(dt_length))
if dt_length == 13:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms')
grouperbyjson['unit'] = 'ms'
elif dt_length == 10:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s')
grouperbyjson['unit'] = 's'
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
dataframe = dataframe.reset_index()
dataframe.set_index('date',inplace=True)
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == dateTime or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {'size','sum','max','min','mean'}
aggjson[feature] = temp
else:
temp = {'size','nunique'}
aggjson[feature] = temp
if groupbyfeatures == '':
groupbystring = 'groupby([pd.Grouper(freq=\''+frequency+'\')]).agg('+str(aggjson)+')'
else:
groupbystring = 'groupby([pd.Grouper(freq=\''+frequency+'\'),\''+groupbyfeatures+'\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
print(grouperbyjson)
if groupbyfeatures == '':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson)
else:
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson)
dataframe.columns = ['_'.join(col) for col in dataframe.columns]
dataframe = dataframe.reset_index()
self.log.info(dataframe.head(10))
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
def readDf(self,dataF,featureList,targetColumn):
dataDf = dataF[featureList]
predictDf =dataF[targetColumn]
return dataDf,predictDf
def csvTodf(self,dataPath,delimiter,textqualifier):
'''
if os.path.splitext(dataPath)[1] == ".tsv":
dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\t')
else:
dataFrame=pd.read_csv(dataPath,encoding='latin1')
'''
if os.path.splitext(dataPath)[1] == ".py":
f = open(dataPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
dataFrame = ldict['dfpy']
else:
dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
dataFrame.rename(columns=lambda x: x.strip(), inplace=True)
return dataFrame
def read_file(self, fileName):
fileName = Path(fileName)
if fileName.suffix == '.pdf':
pdf = pdfplumber.open(fileName)
text = ''
for index, page in enumerate(pdf.pages):
if index:
text += ' '
text += page.extract_text()
else:
with open(fileName, "r",encoding="utf-8") as f:
text = f.read()
return text
def documentsTodf(self,folderlocation,labelFilePath):
dataDf = pd.DataFrame()
error_message = ""
dataset_csv_file = os.path.join(folderlocation,labelFilePath)
labels = pd.read_csv(dataset_csv_file)
dataDict = {}
keys = ["File","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(labels)):
filename = os.path.join(folderlocation,labels.loc[i,"File"])
dataDict["File"].append(self.read_file(filename))
dataDict["Label"].append(labels.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
error_message = ""
return dataDf, error_message
def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature):
self.log.info("\n---------- Prepare Features ----------")
if(str(datetimeFeature).lower() != 'na'):
datetimeFeature = datetimeFeature.split(",")
datetimeFeature = list(map(str.strip, datetimeFeature))
for dtfeature in datetimeFeature:
if dtfeature in df.columns:
self.log.info("-------> Remove Date Time Feature: "+dtfeature)
df = df.drop(columns=dtfeature)
if(str(indexFeature).lower() != 'na'):
indexFeature = indexFeature.split(",")
indexFeature = list(map(str.strip, indexFeature))
for ifeature in indexFeature:
if ifeature in df.columns:
self.log.info("-------> Remove Index Feature: "+ifeature)
df = df.drop(columns=ifeature)
if(str(modelFeatures).lower() != 'na'):
self.log.info("-------> Model Features: "+str(modelFeatures))
modelFeatures = modelFeatures.split(",")
modelFeatures = list(map(str.strip, modelFeatures))
if(targetFeature != '' and str(targetFeature).lower() != 'na'):
targetFeature = targetFeature.split(",")
targetFeature = list(map(str.strip, targetFeature))
for ifeature in targetFeature:
if ifeature not in modelFeatures:
modelFeatures.append(ifeature)
if(str(indexFeature).lower() != 'na'):
for ifeature in indexFeature:
if ifeature in modelFeatures:
modelFeatures.remove(ifeature)
if(str(datetimeFeature).lower() != 'na'):
for dtfeature in datetimeFeature:
if dtfeature in modelFeatures:
modelFeatures.remove(dtfeature)
df = df[modelFeatures]
self.log.info("---------- Prepare Features End ----------")
return(df)
def splitImageDataset(self, df, ratio, modelType):
if modelType.lower() == "objectdetection":
images = df['File'].unique().tolist()
trainImages = random.sample(images, int(len(images) * ratio))
mask = [0] * len(df)
for i in range(len(df)):
mask[i] = df.iloc[i]['File'] in trainImages
trainDf = df.iloc[mask]
testDf = df.iloc[[not elem for elem in mask]]
return trainDf, testDf
else:
return train_test_split(df, test_size=(1 - ratio))
def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}):
from transformations import generate_tfrecord
from transformations.imageAug import ImageAugmentation
if isinstance(csv_file, pd.DataFrame):
df = csv_file
else:
df = pd.read_csv(os.path.join(train_image_dir,csv_file))
labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir)
train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType)
if AugEnabled:
augFile = os.path.join(output_dir,"tempTrainDf.csv")
train_df.to_csv(augFile)
ia = ImageAugmentation(train_image_dir, augFile)
augFile = ia.augment(modelType, operations,None,augConf)
train_df = pd.read_csv(augFile)
generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path)
if AugEnabled and not keepAugImages:
ia.removeAugmentedImages(train_df)
return train_df, num_classes
|
pretrainedModels.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
from pathlib import Path
import urllib.request
import tarfile
import json
import subprocess
import os
from os.path import expanduser
import platform
class ODpretrainedModels():
def __init__(self, location=None):
if location:
if isinstance(location, Path):
self.pretrained_models_location = location.as_posix()
else:
self.pretrained_models_location = location
else:
p = subprocess.run([sys.executable, "-m", "pip","show","AION"],capture_output=True, text=True)
if p.returncode == 0:
Output = p.stdout.split('\n')
for x in Output:
y = x.split(':',1)
if(y[0]=='Location'):
self.pretrained_models_location = y[1].strip()+"/AION/pretrained_models/object_detection"
break
if Path(self.pretrained_models_location).is_dir():
self.config_file_location = self.pretrained_models_location+'/supported_models.json'
with open(self.config_file_location) as json_data:
self.supportedModels = json.load(json_data)
home = expanduser("~")
if platform.system() == 'Windows':
self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection')
else:
self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
if os.path.isdir(self.modelsPath) == False:
os.makedirs(self.modelsPath)
def __save_config(self):
with open(self.config_file_location, 'w') as json_file:
json.dump(self.supportedModels, json_file)
def __download(self, modelName):
try:
url = self.supportedModels[modelName]["url"]
file = self.supportedModels[modelName]["file"]
local_file_path = Path(self.modelsPath)/(file+".tar.gz")
urllib.request.urlretrieve(url, local_file_path)
except:
raise ValueError("{} model download error, check your internet connection".format(modelName))
return local_file_path
def __extract(self, modelName, file_location, extract_dir):
try:
tarFile = tarfile.open(file_location)
tarFile.extractall(extract_dir)
tarFile.close()
Path.unlink(file_location)
return True
except:
return False
def download(self, modelName):
if modelName in list(self.supportedModels.keys()):
p = Path(self.modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.is_dir()]
if self.supportedModels[modelName]['file'] not in modelsDownloaded:
file = self.__download(modelName)
self.supportedModels[modelName]["downloaded"] = True
if self.__extract(modelName, file, self.modelsPath):
self.supportedModels[modelName]["extracted"] = True
self.__save_config()
else:
self.__save_config()
raise ValueError("{} model downloaded but extraction failed,please try again".format(modelName))
else:
raise ValueError("{} is not supported for object detection".format(modelName))
return self.supportedModels[modelName]
def get_info(self,modeltype):
models_info = {}
p = Path(self.pretrained_models_location)
downloaded_models = [x.name for x in p.iterdir() if x.is_dir()]
for model in list(self.supportedModels.keys()):
if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''):
models_info[model] = self.supportedModels[model]['extracted']
return models_info
def is_model_exist(self, model_name):
models = self.get_info('')
status = "NOT_SUPPORTED"
if model_name in models:
if self.supportedModels[model_name]['extracted']:
status = "READY"
else:
status = "NOT_READY"
return status
def clear_config(self, model_name):
self.supportedModels[model_name]['extracted'] = False
self.supportedModels[model_name]['downloaded'] = False
self.__save_config()
|
performance.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
def get_metrics(request):
output = {}
output_path = Path(request.session['deploypath'])/"etc"/"output.json"
if not output_path.exists():
raise ValueError('output json path does not exist, something unexpected happen')
with open(output_path) as file:
config = json.load(file)
output['problem_type'] = config.get('data',{}).get('ModelType')
output['best_model'] = config.get('data',{}).get('BestModel')
output['hyper_params'] = config.get('data',{}).get('params')
output['best_score'] = str(round(float(config.get('data',{}).get('BestScore')), 2))
output['scoring_method'] = config.get('data',{}).get('ScoreType')
if output['problem_type'] == 'classification':
output['mcc_score'] = str(round(float(config.get('data',{}).get('matrix',{}).get('MCC_SCORE', 0.0)), 2))
else:
output['mcc_score'] = 'NA'
return output
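# --- Illustrative sketch (not part of the original module) --------------------
# A minimal stand-in for the Django request object, with a hypothetical
# deployment path, only to show the expected call shape of get_metrics().
if __name__ == '__main__':
    class _FakeRequest:
        session = {'deploypath': '/tmp/aion_deployment'}  # hypothetical path
    try:
        print(get_metrics(_FakeRequest()))
    except ValueError as err:
        print('no deployment found:', err)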
|
brier_score.py | import json
import os
def get_brier_score(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "output.json")
with open(displaypath) as file:
config = json.load(file)
problem_type = config["data"]["ModelType"]
brier_score = config["data"]["matrix"]["BRIER_SCORE"]
print(problem_type,brier_score)
except Exception as e:
#print(str(e))
raise ValueError(str(e))
return problem_type, brier_score
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
|
fairness_metrics.py |
import os
import re
import sys
import json
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpld3
from sklearn.preprocessing import LabelEncoder
from appbe.eda import ux_eda
from aif360.datasets import StandardDataset, BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def get_metrics(request):
dataFile = os.path.join(request.session['deploypath'], "data", "preprocesseddata.csv.gz")
predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
Target_feature = configSettings['targetFeature']
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
df = pd.read_csv(dataFile)
df_p = pd.DataFrame.from_dict(predict_dict['data'])
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
df_temp = request.GET.get('feature')
global metricvalue
metricvalue = request.GET.get('metricvalue')
Protected_feature = df_temp
df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability'])
df_p.rename(columns={'prediction': Target_feature}, inplace=True)
eda_obj = ux_eda(dataFile, optimize=1)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
features_to_Encode = features
categorical_names = {}
encoders = {}
for feature in features_to_Encode:
le = LabelEncoder()
le.fit(df[feature])
df[feature] = le.transform(df[feature])
le.fit(df_p[feature])
df_p[feature] = le.transform(df_p[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
new_list = [item for item in categorical_names[Protected_feature] if not pd.isnull(item)]
class_size = len(new_list)
if class_size > 10:
return 'HeavyFeature'
metrics = fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p)
figure = plot_fair_metrics(metrics)
html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url)
return html_graph
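# For each class of the protected attribute, treat that class as the privileged
# group, wrap the original and predicted frames as AIF360 BinaryLabelDatasets,
# and compute the metric chosen in the UI (Theil Index, Equal Opportunity
# Difference, Disparate Impact or Statistical Parity Difference); the per-group
# results are concatenated into a single DataFrame indexed by group label.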
def fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p):
cols = [metricvalue]
obj_fairness = [[0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
for indx in range(class_size):
priv_group = categorical_names[Protected_feature][indx]
privileged_class = np.where(categorical_names[Protected_feature] == priv_group)[0]
data_orig = StandardDataset(df,
label_name=Target_feature,
favorable_classes=[1],
protected_attribute_names=[Protected_feature],
privileged_classes=[privileged_class])
attr = data_orig.protected_attribute_names[0]
idx = data_orig.protected_attribute_names.index(attr)
privileged_groups = [{attr:data_orig.privileged_protected_attributes[idx][0]}]
unprivileged_size = data_orig.unprivileged_protected_attributes[0].size
unprivileged_groups = []
for idx2 in range(unprivileged_size):
unprivileged_groups.extend([{attr:data_orig.unprivileged_protected_attributes[idx][idx2]}])
bld = BinaryLabelDataset(df=df, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
bld_p = BinaryLabelDataset(df=df_p, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
ClsMet = ClassificationMetric(bld, bld_p,unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
if metricvalue == "Theil Index":
row = pd.DataFrame([[ClsMet.theil_index()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Equal Opportunity Difference":
row = pd.DataFrame([[ClsMet.equal_opportunity_difference()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Disparate Impact":
row = pd.DataFrame([[ClsMet.disparate_impact()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Statistical Parity Difference":
row = pd.DataFrame([[ClsMet.statistical_parity_difference()]],
columns = cols ,
index = [priv_group])
#fair_metrics = fair_metrics.append(row)
fair_metrics = pd.concat([fair_metrics,row])
return fair_metrics
def plot_fair_metrics(fair_metrics):
import matplotlib.patches as patches
plt.style.use('default')
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1)
plt.subplots_adjust(
left = 0.125,
bottom = 0.1,
right = 0.9,
top = 0.9,
wspace = .5,
hspace = 1.1
)
y_title_margin = 1.2
plt.suptitle("Fairness metrics", y = 1.09, fontsize=20)
sns.set(style="dark")
cols = fair_metrics.columns.values
obj = fair_metrics.loc['objective']
if metricvalue == "Theil Index":
size_rect = [0.5]
rect = [-0.1]
bottom = [-0.1]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Equal Opportunity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
elif metricvalue == "Disparate Impact":
size_rect = [0.4]
rect = [0.8]
bottom = [0]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Statistical Parity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
for attr in fair_metrics.index[1:len(fair_metrics)].values:
check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)]
for i in range(0,1):
plt.subplot(1, 1, i+1)
xx = fair_metrics.index[1:len(fair_metrics)].values.tolist()
yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist()
palette = sns.color_palette('husl', len(xx))
ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx)
index = 0
for p in zip(ax.get_xticks(), yy):
if (p[1] > 2.0):
_color = palette.as_hex()[index]
_val = 'Outlier(' + str(round(p[1],3)) + ')'
ax.text(p[0]-0.5, 0.02, _val, color=_color)
else:
ax.text(p[0], p[1]+0.05, round(p[1],3), color='k')
index = index + 1
plt.ylim(bottom[i], top[i])
plt.setp(ax.patches, linewidth=0)
ax.get_xaxis().set_visible(False)
ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1)
ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor="green", linewidth=1, linestyle='solid'))
# plt.axhline(obj[i], color='black', alpha=0.3)
plt.title(cols[i], fontname="Times New Roman", size=20,fontweight="bold")
ax.set_ylabel('')
ax.set_xlabel('')
return fig |
sensitivity_analysis.py | import base64
import io
import json
import os
import urllib
import joblib
import numpy as np
import pandas as pd
from SALib.analyze import sobol
class sensitivityAnalysis():
def __init__(self, model, problemType, data, target, featureName):
self.model = model
self.problemType = problemType
self.data = data
self.target = target
self.featureName = featureName
self.paramvales = []
self.X = []
self.Y = []
self.problem = {}
def preprocess(self):
self.X = self.data[self.featureName].values
self.Y = self.data[self.target].values
bounds = [[np.min(self.X[:, i]), np.max(self.X[:, i])] for i in range(self.X.shape[1])]
self.problem = {
'num_vars': self.X.shape[1],
'names': self.featureName,
'bounds': bounds
}
def generate_samples(self,size):
from SALib.sample import sobol
self.param_values = sobol.sample(self.problem, size)
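# Saltelli-style Sobol sampling (SALib's sobol.sample with its default
# second-order setting) generates N * (2*D + 2) parameter rows for D inputs,
# and sobol.analyze() expects the model-output length to be an exact multiple
# of (2*D + 2); that is why the prediction vectors are truncated before
# analysis in the two methods below.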
def calSiClass(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
S = np.zeros(self.X.shape[1])
for class_label in np.unique(self.Y):
if isML:
y_pred_poba = self.model.predict_proba(self.param_values)[:, class_label]
if isDL:
y_pred_poba = self.model.predict(self.param_values)[:,class_label]
if not y_pred_poba.size % (2 * D + 2) == 0:
lim = y_pred_poba.size - y_pred_poba.size % (2 * D + 2)
y_pred_poba = y_pred_poba[:lim]
Si = sobol.analyze(self.problem, y_pred_poba)
if satype.lower() == 'first':
S += Si['S1']
else:
S += Si['ST']
S /= len(np.unique(self.Y))
return S
except Exception as e:
print('Error in calculating Si for Classification: ', str(e))
raise ValueError(str(e))
def calSiReg(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
Y = np.array([self.model.predict(X_sample.reshape(1, -1)) for X_sample in self.param_values])
Y = Y.reshape(-1)
if not Y.size % (2 * D + 2) == 0:
lim = Y.size - Y.size % (2 * D + 2)
Y = Y[:lim]
Si = sobol.analyze(self.problem, Y)
if satype.lower() == 'first':
S = Si['S1']
else:
S = Si['ST']
return S
except Exception as e:
print('Error in calculating Si for Regression: ', str(e))
raise ValueError(str(e))
def plotSi(self, S, saType):
try:
import matplotlib.pyplot as plt
if saType.lower() == 'first':
title, label = 'Sensitivity Analysis', 'First order'
else:
title, label = 'Sensitivity Analysis', 'Total order'
x = np.arange(len(self.problem['names']))
width = 0.35
fig, ax = plt.subplots()
ax.bar(x - width / 2, S, width, label=label)
ax.set_xticks(x)
ax.set_xlabel('Features')
ax.set_ylabel('Sensitivity Indices')
ax.set_title(title)
ax.set_xticklabels(self.problem['names'], rotation=45, ha="right")
ax.legend()
plt.tight_layout()
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
SAimage = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as e:
print(e)
SAimage = ''
return SAimage
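# Illustrative usage sketch (hypothetical data and feature names, not part of the pipeline):
# sa = sensitivityAnalysis(model, 'regression', df, 'target_col', ['feat1', 'feat2'])
# sa.preprocess()
# sa.generate_samples(1024)
# S = sa.calSiReg('first', isML=True, isDL=False)
# img = sa.plotSi(S, 'first') # base64-encoded PNG data URI, or '' on failure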
def checkModelType(modelName):
isML= False
isDL = False
if modelName in ["Neural Network", "Convolutional Neural Network (1D)", "Recurrent Neural Network","Recurrent Neural Network (GRU)",
"Recurrent Neural Network (LSTM)", "Neural Architecture Search", "Deep Q Network", "Dueling Deep Q Network"]:
isDL = True
elif modelName in ["Linear Regression","Lasso","Ridge","Logistic Regression", "Naive Bayes", "Decision Tree", "Random Forest", "Support Vector Machine", "K Nearest Neighbors", "Gradient Boosting",
"Extreme Gradient Boosting (XGBoost)", "Light Gradient Boosting (LightGBM)", "Categorical Boosting (CatBoost)","Bagging (Ensemble)"]:
isML = True
return isML,isDL
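# Note (illustrative): startSA below uses this (isML, isDL) pair to decide how the
# saved model is loaded (joblib for ML, tensorflow.keras load_model for DL) and
# which Sobol sample size to request (2048 for ML, 512 for DL, 1024 otherwise).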
def startSA(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
if not os.path.exists(displaypath):
raise Exception('Config file not found.')
with open(displaypath) as file:
config = json.load(file)
problemType = config['problemType']
if problemType.lower() not in ['classification','regression']:
raise Exception(f"Problem Type: {problemType} not supported")
isML,isDL = checkModelType(config['modelname'])
sample_size = 1024
if isML:
model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 2048
if isDL:
from tensorflow.keras.models import load_model
model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 512
target = config['targetFeature']
featureName = config['modelFeatures']
dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz')
if not os.path.exists(dataPath):
raise Exception('Data file not found.')
from utils.file_ops import read_df_compressed
read_status,dataFrame = read_df_compressed(dataPath)
obj = sensitivityAnalysis(model, problemType, dataFrame, target, featureName)
obj.preprocess()
obj.generate_samples(sample_size)
submitType = str(request.GET.get('satype'))
saType = 'first' if submitType == 'first' else 'total'
if problemType.lower() == 'classification':
SA_values = obj.calSiClass(saType,isML,isDL)
else:
SA_values = obj.calSiReg(saType,isML,isDL)
if SA_values.size and saType:
graph = obj.plotSi(SA_values, saType)
if graph:
outputJson = {'Status': "Success", "graph": graph}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in Plotting Graph'}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in calculating Si values'}
output_json = json.dumps(outputJson)
return output_json
except Exception as e:
print(str(e))
raise ValueError(str(e))
|
trustedai_uq.py | import numpy as np
import joblib
import pandas as pd
from appbe.eda import ux_eda
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# from pathlib import Path
import configparser
import json
import matplotlib.pyplot as plt
import os
def trustedai_uq(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
with open(displaypath, "r") as f:
configSettings = json.loads(f.read())
TargetFeature = configSettings['targetFeature']
problemType = configSettings['problemType']
raw_data_loc = configSettings['preprocessedData']
dataLocation = configSettings['postprocessedData']
selectedfeatures = request.GET.get('values')
if problemType.lower() == "classification":
model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model']))
df = pd.read_csv(dataLocation)
trainfea = df.columns.tolist()
feature = json.loads(selectedfeatures)
# feature = ",".join(featurs)
# features = ['PetalLengthCm','PetalWidthCm']
targ = TargetFeature
tar =[targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(model,dataLocation,feature,tar)
return outputStr
if problemType.lower() == "regression":
model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model']))
df = pd.read_csv(dataLocation)
trainfea = df.columns.tolist()
feature = json.loads(selectedfeatures)
# feature = ",".join(featurs)
# features = ['PetalLengthCm','PetalWidthCm']
targ = TargetFeature
tar =[targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(model,dataLocation,feature,tar)
print(outputStr)
return outputStr
except Exception as e:
print('error',e)
return str(e)
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
visualization.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import re
import shutil
import scipy.stats as st
import json
import os,sys
import glob
import logging
from utils.file_ops import read_df_compressed
class Visualization():
def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file):
self.dataframe = dataframe
self.displayjson = {}
self.visualizationJson = visualizationJson
self.dateTimeColumn = dateTimeColumn
self.deployPath = deployPath
#shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath)
if learner_type == 'ML' and modelname != 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
elif learner_type == 'DL' or modelname == 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
self.jsondeployPath = deployPath
#self.deployPath = self.deployPath+'visualization/'
self.dataFolderLocation = dataFolderLocation
self.vectorizerFeatures = vectorizerFeatures
self.textFeatures = textFeatures
self.emptyFeatures = emptyFeatures
'''
try:
os.makedirs(self.deployPath)
except OSError as e:
print("\nFolder Already Exists")
'''
self.numericContinuousFeatures = numericContinuousFeatures
self.discreteFeatures = discreteFeatures
self.categoricalFeatures = categoricalFeatures
self.modelFeatures = modelFeatures
self.modeltype = modeltype
self.targetFeature = targetFeature
self.displayjson['usecasename'] = str(usecasename)
self.displayjson['version'] = str(version)
self.displayjson['problemType'] = str(self.modeltype)
self.displayjson['targetFeature'] = self.targetFeature
self.displayjson['numericalFeatures'] = numericalFeatures
self.displayjson['nonNumericFeatures'] = nonNumericFeatures
self.displayjson['modelFeatures'] = self.modelFeatures
self.displayjson['textFeatures'] = self.textFeatures
self.displayjson['emptyFeatures'] = self.emptyFeatures
self.displayjson['modelname']= str(modelname)
self.displayjson['preprocessedData'] = str(original_data_file)
self.displayjson['nrows'] = str(nrows)
self.displayjson['ncols'] = str(ncols)
self.displayjson['saved_model'] = str(saved_model)
self.displayjson['scoreParam'] = str(scoreParam)
self.displayjson['labelMaps'] = eval(str(labelMaps))
self.original_data_file = original_data_file
self.displayjson['featureReduction'] = featureReduction
if featureReduction == 'True':
self.displayjson['reduction_data_file'] = reduction_data_file
else:
self.displayjson['reduction_data_file'] = ''
self.pred_filename = predicted_data_file
self.profiled_data_file = profiled_data_file
self.displayjson['predictedData'] = predicted_data_file
self.displayjson['postprocessedData'] = profiled_data_file
#self.trained_data_file = trained_data_file
#self.displayjson['trainingData'] = trained_data_file
#self.displayjson['categorialFeatures']=categoricalFeatures
#self.displayjson['discreteFeatures']=discreteFeatures
#self.displayjson['continuousFeatures']=numericContinuousFeatures
#y = json.dumps(self.displayjson)
#print(y)
self.labelMaps = labelMaps
self.log = logging.getLogger('eion')
def visualizationrecommandsystem(self):
try:
import tensorflow.keras.utils as kutils
datasetid = self.visualizationJson['datasetid']
self.log.info('\n================== Data Profiling Details==================')
datacolumns=list(self.dataframe.columns)
self.log.info('================== Data Profiling Details End ==================\n')
self.log.info('================== Features Correlation Details ==================\n')
self.log.info('\n================== Model Performance Analysis ==================')
if os.path.exists(self.pred_filename):
try:
status,df=read_df_compressed(self.pred_filename)
if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection':
y_actual = df['actual'].values
y_predict = df['predict'].values
y_actual = kutils.to_categorical(y_actual)
y_predict = kutils.to_categorical(y_predict)
classes = df.actual.unique()
n_classes = y_actual.shape[1]
self.log.info('-------> ROC AUC CURVE')
roc_curve_dict = []
for i in classes:
try:
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i])
roc_auc = metrics.auc(fpr, tpr)
class_roc_auc_curve = {}
class_roc_auc_curve['class'] = str(classname)
fprstring = ','.join(str(v) for v in fpr)
tprstring = ','.join(str(v) for v in tpr)
class_roc_auc_curve['FP'] = str(fprstring)
class_roc_auc_curve['TP'] = str(tprstring)
roc_curve_dict.append(class_roc_auc_curve)
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> False Positive Rate (x Points): '+str(fpr))
self.log.info('------------> True Positive Rate (y Points): '+str(tpr))
except:
pass
self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict
self.log.info('-------> Precision Recall CURVE')
precision_recall_curve_dict = []
for i in range(n_classes):
try:
lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i])
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
roc_auc = metrics.auc(lr_recall,lr_precision)
class_precision_recall_curve = {}
class_precision_recall_curve['class'] = str(classname)
Precisionstring = ','.join(str(round(v,2)) for v in lr_precision)
Recallstring = ','.join(str(round(v,2)) for v in lr_recall)
class_precision_recall_curve['Precision'] = str(Precisionstring)
class_precision_recall_curve['Recall'] = str(Recallstring)
precision_recall_curve_dict.append(class_precision_recall_curve)
except:
pass
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> Recall (x Points): '+str(lr_recall))
self.log.info('------------> Precision (y Points): '+str(lr_precision))
self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict
status,predictdataFrame=read_df_compressed(self.displayjson['predictedData'])
except Exception as e:
self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e))
self.log.info('================== Model Performance Analysis End ==================\n')
self.log.info('\n================== For Descriptive Analysis of Model Features ==================')
outputfile = os.path.join(self.jsondeployPath,'etc','display.json')
with open(outputfile, 'w') as fp:
json.dump(self.displayjson, fp)
self.log.info('================== For Descriptive Analysis of Model Features End ==================\n')
except Exception as inst:
self.log.info('Visualization Failed !....'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_linechart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "543234","_type": "visualization","_source": {"title": "'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"line\\",\\"params\\":{\\"type\\":\\"line\\",\\"grid\\":{\\"categoryLines\\":false,\\"style\\":{\\"color\\":\\"#eee\\"}},\\"categoryAxes\\":[{\\"id\\":\\"CategoryAxis-1\\",\\"type\\":\\"category\\",\\"position\\":\\"bottom\\",\\"show\\":true,\\"style\\":{},\\"scale\\":{\\"type\\":\\"linear\\"},\\"labels\\":{\\"show\\":true,\\"truncate\\":100},\\"title\\":{}}],\\"valueAxes\\":[{\\"id\\":\\"ValueAxis-1\\",\\"name\\":\\"LeftAxis-1\\",\\"type\\":\\"value\\",\\"position\\":\\"left\\",\\"show\\":true,\\"style\\":{},\\"scale\\":{\\"type\\":\\"linear\\",\\"mode\\":\\"normal\\"},\\"labels\\":{\\"show\\":true,\\"rotate\\":0,\\"filter\\":false,\\"truncate\\":100},\\"title\\":'
visulizationjson = visulizationjson+'{\\"text\\":\\"'+yaxisname+'\\"}}],\\"seriesParams\\":[{\\"show\\":\\"true\\",\\"type\\":\\"line\\",\\"mode\\":\\"normal\\",\\"data\\":'
visulizationjson = visulizationjson+'{\\"label\\":\\"'+yaxisname+'\\",\\"id\\":\\"1\\"},\\"valueAxis\\":\\"ValueAxis-1\\",\\"drawLinesBetweenPoints\\":true,\\"showCircles\\":true}],\\"addTooltip\\":true,\\"addLegend\\":true,\\"legendPosition\\":\\"right\\",\\"times\\":[],\\"addTimeMarker\\":false},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"avg\\",\\"schema\\":\\"metric\\",\\"params\\":{\\"field\\":\\"'+str(ycolumn)+'\\"}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+xcolumn+'\\",\\"size\\":100,\\"order\\":\\"desc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}","uiStateJSON": "{}", "description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON": "{\\"index\\":\\"'+datasetindex+'\\",\\"query\\":{\\"query\\":\\"\\",\\"language\\":\\"lucene\\"},\\"filter\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_barchart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"histogram\\",\\"params\\":{\\"addLegend\\":true,\\"addTimeMarker\\":false,\\"addTooltip\\":true,\\"categoryAxes\\":[{\\"id\\":\\"CategoryAxis-1\\",\\"labels\\":{\\"show\\":true,\\"truncate\\":100},\\"position\\":\\"bottom\\",\\"scale\\":{\\"type\\":\\"linear\\"},\\"show\\":true,\\"style\\":{},\\"title\\":{},\\"type\\":\\"category\\"}],\\"grid\\":{\\"categoryLines\\":false,\\"style\\":{\\"color\\":\\"#eee\\"}},\\"legendPosition\\":\\"right\\",\\"seriesParams\\":[{\\"data\\":{\\"id\\":\\"1\\",'
visulizationjson = visulizationjson+'\\"label\\":\\"'+yaxisname+'\\"},'
visulizationjson = visulizationjson+'\\"drawLinesBetweenPoints\\":true,\\"mode\\":\\"stacked\\",\\"show\\":\\"true\\",\\"showCircles\\":true,\\"type\\":\\"histogram\\",\\"valueAxis\\":\\"ValueAxis-1\\"}],\\"times\\":[],\\"type\\":\\"histogram\\",\\"valueAxes\\":[{\\"id\\":\\"ValueAxis-1\\",\\"labels\\":{\\"filter\\":false,\\"rotate\\":0,\\"show\\":true,\\"truncate\\":100},\\"name\\":\\"LeftAxis-1\\",\\"position\\":\\"left\\",\\"scale\\":{\\"mode\\":\\"normal\\",\\"type\\":\\"linear\\"},\\"show\\":true,\\"style\\":{},\\"title\\":'
visulizationjson = visulizationjson+'{\\"text\\":\\"'+yaxisname+'\\"},'
visulizationjson = visulizationjson+'\\"type\\":\\"value\\"}]},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"avg\\",\\"schema\\":\\"metric\\",\\"params\\":{\\"field\\":\\"'+str(xcolumn)+'\\"}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+ycolumn+'\\",\\"size\\":100,\\"order\\":\\"asc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}","uiStateJSON":"{}","description": "","version": 1,"kibanaSavedObjectMeta": {'
visulizationjson = visulizationjson+'"searchSourceJSON": "{\\"index\\":\\"'+datasetindex+'\\",\\"query\\":{\\"language\\":\\"lucene\\",\\"query\\":\\"\\"},\\"filter\\":[]}"}},"_migrationVersion":{"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawpiechart(self,xcolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_piechart"
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"pie\\",\\"params\\":{\\"type\\":\\"pie\\",\\"addTooltip\\":true,\\"addLegend\\":true,\\"legendPosition\\":\\"right\\",\\"isDonut\\":true,\\"labels\\":{\\"show\\":false,\\"values\\":true,\\"last_level\\":true,\\"truncate\\":100}},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"count\\",\\"schema\\":\\"metric\\",\\"params\\":{}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+xcolumn+'\\",\\"size\\":100,\\"order\\":\\"asc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}",'
visulizationjson = visulizationjson+'"uiStateJSON": "{}","description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON":"{\\"index\\":\\"'+datasetid+'\\",\\"query\\":{\\"query\\":\\"\\",\\"language\\":\\"lucene\\"},\\"filter\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def get_confusion_matrix(self,df):
setOfyTrue = set(df['actual'])
unqClassLst = list(setOfyTrue)
if(str(self.labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in self.labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName)
resultjson = result.to_json(orient='index')
return(resultjson)
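# Note (illustrative): rows of the confusion matrix are prefixed "act:" (actual
# labels) and columns "pre:" (predicted labels); the result is serialised with
# to_json(orient='index') for the display layer.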
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int" or data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(abs(data.astype(int)),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
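# Note (illustrative): DistributionFinder selects the candidate distribution with the
# lowest sum of squared errors (SSE) between the empirical histogram/PMF and the fitted
# PMF/PDF; constant discrete data is reported as "Constant Data-No Distribution".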
|
cat_type_str.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
class cat_to_str:
def __init__(self):
pass
def convert(self, x):
return pd.DataFrame(x).astype(str)
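# For example (illustrative): cat_to_str().convert(['a', 1, None]) returns a
# single-column DataFrame whose values are the strings 'a', '1' and 'None'.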
|
file_ops.py | import os
from pathlib import Path
import pandas as pd
import numpy as np
import json
def listToStringWithDelimiter(s, vectorDBFeatureDelimitInDoc):
# join all items of the list into a single delimiter-separated string
return vectorDBFeatureDelimitInDoc.join(str(item) for item in s)
def save_csv(df, fileLocation, encoding=None):
#import pdb;pdb.set_trace();
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
if encoding:
df.to_csv(fileLocation, encoding=encoding, index=False,)
else:
df.to_csv(fileLocation, index=False)
return True, ''
except Exception as e:
print(e)
return False, str(e)
def save_csv_compressed(df, fileLocation, encoding=None):
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
if encoding:
df.to_csv(fileLocation, encoding=encoding, index=False, compression='gzip')
else:
df.to_csv(fileLocation, index=False, compression='gzip')
return True, ''
except Exception as e:
print(e)
return False, str(e)
def read_df(fileLocation,encoding=None, nrows=None):
parent_dir = Path(fileLocation).parent
if parent_dir.exists():
try:
if encoding and nrows:
df = pd.read_csv(fileLocation, encoding=encoding,nrows=nrows,encoding_errors= 'replace')
elif encoding:
df = pd.read_csv(fileLocation, encoding=encoding,encoding_errors= 'replace')
elif nrows:
df = pd.read_csv(fileLocation, nrows=nrows)
else:
df = pd.read_csv(fileLocation)
return True, df
except Exception as e:
df = pd.read_csv(fileLocation, encoding="utf-8",encoding_errors= 'replace')
print(e)
return True,df
else:
print("parent fails")
return False, None
def read_df_compressed(fileLocation, encoding=None, nrows=None):
parent_dir = Path(fileLocation).parent
if parent_dir.exists():
try:
if encoding and nrows:
df = pd.read_csv(fileLocation, encoding=encoding, nrows=nrows, compression="gzip",encoding_errors= 'replace')
elif encoding:
df = pd.read_csv(fileLocation, encoding=encoding, compression="gzip",encoding_errors= 'replace')
elif nrows:
df = pd.read_csv(fileLocation, nrows=nrows, compression="gzip")
else:
df = pd.read_csv(fileLocation, encoding="utf-8", compression="gzip",encoding_errors= 'replace')
return True, df
except Exception as e:
df = pd.read_csv(fileLocation, encoding="utf-8",encoding_errors= 'replace')
print(e)
return True,df
else:
print("parent fails")
return False, None
def save_chromadb(df, config_obj, fileLocation, modelFeatures):
import chromadb
#from chromadb.config import Settings
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
vectorDBFeatureDelimitInDoc = config_obj.getVectorDBFeatureDelimitInDoc()
persist_directory = os.path.dirname(os.path.abspath(fileLocation))
# client = chromadb.Client(
# Settings(
# persist_directory=persist_directory,
# chroma_db_impl="duckdb+parquet",
# )
# )
client = chromadb.PersistentClient(path=persist_directory)
# Create a new chroma collection
collection_name = os.path.basename(fileLocation).split('/')[-1]
collection_name = collection_name.replace('.csv', '')
collection_name = collection_name + 'VecDB'
collection = client.create_collection(
name=collection_name,
metadata={"hnsw:space": "cosine"}
)
features = modelFeatures.split(",")
dftxt = pd.concat([df.pop(x) for x in features], axis=1)
stepSize = 500
for i in range(0, len(df),stepSize):
start = i
end = i+ stepSize
dfembdary = df.iloc[start:end].to_numpy()
dftxtary = dftxt.iloc[start:end].to_numpy()
idxary = df.iloc[start:end].index.values
#convert to string
idxary = [str(x) for x in idxary]
dftxtary = [listToStringWithDelimiter(x.tolist(), vectorDBFeatureDelimitInDoc) for x in dftxtary]
collection.add(
embeddings=dfembdary.tolist(),
ids=idxary,
documents= dftxtary
)
client.persist()
return True, ''
except Exception as e:
return False, str(e)
|
__init__.py | import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from .cat_type_str import cat_to_str
__version__ = "1.0" |
validate_inputs.py | import pandas as pd
def dataGarbageValue(dataFrame,datetimeFeature):
if datetimeFeature == '' or datetimeFeature.lower() == 'na':
return 'Success',''
try:
features = datetimeFeature.split(',')
for dtfeature in features:
dataFrame[dtfeature] = pd.to_datetime(dataFrame[dtfeature],errors='coerce')
if pd.isnull(dataFrame[dtfeature]).sum() > 0:
return 'Error',dtfeature+' feature has some garbage values'
except Exception as e:
print(e)
return 'Error', 'Datetime features validation error'
return 'Success','' |
VideoTraining.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
class VideoTraining(object):
def __init__(self):
self.log = logging.getLogger('eion')
def train_model(self,model,modelParam,outputLocation,tfrecord_directory):
print(model)
print(modelParam)
print(outputLocation)
print(tfrecord_directory)
from savp import TrainSAVP
TrainSAVP(tfrecord_directory,outputLocation,modelParam,model)
|
deeplearning.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler,SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import TomekLinks
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import log_loss
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from dlearning.Classification import DLClassificationModel
from dlearning.Regression import DLRegressionModel
from learner.machinelearning import machinelearning
from sklearn.metrics import matthews_corrcoef, brier_score_loss
import os
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def rmse_m(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def r_square(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true-K.mean(y_true)))
return (1 - SS_res/(SS_tot+K.epsilon()))
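# Note (illustrative): the helpers above are Keras-backend metric functions, e.g.
# f1_m computes 2 * precision * recall / (precision + recall + K.epsilon());
# K.epsilon() guards against division by zero when a batch contains no positives.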
class deeplearning(object):
def __init__(self):
self.log = logging.getLogger('eion')
def getDLPredictionData(self,model_dl,hist_reloaded,X):
if model_dl == "Neural Network":
XSNN = X.values
predictedData = hist_reloaded.predict(XSNN)
else:
X1 = np.expand_dims(X, axis=2)
predictedData = hist_reloaded.predict(X1)
return(predictedData)
def getPredictionData(self,model_dl,hist_reloaded,X):
if model_dl == "Neural Network":
XSNN = X.values
#predictedData = hist_reloaded.predict_classes(XSNN)
predict_x=hist_reloaded.predict(XSNN)
predictedData=np.argmax(predict_x,axis=1)
else:
X1 = np.expand_dims(X, axis=2)
#predictedData = hist_reloaded.predict_classes(X1)
predict_x=hist_reloaded.predict(X1)
predictedData=np.argmax(predict_x,axis=1)
return(predictedData, predict_x)
def LoadDL_Regression_Model(self,filename_dl,scoreParam,loss_matrix,optimizer):
if(scoreParam.lower() == 'rmse'):
hist_reloaded = load_model(filename_dl,custom_objects={"rmse": rmse_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif(scoreParam.lower() == 'r2'):
hist_reloaded = load_model(filename_dl,custom_objects={"r2": r_square},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
else:
hist_reloaded = load_model(filename_dl)
return(hist_reloaded)
def startLearning(self,model_type,modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,labelMaps,df_test,deployLocation,modelName,modelVersion,best_feature_model):
mlobj = machinelearning()
if model_type == 'Classification':
self.log.info('\n------ Training DL: Classification ----')
objClf = DLClassificationModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model)
dftrain = xtrain.copy()
dftrain['Target'] = ytrain
model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objClf.TalosScan(objClf)
self.log.info('------ Training DL: Classification End----\n')
saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav'
filename_dl = os.path.join(deployLocation,'model',saved_model_dl)
best_model_dl.save(filename_dl)
hist_reloaded = self.LoadDL_Classification_Model(filename_dl,scoreParam,loss_matrix,optimizer)
self.log.info('\n--------- Performance Matrix with Train Data ---------')
predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtrain)
trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(ytrain, predictedData, prob,labelMaps)
self.log.info('\n--------- Performance Matrix with Train Data End ---------')
predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtest)
df_test['predict'] = predictedData
self.log.info('\n--------- Performance Matrix with Test Data ---------')
performancematrix = mlobj.getClassificationPerformaceMatrix(ytest, predictedData, prob,labelMaps)
self.log.info('\n--------- Performance Matrix with Test Data End ---------')
return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix)
else:
objReg = DLRegressionModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model)
dftrain = xtrain.copy()
dftrain['Target'] = ytrain
model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objReg.TalosScan(objReg)
self.log.info('------ Training DL: Regression End----\n')
self.log.info('\n------- Best DL Model and its parameters -------------')
self.log.info('-------> Best Model: '+str(model_dl))
self.log.info('-------> Best Score: '+str(score_dl))
self.log.info('-------> Best Params: '+str(params_dl))
self.log.info('------- Best DL Model and its parameters End-------------\n')
saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav'
filename_dl = os.path.join(deployLocation,'model',saved_model_dl)
best_model_dl.save(filename_dl)
hist_reloaded=self.LoadDL_Regression_Model(filename_dl,scoreParam,loss_matrix,optimizer)
predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtrain)
self.log.info('\n--------- Performance Matrix with Train Data ---------')
trainingperformancematrix = mlobj.get_regression_matrix(ytrain, predictedData)
self.log.info('--------- Performance Matrix with Train Data End---------\n')
predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtest)
df_test['predict'] = predictedData
self.log.info('\n--------- Performance Matrix with Test Data ---------')
performancematrix = mlobj.get_regression_matrix(ytest, predictedData)
self.log.info('--------- Performance Matrix with Test Data End---------\n')
return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix)
def LoadDL_Classification_Model(self,filename_dl,scoreParam,loss_matrix,optimizer):
if(scoreParam.lower() == 'recall'):
hist_reloaded = load_model(filename_dl,custom_objects={"recall": recall_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif(scoreParam.lower() == 'precision'):
hist_reloaded = load_model(filename_dl,custom_objects={"precision": precision_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif(scoreParam.lower() == 'roc_auc'):
hist_reloaded = load_model(filename_dl,compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif(scoreParam.lower() == 'f1_score'):
hist_reloaded = load_model(filename_dl,custom_objects={"f1_score": f1_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
else:
hist_reloaded = load_model(filename_dl)
return(hist_reloaded)
def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps):
setOfyTrue = set(le_trainY)
unqClassLst = list(setOfyTrue)
if(str(labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("true:"+str(item))
columnName.append(str(item))
matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName)
self.log.info('\n <--- Confusion Matrix --->')
self.log.info(matrixconfusion)
classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, output_dict=True))
self.log.info('\n <--- Classification Report --->')
self.log.info(classificationreport)
lb = LabelBinarizer()
lb.fit(le_trainY)
transformTarget= lb.transform(le_trainY)
if transformTarget.shape[-1] == 1:
transformTarget = le_trainY
prob = np.delete( prob, 0, 1)
rocaucscore = roc_auc_score(transformTarget,prob,average="macro")
brier_score = None
mcc_score = matthews_corrcoef(le_trainY,predictedData)
if len(unqClassLst) > 2:
brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1))
else:
brier_score = brier_score_loss(transformTarget,prob)
self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore))
self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}')
self.log.info(f'-------> BRIER SCORE : {brier_score}')
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = classificationreport.to_json(orient='index')
matrix = f'"ConfusionMatrix": {matrixconfusion},"ClassificationReport": {classificationreport},"ROC_AUC_SCORE": {rocaucscore},"MCC_SCORE": {mcc_score},"BRIER_SCORE": {brier_score}'
return(matrix)
def split_into_train_test_data(self,featureData,targetData,cvSplit,testPercentage,modelType='classification'):
'''
if cvSplit == None:
'''
testSize=testPercentage/100
if modelType == 'regression':
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
else:
try:
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True)
except:
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
self.log.info('\n<-------------- Test Train Split ---------------->\n')
self.log.info('\n<-------- Train Data Shape '+str(xtrain.shape)+' ---------->\n')
self.log.info('\n<-------- Test Data Shape '+str(xtest.shape)+' ---------->\n')
'''
else:
xtrain=featureData
ytrain=targetData
xtest=featureData
ytest=targetData
'''
return(xtrain,ytrain,xtest,ytest)
def checkForClassBalancing(self,targetData):
imbalancedCount=0
valueCount=targetData.value_counts()
self.log.info("<------ Categories and Count ------>")
self.log.info(valueCount)
categoryList=valueCount.keys().tolist()
categoryCountList=valueCount.tolist()
for i in range(0,len(categoryCountList)):
if float(categoryCountList[i])<=float(0.3*max(categoryCountList)):
self.log.info("<------ Imbalanced class ------>"+str(categoryCountList[i])+' '+str(categoryList[i]))
imbalancedCount=imbalancedCount+1
return(imbalancedCount)
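# Note (illustrative): a class is counted as imbalanced when its sample count is at
# most 30% of the largest class count, so the returned value is the number of such
# minority classes.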
def setScoreParams(self,scoreParam,problem_type):
if problem_type.lower() == 'classification' or problem_type.lower() == 'textclassification':
allowedmatrix = ['accuracy','recall','precision','f1_score','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
elif scoreParam.lower() == 'none':
scoreParam = 'accuracy'
else:
scoreParam = scoreParam.lower()
else:
allowedmatrix = ['mse','rmse','r2','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
elif scoreParam.lower() == 'none':
scoreParam = 'mse'
else:
scoreParam = scoreParam.lower()
return(scoreParam)
def ExecuteClassBalancing(self,featureData,targetData,balancingMethod):
if balancingMethod.lower() == "oversample":
self.log.info("<------ Balancing data using SMOTE OverSampling Technique ------>")
oversample = SMOTE()
balfeatureData, baltargetData = oversample.fit_resample(featureData, targetData)
self.log.info(baltargetData.value_counts())
elif balancingMethod.lower() == "undersample":
self.log.info("<------ Balancing data using Tomelinks UnderSampling Technique ------>")
tLinks = TomekLinks()
balfeatureData, baltargetData = tLinks.fit_resample(featureData, targetData)
self.log.info(baltargetData.value_counts())
else:
balfeatureData = featureData
baltargetData = targetData
self.log.info("<------ No balancing technique has been defined ,using imbalanced data for classification ------>")
return(balfeatureData,baltargetData)
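# Note (illustrative): "oversample" grows minority classes with synthetic SMOTE samples,
# "undersample" drops majority-class Tomek links, and any other value leaves the data
# unchanged, as logged above.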
def get_regression_matrix(self,targetData,predictedData):
try:
self.log.info('\n <--------- r2_score-------------- --->')
r2score=r2_score(targetData, predictedData)
self.log.info(r2score)
except Exception as e:
self.log.info('\n--------- r2_score '+str(e))
r2score = 0
try:
self.log.info('\n <--- Mean Absolute Error --->')
meanabsoluteerror=(mean_absolute_error(targetData, predictedData))
self.log.info(meanabsoluteerror)
except Exception as e:
self.log.info('\n---------Error: meanabsoluteerror '+str(e))
meanabsoluteerror = 0
try:
self.log.info("<------------mean_squared_error--------------->")
meanssquatederror=mean_squared_error(targetData, predictedData)
self.log.info(meanssquatederror)
except Exception as e:
self.log.info('\n---------Error: meanssquatederror '+str(e))
meanssquatederror = 0
try:
self.log.info("<------------root mean_squared_error--------------->")
rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False)
self.log.info(rootmeanssquatederror)
except Exception as e:
self.log.info('\n---------Error: rootmeanssquatederror '+str(e))
rootmeanssquatederror = 0
try:
self.log.info('\n <--- Mean Absolute Percentage Error --->')
targetArray, predictedArray = np.array(targetData), np.array(predictedData)
meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / targetArray))*100
self.log.info(meanpercentageerror)
except Exception as e:
self.log.info('\n---------Error: meanpercentageerror '+str(e))
meanpercentageerror = 0
matrix = '"MAE":'+str(meanabsoluteerror)+',"R2Score":'+str(r2score)+',"MSE":'+str(meanssquatederror)+',"MAPE":'+str(meanpercentageerror)+',"RMSE":'+str(rootmeanssquatederror)
return matrix |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
Classification.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import talos
import json
import sys
import time
import os
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import tensorflow.keras.utils as kutils
from talos.model.normalizers import lr_normalizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,LSTM,GRU,SimpleRNN,Flatten, Input
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv1D,MaxPooling1D
from sklearn.model_selection import KFold
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import SGD
import logging
import tensorflow as tf
import tensorflow.keras.backend as K
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
class DLClassificationModel:
def __init__(self,modelList, modelParams, scoreParam, cvSplit, featuresData,
targetData,testX,testY, method,randomMethod,roundLimit,best_feature_model):
self.modelList =modelList
self.modelParams =modelParams
self.scoreParam = scoreParam
self.cvSplit =cvSplit
self.featuresData =featuresData
self.targetData = targetData
self.testX = testX
self.testY = testY
self.method =method
self.randomMethod=randomMethod
self.roundLimit=roundLimit
self.best_feature_model = best_feature_model
self.log = logging.getLogger('eion')
def RNNClassification(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = K.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_val = K.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
if params['RNNType'] == "LSTM" :
if params['numRNNLayers'] > 1:
model.add(LSTM(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(LSTM(params['first_neuron']))
else:
model.add(LSTM(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "GRU" :
if params['numRNNLayers'] > 1:
model.add(GRU(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(GRU(params['first_neuron']))
else:
model.add(GRU(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "SimpleRNN" :
if params['numRNNLayers'] > 1:
model.add(SimpleRNN(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(SimpleRNN(params['first_neuron']))
else:
model.add(SimpleRNN(params['first_neuron'],input_shape=(x_train.shape[1],1)))
talos.utils.hidden_layers(model, params, x_train.shape[1])
model.add(Dense(y_train.shape[1],activation=params['last_activation']))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['acc',f1_m,precision_m,recall_m,tf.keras.metrics.AUC()])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def SNNClassification(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
model = Sequential()
model.add(Dense(params['first_neuron'], input_dim=x_train.shape[1], activation=params['activation']))
talos.utils.hidden_layers(model, params,1)
model.add(Dropout(params['dropout']))
model.add(Dense(y_train.shape[1], activation=params['last_activation']))
model.compile(loss=params['losses'],
optimizer=params['optimizer'],
metrics=['acc',f1_m,precision_m,recall_m,tf.keras.metrics.AUC()])
out = model.fit(x=x_train,
y=y_train,
validation_data=(x_val, y_val),
epochs=params['epochs'],
batch_size=params['batch_size'],
verbose=0)
return out, model
def CNNClassification(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = K.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_val = K.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
model.add(Conv1D(filters=params['first_neuron'], kernel_size=(3), activation=params['activation'], input_shape=(x_train.shape[1],1),padding='same') )
if params['numConvLayers'] > 1:
for x in range(1,params['numConvLayers']):
if params['MaxPool'] == "True":
model.add(MaxPooling1D(pool_size=2,padding='same'))
model.add(Conv1D(filters=8, kernel_size=3, activation=params['activation'],padding='same'))
talos.utils.hidden_layers(model, params, x_train.shape[1])
model.add(MaxPooling1D(pool_size=2,padding='same'))
model.add(Flatten())
model.add(Dense(y_train.shape[1],activation=params['last_activation']))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['acc',f1_m,precision_m,recall_m,tf.keras.metrics.AUC()])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def TalosScan(self,modelObj):
try:
#dataPath = pd.read_csv(self.dataLocation)
#X = dataPath.drop(self.targetData, axis=1)
loss_matrix='binary_crossentropy'
optimizer='Nadam'
X = self.featuresData
x = X.values
Y = self.targetData
scoredetails = ''
#Y= dataPath[self.targetData]
y = Y.values
y = kutils.to_categorical(y)
XSNN = X.values
X1 = np.expand_dims(X, axis=2)
kf = KFold(n_splits = self.cvSplit)
for train_index, test_index in kf.split(X):
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
data = self.modelParams
models = data.keys()
start = time.time()
scoreSNN = []
scoreRNN = []
scoreCNN = []
scoreRNNGRU = []
scoreRNNLSTM = []
best_paramsSNN = {}
best_paramsRNN = {}
best_paramsRNNGRU = {}
best_paramsRNNLSTM = {}
best_paramsCNN = {}
if "Neural Network"in self.modelList:
self.log.info("-------> Model Name: Neural Network")
start = time.time()
data = self.modelParams["Neural Network"]
p = {"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]
}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.SNNClassification,
experiment_name='SNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectSNN = talos.Analyze(scan_object)
highValAccSNN = analyze_objectSNN.high(matrix_type)
dfSNN = analyze_objectSNN.data
#pd.set_option('display.max_columns',20)
#print(dfSNN)
#pd.reset_option('display.max_columns')
newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN]
if(len(newdfSNN) > 1):
lowLoss = analyze_objectSNN.low('val_loss')
newdfSNN = newdfSNN.loc[newdfSNN['val_loss'] == lowLoss]
best_paramsSNN["activation"] = list(newdfSNN["activation"])[0]
best_paramsSNN["optimizer"] = list(newdfSNN["optimizer"])[0]
best_paramsSNN["losses"] = list(newdfSNN["losses"])[0]
best_paramsSNN["first_layer"] = list(newdfSNN["first_neuron"])[0]
best_paramsSNN["shapes"] = list(newdfSNN["shapes"])[0]
best_paramsSNN["hidden_layers"] = list(newdfSNN["hidden_layers"])[0]
best_paramsSNN["dropout"] = list(newdfSNN["dropout"])[0]
best_paramsSNN["batch_size"] = list(newdfSNN["batch_size"])[0]
best_paramsSNN["epochs"] = list(newdfSNN["epochs"])[0]
best_paramsSNN["lr"] = list(newdfSNN["lr"])[0]
best_paramsSNN["last_activation"] = list(newdfSNN["last_activation"])[0]
best_modelSNN = scan_object.best_model(metric=matrix_type)
try:
if(len(best_paramsSNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsSNN["losses"]
if(len(best_paramsSNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsSNN["optimizer"]
if best_paramsSNN["batch_size"] == 0:
batchsize = 32
else:
batchsize = best_paramsSNN["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'roc_auc':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'recall':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'precision':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
scoreSNN = best_modelSNN.evaluate(XSNN,y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelSNN.metrics_names))
self.log.info("----------> Score: "+str(scoreSNN))
self.log.info("----------> Model Params: "+str(best_paramsSNN))
executionTime=time.time() - start
XSNN = self.testX.values
#predict_x=best_modelSNN.predict(XSNN)
predictedData=np.argmax(best_modelSNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict_classes(XSNN)
#print(predictedData)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
self.log.info('----------> Total Execution: '+str(executionTime)+'\n')
scoreSNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreSNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network")
start = time.time()
data = self.modelParams["Recurrent Neural Network"]
p = {"RNNType":["SimpleRNN"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
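# Map the user-facing scoring parameter to the Talos/Keras validation-metric column
# used to rank the scanned configurations.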
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNN = talos.Analyze(scan_object)
highValAccRNN = analyze_objectRNN.high(matrix_type)
dfRNN = analyze_objectRNN.data
newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN]
if(len(newdfRNN) > 1):
lowLoss = analyze_objectRNN.low('val_loss')
newdfRNN = newdfRNN.loc[newdfRNN['val_loss'] == lowLoss]
best_paramsRNN["RNNType"] = list(newdfRNN["RNNType"])[0]
best_paramsRNN["numRNNLayers"] = list(newdfRNN["numRNNLayers"])[0]
best_paramsRNN["activation"] = list(newdfRNN["activation"])[0]
best_paramsRNN["optimizer"] = list(newdfRNN["optimizer"])[0]
best_paramsRNN["losses"] = list(newdfRNN["losses"])[0]
best_paramsRNN["first_layer"] = list(newdfRNN["first_neuron"])[0]
best_paramsRNN["shapes"] = list(newdfRNN["shapes"])[0]
best_paramsRNN["hidden_layers"] = list(newdfRNN["hidden_layers"])[0]
best_paramsRNN["dropout"] = list(newdfRNN["dropout"])[0]
best_paramsRNN["batch_size"] = list(newdfRNN["batch_size"])[0]
best_paramsRNN["epochs"] = list(newdfRNN["epochs"])[0]
best_paramsRNN["lr"] = list(newdfRNN["lr"])[0]
best_paramsRNN["last_activation"] = list(newdfRNN["last_activation"])[0]
best_modelRNN = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNN["losses"]
if(len(best_paramsRNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNN["optimizer"]
if(best_paramsRNN["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsRNN["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNN.metrics_names))
scoreRNN = best_modelRNN.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNN))
self.log.info("----------> Model Params: "+str(best_paramsRNN))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNN.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (GRU)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (GRU)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (GRU)"]
p = {"RNNType":["GRU"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNNGRU = talos.Analyze(scan_object)
highValAccRNNGRU = analyze_objectRNNGRU.high(matrix_type)
dfRNNGRU = analyze_objectRNNGRU.data
newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU]
if(len(newdfRNNGRU) > 1):
lowLoss = analyze_objectRNNGRU.low('val_loss')
newdfRNNGRU = newdfRNNGRU.loc[newdfRNNGRU['val_loss'] == lowLoss]
best_paramsRNNGRU["RNNType"] = "GRU"
best_paramsRNNGRU["numRNNLayers"] = list(newdfRNNGRU["numRNNLayers"])[0]
best_paramsRNNGRU["activation"] = list(newdfRNNGRU["activation"])[0]
best_paramsRNNGRU["optimizer"] = list(newdfRNNGRU["optimizer"])[0]
best_paramsRNNGRU["losses"] = list(newdfRNNGRU["losses"])[0]
best_paramsRNNGRU["first_layer"] = list(newdfRNNGRU["first_neuron"])[0]
best_paramsRNNGRU["shapes"] = list(newdfRNNGRU["shapes"])[0]
best_paramsRNNGRU["hidden_layers"] = list(newdfRNNGRU["hidden_layers"])[0]
best_paramsRNNGRU["dropout"] = list(newdfRNNGRU["dropout"])[0]
best_paramsRNNGRU["batch_size"] = list(newdfRNNGRU["batch_size"])[0]
best_paramsRNNGRU["epochs"] = list(newdfRNNGRU["epochs"])[0]
best_paramsRNNGRU["lr"] = list(newdfRNNGRU["lr"])[0]
best_paramsRNNGRU["last_activation"] = list(newdfRNNGRU["last_activation"])[0]
best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNNGRU["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNNGRU["losses"]
if(len(best_paramsRNNGRU["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNNGRU["optimizer"]
if(best_paramsRNNGRU["batch_size"]== 0):
batchsize = 32
else:
batchsize = best_paramsRNNGRU["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNNGRU.metrics_names))
scoreRNNGRU = best_modelRNNGRU.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNNGRU))
self.log.info("----------> Model Params: "+str(best_paramsRNNGRU))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNNGRU.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNNGRU.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNNGRU[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (GRU)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNGRU[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (LSTM)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (LSTM)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (LSTM)"]
p = {"RNNType":["LSTM"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNNLSTM = talos.Analyze(scan_object)
highValAccRNNLSTM = analyze_objectRNNLSTM.high(matrix_type)
dfRNNLSTM = analyze_objectRNNLSTM.data
newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM]
if(len(newdfRNNLSTM) > 1):
lowLoss = analyze_objectRNNLSTM.low('val_loss')
newdfRNNLSTM = newdfRNNLSTM.loc[newdfRNNLSTM['val_loss'] == lowLoss]
best_paramsRNNLSTM["RNNType"] = "LSTM"
best_paramsRNNLSTM["numRNNLayers"] = list(newdfRNNLSTM["numRNNLayers"])[0]
best_paramsRNNLSTM["activation"] = list(newdfRNNLSTM["activation"])[0]
best_paramsRNNLSTM["optimizer"] = list(newdfRNNLSTM["optimizer"])[0]
best_paramsRNNLSTM["losses"] = list(newdfRNNLSTM["losses"])[0]
best_paramsRNNLSTM["first_layer"] = list(newdfRNNLSTM["first_neuron"])[0]
best_paramsRNNLSTM["shapes"] = list(newdfRNNLSTM["shapes"])[0]
best_paramsRNNLSTM["hidden_layers"] = list(newdfRNNLSTM["hidden_layers"])[0]
best_paramsRNNLSTM["dropout"] = list(newdfRNNLSTM["dropout"])[0]
best_paramsRNNLSTM["batch_size"] = list(newdfRNNLSTM["batch_size"])[0]
best_paramsRNNLSTM["epochs"] = list(newdfRNNLSTM["epochs"])[0]
best_paramsRNNLSTM["lr"] = list(newdfRNNLSTM["lr"])[0]
best_paramsRNNLSTM["last_activation"] = list(newdfRNNLSTM["last_activation"])[0]
best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNNLSTM["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNNLSTM["losses"]
if(len(best_paramsRNNLSTM["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNNLSTM["optimizer"]
if(best_paramsRNNLSTM["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsRNNLSTM["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNNLSTM.metrics_names))
scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNNLSTM))
self.log.info("----------> Model Params: "+str(best_paramsRNNLSTM))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNNLSTM.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNNLSTM.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNNLSTM[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (LSTM)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNLSTM[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Convolutional Neural Network (1D)"in self.modelList:
self.log.info("-------> Model Name: CNN")
start = time.time()
data = self.modelParams["Convolutional Neural Network (1D)"]
p = {"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"numConvLayers":[int(n) for n in data["numConvLayers"].split(",")],
"MaxPool":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.CNNClassification,
experiment_name='CNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectCNN = talos.Analyze(scan_object)
highValAccCNN = analyze_objectCNN.high(matrix_type)
dfCNN = analyze_objectCNN.data
newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN]
if(len(newdfCNN) > 1):
lowLoss = analyze_objectCNN.low('val_loss')
newdfCNN = newdfCNN.loc[newdfCNN['val_loss'] == lowLoss]
best_paramsCNN["numConvLayers"] = list(newdfCNN["numConvLayers"])
best_paramsCNN["MaxPool"] = list(newdfCNN["MaxPool"])
best_paramsCNN["activation"] = list(newdfCNN["activation"])
best_paramsCNN["optimizer"] = list(newdfCNN["optimizer"])
best_paramsCNN["losses"] = list(newdfCNN["losses"])
best_paramsCNN["first_layer"] = list(newdfCNN["first_neuron"])
best_paramsCNN["shapes"] = list(newdfCNN["shapes"])
best_paramsCNN["hidden_layers"] = list(newdfCNN["hidden_layers"])
best_paramsCNN["dropout"] = list(newdfCNN["dropout"])
best_paramsCNN["batch_size"] = list(newdfCNN["batch_size"])
best_paramsCNN["epochs"] = list(newdfCNN["epochs"])
best_paramsCNN["lr"] = list(newdfCNN["lr"])
best_paramsCNN["last_activation"] = list(newdfCNN["last_activation"])[0]
best_modelCNN = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsCNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsCNN["losses"]
if(len(best_paramsCNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsCNN["optimizer"]
if(best_paramsCNN["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsCNN["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'precision':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'roc_auc':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'f1_score':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelCNN.metrics_names))
scoreCNN = best_modelCNN.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreCNN))
self.log.info("----------> Model Params: "+str(best_paramsCNN))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelCNN.predict_classes(XSNN)
predictedData=np.argmax(best_modelCNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreCNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Convolutional Neural Network (1D)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreCNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Convolutional Neural Network (1D)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
modelScore = []
if len(scoreSNN) != 0:
modelScore.append(scoreSNN[1])
if len(scoreRNN) != 0:
modelScore.append(scoreRNN[1])
if len(scoreRNNGRU) != 0:
modelScore.append(scoreRNNGRU[1])
if len(scoreRNNLSTM) != 0:
modelScore.append(scoreRNNLSTM[1])
if len(scoreCNN) != 0:
modelScore.append(scoreCNN[1])
selectedModel = ""
best_params=""
if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = max(modelScore)
executionTime=time.time() - start
self.log.info("-------> ExecutionTime(sec) :"+str(executionTime)+'\n')
self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2)))
self.log.info('-------> Best Params: '+str(best_params))
return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer
except Exception as inst:
self.log.info( '\n-----> classificationModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) |
Regression.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import talos
from talos import Evaluate
import json
import sys
import time
import os
import tensorflow.keras.utils as kutils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,LSTM,GRU,SimpleRNN,Flatten,Input
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv1D,MaxPooling1D
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
import logging
import tensorflow as tf
import tensorflow.keras.backend as K
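# Custom Keras metrics built on backend ops: rmse_m is the root-mean-squared error and
# r_square is the coefficient of determination (R^2), with K.epsilon() guarding against
# division by zero.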
def rmse_m(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def r_square(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true-K.mean(y_true)))
return (1 - SS_res/(SS_tot+K.epsilon()))
class DLRegressionModel:
def __init__(self,modelList, modelParams, scoreParam, cvSplit, featuresData,
targetData,testX,testY, method,randomMethod,roundLimit,best_feature_model):
self.modelList =modelList
self.modelParams =modelParams
self.scoreParam = scoreParam
self.cvSplit =cvSplit
self.featuresData =featuresData
self.targetData = targetData
self.testX = testX
self.testY = testY
self.method =method
#self.logFile = logFile
self.randomMethod=randomMethod
self.roundLimit=roundLimit
self.log = logging.getLogger('eion')
self.best_feature_model = best_feature_model
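# Talos model-builder methods: each receives the hyperparameter dict chosen for a round,
# builds and fits a Keras model, and returns (history, model). For the RNN and CNN builders
# the tabular input is reshaped to (samples, n_features, 1) so every feature is treated as
# one timestep of a single-channel sequence.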
def RNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
if params['RNNType'] == "LSTM" :
if params['numRNNLayers'] > 1:
model.add(LSTM(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(LSTM(params['first_neuron'],return_sequences=(x < params['numRNNLayers']-1)))
else:
model.add(LSTM(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "GRU" :
if params['numRNNLayers'] > 1:
model.add(GRU(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(GRU(params['first_neuron'],return_sequences=(x < params['numRNNLayers']-1)))
else:
model.add(GRU(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "SimpleRNN" :
if params['numRNNLayers'] > 1:
model.add(SimpleRNN(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(SimpleRNN(params['first_neuron'],return_sequences=(x < params['numRNNLayers']-1)))
else:
model.add(SimpleRNN(params['first_neuron'],input_shape=(x_train.shape[1],1)))
talos.utils.hidden_layers(model, params, 1)
model.add(Dense(1,activation=params['activation']))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def SNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
model = Sequential()
model.add(Dense(params['first_neuron'],input_dim=x_train.shape[1],activation=params['activation']))
talos.utils.hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['activation']))
model.compile(loss=params['losses'], optimizer=params['optimizer'], metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x=x_train,
y=y_train,
validation_data=(x_val, y_val),
epochs=params['epochs'],
batch_size=params['batch_size'],
verbose=0)
return out, model
def CNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
self.log.info(x_train.shape)
x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
self.log.info(params['kernel_size'])
model.add(Conv1D(filters=params['first_neuron'], kernel_size=int(params['kernel_size']), activation=params['activation'], input_shape=(x_train.shape[1],1)) )
if params['numConvLayers'] > 1:
for x in range(1,params['numConvLayers']):
if params['MaxPool'] == "True":
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=8, kernel_size=int(params['kernel_size']), activation=params['activation']))
talos.utils.hidden_layers(model, params, 1)
model.add(Flatten())
model.add(Dense(1))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def TalosScan(self,modelObj):
try:
#dataPath = pd.read_csv(self.dataLocation)
#X = dataPath.drop(self.targetData, axis=1)
X = self.featuresData
x = X.values
loss_matrix = 'mean_absolute_error'
optimizer='Nadam'
Y= self.targetData
y = Y.values
XSNN = X.values
X1 = np.expand_dims(X, axis=2)
scoredetails = ''
kf = KFold(n_splits = self.cvSplit)
for train_index, test_index in kf.split(X):
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
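# The KFold loop is only used to obtain X_train/X_test and y_train/y_test, which the
# Talos scans below treat as a single train/validation split.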
data = self.modelParams
models = data.keys()
lstart = time.time()
scoreSNN = []
scoreRNN = []
scoreCNN = []
scoreRNNGRU = []
scoreRNNLSTM = []
best_paramsSNN = {}
best_paramsRNN = {}
best_paramsRNNGRU = {}
best_paramsRNNLSTM = {}
best_paramsCNN = {}
if "Neural Network"in self.modelList:
self.log.info("-------> Model Name: Neural Network")
start = time.time()
data = self.modelParams["Neural Network"]
p = {"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.SNNRegression,experiment_name='SNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectSNN = talos.Analyze(scan_object)
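# R^2 is the only regression metric here that should be maximised; loss, rmse, mae and mse
# are minimised, so the ranking direction is switched on self.scoreParam.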
highValAccSNN = analyze_objectSNN.high(matrix_type) if self.scoreParam.lower() == 'r2' else analyze_objectSNN.low(matrix_type)
dfSNN = analyze_objectSNN.data
newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN]
best_paramsSNN["activation"] = list(newdfSNN["activation"])[0]
best_paramsSNN["optimizer"] = list(newdfSNN["optimizer"])[0]
best_paramsSNN["losses"] = list(newdfSNN["losses"])[0]
best_paramsSNN["first_layer"] = list(newdfSNN["first_neuron"])[0]
best_paramsSNN["shapes"] = list(newdfSNN["shapes"])[0]
best_paramsSNN["hidden_layers"] = list(newdfSNN["hidden_layers"])[0]
best_paramsSNN["dropout"] = list(newdfSNN["dropout"])[0]
best_paramsSNN["batch_size"] = list(newdfSNN["batch_size"])[0]
best_paramsSNN["epochs"] = list(newdfSNN["epochs"])[0]
best_paramsSNN["lr"] = list(newdfSNN["lr"])[0]
best_modelSNN = scan_object.best_model(metric=matrix_type, asc=(self.scoreParam.lower() != 'r2'))
loss_matrix = best_paramsSNN["losses"]
optimizer = best_paramsSNN["optimizer"]
batchsize = best_paramsSNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreSNN = best_modelSNN.evaluate(XSNN,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelSNN.metrics_names))
self.log.info("----------> Score: "+str(scoreSNN))
self.log.info("----------> Model Params: "+str(best_paramsSNN))
executionTime=time.time() - start
self.log.info('----------> SNN Execution Time: '+str(executionTime)+'\n')
XSNN = self.testX.values
predictedData = best_modelSNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreSNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreSNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreSNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network")
start = time.time()
data = self.modelParams["Recurrent Neural Network"]
p = {"RNNType":["SimpleRNN"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNN = talos.Analyze(scan_object)
highValAccRNN = analyze_objectRNN.high(matrix_type) if self.scoreParam.lower() == 'r2' else analyze_objectRNN.low(matrix_type)
dfRNN = analyze_objectRNN.data
newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN]
best_paramsRNN["RNNType"] = "SimpleRNN"
best_paramsRNN["numRNNLayers"] = list(newdfRNN["numRNNLayers"])[0]
best_paramsRNN["activation"] = list(newdfRNN["activation"])[0]
best_paramsRNN["optimizer"] = list(newdfRNN["optimizer"])[0]
best_paramsRNN["losses"] = list(newdfRNN["losses"])[0]
best_paramsRNN["first_layer"] = list(newdfRNN["first_neuron"])[0]
best_paramsRNN["shapes"] = list(newdfRNN["shapes"])[0]
best_paramsRNN["hidden_layers"] = list(newdfRNN["hidden_layers"])[0]
best_paramsRNN["dropout"] = list(newdfRNN["dropout"])[0]
best_paramsRNN["batch_size"] = list(newdfRNN["batch_size"])[0]
best_paramsRNN["epochs"] = list(newdfRNN["epochs"])[0]
best_paramsRNN["lr"] = list(newdfRNN["lr"])[0]
best_modelRNN = scan_object.best_model(metric=matrix_type, asc=(self.scoreParam.lower() != 'r2'))
loss_matrix = best_paramsRNN["losses"]
optimizer = best_paramsRNN["optimizer"]
batchsize = best_paramsRNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNN = best_modelRNN.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNN.metrics_names))
self.log.info("----------> Score: "+str(scoreRNN))
self.log.info("----------> Model Params: "+str(best_paramsRNN))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (GRU)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (GRU)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (GRU)"]
p = {"RNNType":["GRU"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNGRU',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNNGRU = talos.Analyze(scan_object)
highValAccRNNGRU = analyze_objectRNNGRU.high(matrix_type) if self.scoreParam.lower() == 'r2' else analyze_objectRNNGRU.low(matrix_type)
dfRNNGRU = analyze_objectRNNGRU.data
newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU]
best_paramsRNNGRU["RNNType"] = "GRU"
best_paramsRNNGRU["numRNNLayers"] = list(newdfRNNGRU["numRNNLayers"])[0]
best_paramsRNNGRU["activation"] = list(newdfRNNGRU["activation"])[0]
best_paramsRNNGRU["optimizer"] = list(newdfRNNGRU["optimizer"])[0]
best_paramsRNNGRU["losses"] = list(newdfRNNGRU["losses"])[0]
best_paramsRNNGRU["first_layer"] = list(newdfRNNGRU["first_neuron"])[0]
best_paramsRNNGRU["shapes"] = list(newdfRNNGRU["shapes"])[0]
best_paramsRNNGRU["hidden_layers"] = list(newdfRNNGRU["hidden_layers"])[0]
best_paramsRNNGRU["dropout"] = list(newdfRNNGRU["dropout"])[0]
best_paramsRNNGRU["batch_size"] = list(newdfRNNGRU["batch_size"])[0]
best_paramsRNNGRU["epochs"] = list(newdfRNNGRU["epochs"])[0]
best_paramsRNNGRU["lr"] = list(newdfRNNGRU["lr"])[0]
best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=(self.scoreParam.lower() != 'r2'))
loss_matrix = best_paramsRNNGRU["losses"]
optimizer = best_paramsRNNGRU["optimizer"]
batchsize = best_paramsRNNGRU["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNNGRU = best_modelRNNGRU.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNNGRU.metrics_names))
self.log.info("----------> Score: "+str(scoreRNNGRU))
self.log.info("----------> Model Params: "+str(best_paramsRNNGRU))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNNGRU.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNNGRU[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNNGRU[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (GRU)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNGRU[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (LSTM)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (LSTM)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (LSTM)"]
p = {"RNNType":["LSTM"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNLSTM',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNNLSTM = talos.Analyze(scan_object)
highValAccRNNLSTM = analyze_objectRNNLSTM.high(matrix_type) if self.scoreParam.lower() == 'r2' else analyze_objectRNNLSTM.low(matrix_type)
dfRNNLSTM = analyze_objectRNNLSTM.data
newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM]
best_paramsRNNLSTM["RNNType"] = "GRU"
best_paramsRNNLSTM["numRNNLayers"] = list(newdfRNNLSTM["numRNNLayers"])[0]
best_paramsRNNLSTM["activation"] = list(newdfRNNLSTM["activation"])[0]
best_paramsRNNLSTM["optimizer"] = list(newdfRNNLSTM["optimizer"])[0]
best_paramsRNNLSTM["losses"] = list(newdfRNNLSTM["losses"])[0]
best_paramsRNNLSTM["first_layer"] = list(newdfRNNLSTM["first_neuron"])[0]
best_paramsRNNLSTM["shapes"] = list(newdfRNNLSTM["shapes"])[0]
best_paramsRNNLSTM["hidden_layers"] = list(newdfRNNLSTM["hidden_layers"])[0]
best_paramsRNNLSTM["dropout"] = list(newdfRNNLSTM["dropout"])[0]
best_paramsRNNLSTM["batch_size"] = list(newdfRNNLSTM["batch_size"])[0]
best_paramsRNNLSTM["epochs"] = list(newdfRNNLSTM["epochs"])[0]
best_paramsRNNLSTM["lr"] = list(newdfRNNLSTM["lr"])[0]
best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=(self.scoreParam.lower() != 'r2'))
loss_matrix = best_paramsRNNLSTM["losses"]
optimizer = best_paramsRNNLSTM["optimizer"]
batchsize = best_paramsRNNLSTM["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNNLSTM.metrics_names))
self.log.info("----------> Score: "+str(scoreRNNLSTM))
self.log.info("----------> Model Params: "+str(best_paramsRNNLSTM))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNNLSTM.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNNLSTM[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNNLSTM[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (LSTM)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNLSTM[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Convolutional Neural Network (1D)"in self.modelList:
self.log.info("-------> Model Name: CNN")
start = time.time()
data = self.modelParams["Convolutional Neural Network (1D)"]
p = {"activation":data["activation"].split(","),
"kernel_size":data["kernel_size"].split(","),
"numConvLayers":[int(n) for n in data["numConvLayers"].split(",")],
"MaxPool":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train, x_val = X_test, y_val = y_test, model = modelObj.CNNRegression,experiment_name='CNN', params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectCNN = talos.Analyze(scan_object)
highValAccCNN = analyze_objectCNN.high(matrix_type) if self.scoreParam.lower() == 'r2' else analyze_objectCNN.low(matrix_type)
dfCNN = analyze_objectCNN.data
newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN]
best_paramsCNN["numConvLayers"] = list(newdfCNN["numConvLayers"])[0]
best_paramsCNN["MaxPool"] = list(newdfCNN["MaxPool"])[0]
best_paramsCNN["activation"] = list(newdfCNN["activation"])[0]
best_paramsCNN["optimizer"] = list(newdfCNN["optimizer"])[0]
best_paramsCNN["losses"] = list(newdfCNN["losses"])[0]
best_paramsCNN["first_layer"] = list(newdfCNN["first_neuron"])[0]
best_paramsCNN["shapes"] = list(newdfCNN["shapes"])[0]
best_paramsCNN["hidden_layers"] = list(newdfCNN["hidden_layers"])[0]
best_paramsCNN["dropout"] = list(newdfCNN["dropout"])[0]
best_paramsCNN["batch_size"] = list(newdfCNN["batch_size"])[0]
best_paramsCNN["epochs"] = list(newdfCNN["epochs"])[0]
best_paramsCNN["lr"] = list(newdfCNN["lr"])[0]
best_modelCNN = scan_object.best_model(metric=matrix_type, asc=(self.scoreParam.lower() != 'r2'))
loss_matrix = best_paramsCNN["losses"]
optimizer = best_paramsCNN["optimizer"]
batchsize = best_paramsCNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreCNN = best_modelCNN.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelCNN.metrics_names))
self.log.info("----------> Score: "+str(scoreCNN))
self.log.info("----------> Model Params: "+str(best_paramsCNN))
executionTime=time.time() - start
self.log.info('----------> CNN Execution Time: '+str(executionTime)+'\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelCNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreCNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreCNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"CNN","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreCNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: CNN')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
modelScore = []
if len(scoreSNN) != 0:
modelScore.append(scoreSNN[1])
if len(scoreRNN) != 0:
modelScore.append(scoreRNN[1])
if len(scoreRNNGRU) != 0:
modelScore.append(scoreRNNGRU[1])
if len(scoreRNNLSTM) != 0:
modelScore.append(scoreRNNLSTM[1])
if len(scoreCNN) != 0:
modelScore.append(scoreCNN[1])
selectedModel = ""
best_model = ""
if self.scoreParam == "r2":
if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = max(modelScore)
else:
if len(scoreSNN) != 0 and min(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and min(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and min(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and min(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and min(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = min(modelScore)
executionTime=time.time() - lstart
self.log.info("-------> Total Execution Time(sec):"+str(executionTime))
self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2)))
return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer
except Exception as inst:
self.log.info( '\n-----> regressionModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
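# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal, hypothetical invocation of the
# regression scanner. The parameter-grid strings below are made-up examples,
# not values shipped with the product.
#
#   grid = {"activation": "relu", "optimizer": "Nadam", "losses": "mean_squared_error",
#           "first_layer": "8,16", "shapes": "brick", "hidden_layers": "1,2",
#           "dropout": "0.0,0.2", "learning_rate": "0.001",
#           "batch_size": "32", "epochs": "10"}
#   dl = DLRegressionModel(["Neural Network"], {"Neural Network": grid}, "rmse", 5,
#                          features_df, target_series, testX_df, testY_series,
#                          "", "quantum", 10, "ModelBased")
#   selected, score, model, params, X1, XSNN, details, loss, opt = dl.TalosScan(dl)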
|
edge_convert.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
import sys
from AionConfigManager import AionConfigManager
import joblib
class edgeformats:
def __init__(self,deploy_path):
self.deploy_path = deploy_path
self.edge_deploy_path = os.path.join(deploy_path,"edge")
os.makedirs(self.edge_deploy_path, exist_ok=True)
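# converttoedgedeployment() loads the persisted sklearn model from the deploy folder,
# converts it to ONNX with a float input of width len(features), writes the .onnx file
# under <deploy>/edge and emits a small onnxruntime validation script next to it.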
def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file):
if edge_format == 'onnx':
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
xtrain = xtrain[features]
initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))]
filename = os.path.join(self.deploy_path,saved_model)
loaded_model = joblib.load(filename)
onx = convert_sklearn(loaded_model, initial_types=initial_type)
onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx')
with open(onnx_filename, "wb") as f:
f.write(onx.SerializeToString())
self.createedgeruntimeFile(onnx_filename,profiled_data_file,features)
def createedgeruntimeFile(self,onnx_filename,datafilepath,features):
runtimefilecontent = ''
runtimefilecontent += 'import pandas'
runtimefilecontent += '\n'
runtimefilecontent += 'import numpy'
runtimefilecontent += '\n'
runtimefilecontent += 'import sys'
runtimefilecontent += '\n'
runtimefilecontent += 'import onnxruntime as rt'
runtimefilecontent += '\n'
runtimefilecontent += 'def onnx_runtime_validation():'
runtimefilecontent += '\n'
runtimefilecontent += ' modelfile = r"'+str(onnx_filename)+'"'
runtimefilecontent += '\n'
runtimefilecontent += ' datafile = r"'+str(datafilepath)+'"'
runtimefilecontent += '\n'
runtimefilecontent += ' dataframe = pandas.read_csv(datafile)'
runtimefilecontent += '\n'
runtimefilecontent += ' dataframe = dataframe['+str(features)+']'
runtimefilecontent += '\n'
runtimefilecontent += ' df = dataframe.head(8)'
runtimefilecontent += '\n'
runtimefilecontent += ' dataset = df.values'
runtimefilecontent += '\n'
runtimefilecontent += ' sess = rt.InferenceSession(modelfile)'
runtimefilecontent += '\n'
runtimefilecontent += ' input_name = sess.get_inputs()[0].name'
runtimefilecontent += '\n'
runtimefilecontent += ' label_name = sess.get_outputs()[0].name'
runtimefilecontent += '\n'
runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape'
runtimefilecontent += '\n'
runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)'
runtimefilecontent += '\n'
runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]'
runtimefilecontent += '\n'
runtimefilecontent += ' df[\'predictions\'] = pred_onx'
runtimefilecontent += '\n'
runtimefilecontent += ' result = df.to_json(orient="records")'
runtimefilecontent += '\n'
runtimefilecontent += ' return(result)'
runtimefilecontent += '\n'
runtimefilecontent += 'if __name__ == "__main__":'
runtimefilecontent += '\n'
runtimefilecontent += ' output = onnx_runtime_validation()'
runtimefilecontent += '\n'
runtimefilecontent += ' print("predictions:",output)'
filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py')
f = open(filename, "w")
f.write(str(runtimefilecontent))
f.close()
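# ---------------------------------------------------------------------------
# Usage sketch (illustrative only); the paths and arguments below are hypothetical:
#
#   fmt = edgeformats(r'C:\AION\target\deploy')
#   fmt.converttoedgedeployment('model.sav', 'onnx', xtrain_df, 'classification',
#                               'usecase_1', '1', ['f1', 'f2'], r'C:\AION\data\profiled.csv')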
|
common.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package import utility
from AION.prediction_package.utility import TAB_CHAR
from importlib.metadata import version
"""
This file provides the deployment functionality that is common to most
problem types.
"""
def main_code():
return """
class predict():
def __init__(self):
self.profiler = inputprofiler()
self.selector = selector()
self.trainer = trainer()
self.formatter = output_format()
def run(self, data):
try:
df = self._parse_data(data)
raw_df = df.copy()
df = self.profiler.run(df)
df = self.selector.run(df)
df = self.trainer.run(df)
output = self.formatter.run(raw_df, df)
print("predictions:",output)
return (output)
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
def _parse_data(self, data):
file_path = Path(data)
if file_path.suffix == ".tsv":
df = pd.read_csv(data,encoding='utf-8',sep='\\t',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".csv", ".dat"]:
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".gz"] and file_path.stem.endswith('.csv'):
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
df = pd.json_normalize(jsonData)
else:
jsonData = json.loads(data)
df = pd.json_normalize(jsonData)
return df
import sys
if __name__ == "__main__":
output = predict().run(sys.argv[1])
"""
def profiler_code(params, indent=0):
"""
    Generates the inputprofiler code from the profiler section of the config file.
    It is kept as a separate file because the profiler is also required for input drift detection.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
if params.get('text_features'):
imported_modules.append({'module':'importlib.util'})
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
"""
if params.get('input_features_type'):
imported_modules.append({'module':'dtype','mod_from':'numpy'})
run_code += f"""
df = df.astype({params.get('input_features_type')})
"""
if params.get('word2num_features'):
imported_modules.append({'module':'w2n','mod_from':'word2number'})
run_code += f"""
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))"""
if params.get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}']
df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params.get('force_numeric_conv'):
run_code += f"""
df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
if params.get('conversion_method','').lower() == 'glove':
code_text, modules = __profiler_glove_code(params)
imported_modules.extend( modules)
init_code += code_text
elif params.get('conversion_method','').lower() == 'fasttext':
init_code += __profiler_fasttext_code(params)
run_code += __profiler_main_code(params)
if params.get('unpreprocessed_columns'):
run_code += f"""
df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
def __profiler_glove_code(params, indent=2):
modules = []
modules.append({'module':'load_pretrained','mod_from':'text.Embedding'})
modules.append({'module':'TextProcessing','mod_from':'text'})
code = """
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, pretrained_model = load_pretrained(model_path)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
"""
return code.replace('\n', '\n'+(indent * TAB_CHAR)), modules
def __profiler_fasttext_code(params, indent=2):
code = """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
pretrained_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary')
"""
return code.replace('\n', '\n'+(indent * TAB_CHAR))
def __profiler_main_code(params, indent=2):
code = f"""
df = self.profiler.transform(df)
columns = {params['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
return df
"""
return code.replace('\n', '\n'+(indent * TAB_CHAR))
def feature_selector_code( params, indent=0):
modules = [
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
code = """
class selector():
    # selects the configured output features from the input dataframe
def __init__(self):
pass
def run(self, df):"""
code +=f"""
return df[{params['output_features']}]
"""
return code, modules
def feature_reducer_code( params, indent=0):
modules = [
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
code = f"""
class selector():
def __init__(self):
reducer_file = (Path(__file__).parent/"model")/"{params['reducer_file']}"
if not reducer_file.exists():
raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}')
self.model = joblib.load(reducer_file)
def run(self, df):
reducer_input = {params['input_features']}
reducer_output = {params['output_features']}
df = self.model.transform(df[reducer_input])
return pd.DataFrame(df,columns=reducer_output)
"""
if indent:
code = code.replace('\n', '\n'+(indent * TAB_CHAR))
return code, modules
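# Illustrative 'params' for feature_reducer_code (hypothetical values):
#   params = {'reducer_file': 'pca_model.pkl',
#             'input_features': ['f1', 'f2', 'f3'],
#             'output_features': ['pc1', 'pc2']}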
def create_feature_list(config=None, target_feature=None, deploy_path=None):
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
input_features = config['profiler']['input_features_type']
for x in input_features:
featurelt={}
featurelt['feature'] = x
if x == target_feature:
featurelt['Type'] = 'Target'
else:
if input_features[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif input_features[x] == 'object':
featurelt['Type'] = 'Text'
elif input_features[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile = f"""
import json
def getfeatures():
try:
features = {featurelist}
outputjson = {{"status":"SUCCESS","features":features}}
output = json.dumps(outputjson)
print("Features:",output)
return(output)
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip(\'"\')}}
print("Features:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = getfeatures()
"""
with open( deploy_path/'featureslist.py', 'wb') as f:
f.write( str(featurefile).encode('utf8'))
def requirement_file(deploy_path,model,textFeatures,learner_type='ML'):
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
with open( deploy_path/'requirements.txt', 'wb') as f:
f.write(str(requires).encode('utf8'))
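# The generated requirements.txt pins each module to the version installed in the build
# environment, one entry per line, e.g. (version numbers below are placeholders):
#   pandas==1.5.3
#   scikit-learn==1.2.2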
def create_readme_file(deploy_path,modelfile,features):
data = json.dumps([{x:x+'_value'} for x in features])
backslash_data = data.replace('"', '\\"')
content = f"""
========== Files Structures ==========
{modelfile} ------ Trained Model
aion_prediction.py --> Python package entry point
script/inputprofiler.py --> Profiling like FillNA and Category to Numeric
========== How to call the model ==========
============== From Windows Terminal ==========
python aion_prediction.py "{backslash_data}"
============== From Linux Terminal ==========
python aion_prediction.py "{data}"
============== Output ==========
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}}]}} ## for single Row/Record
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}},{{"Data1":"Value","prediction":"Value"}}]}} ## For Multiple Row/Record
{{"status":"ERROR","message":"description"}} ## In Case Exception or Error
"""
filename = deploy_path/'readme.txt'
with open(filename, 'w') as f:
f.write(content)
def create_util_folder(deploy_path):
import tarfile
ext_path = Path(__file__).parent.parent/'utilities'
for x in ext_path.iterdir():
if x.suffix == '.tar':
if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']:
my_tar = tarfile.open(x)
my_tar.extractall(deploy_path)
my_tar.close()
|
model_deploy.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
from prediction_package import production
from prediction_package import prediction_transformation as cs
class DeploymentManager:
def __init__(self):
self.requirementfile=''
self.modelfile=''
self.s2i_environmentfile=''
self.selectorfile=''
self.profilerfile=''
self.readmepackagename=''
self.pythonpackage=''
self.log = logging.getLogger('eion')
def include_import_file(self,learner_type,method,scoreParam,model_type,model):
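        # Appends the import statements (and matching requirement pins) needed by the
        # generated trained_model.py, selected from the learner type, model type and algorithm.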
if((learner_type == 'DL') or (learner_type == 'TextDL')):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'):
self.modelfile += 'import joblib'
self.modelfile += '\n'
self.modelfile += 'import os'
self.modelfile += '\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\n'
self.modelfile += 'from pathlib import Path'
self.modelfile += '\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\n'
self.modelfile += 'from keras.models import load_model'
self.modelfile += '\n'
self.modelfile += 'import warnings'
self.modelfile += '\n'
self.modelfile += 'from sklearn.preprocessing import StandardScaler'
self.modelfile += '\n'
self.modelfile += 'warnings.filterwarnings("ignore")'
self.modelfile += '\n'
if(learner_type == 'ImageClassification'):
self.modelfile += 'import os'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.models import Sequential'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.preprocessing import image'
self.modelfile += '\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.layers import Input'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.models import Model'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.optimizers import Adam'
self.modelfile += '\n'
self.modelfile += 'import cv2'
self.modelfile += '\n'
if(learner_type == 'objectDetection'):
self.modelfile += 'import os\n'
self.modelfile += 'from object_detection.utils import label_map_util\n'
self.modelfile += 'from object_detection.utils import config_util\n'
self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\n'
self.modelfile += 'from object_detection.builders import model_builder\n'
self.modelfile += 'import tensorflow as tf\n'
self.modelfile += 'import numpy as np\n'
self.modelfile += 'from PIL import Image\n'
self.modelfile += 'import matplotlib.pyplot as plt\n'
self.modelfile += 'import pandas as pd\n'
self.modelfile += 'from pathlib import Path\n'
if(learner_type == 'Text Similarity'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer'
self.modelfile += '\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\n'
if(model == 'Neural Architecture Search'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\n'
self.modelfile += 'import joblib'
self.modelfile += '\n'
self.modelfile += 'import os'
self.modelfile += '\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\n'
self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\n'
self.modelfile += 'import numpy as np\n'
self.modelfile += 'from pathlib import Path\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += 'from tensorflow import constant'
self.modelfile += '\n'
self.modelfile += 'from tf_agents.trajectories import time_step'
self.modelfile += '\n'
            self.requirementfile += 'tensorflow==2.5.0\n'
if model.lower() == 'lstm' or model.lower() == 'mlp':
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\n'
            self.requirementfile += 'tensorflow==2.5.0\n'
if(learner_type == 'Text Similarity'):
self.modelfile += 'def cosine_distance(vests):'
self.modelfile += '\n';
self.modelfile += ' x, y = vests'
self.modelfile += '\n';
self.modelfile += ' x = K.l2_normalize(x, axis=-1)'
self.modelfile += '\n';
self.modelfile += ' y = K.l2_normalize(y, axis=-1)'
self.modelfile += '\n';
self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)'
self.modelfile += '\n';
self.modelfile += 'def cos_dist_output_shape(shapes):'
self.modelfile += '\n';
self.modelfile += ' shape1, shape2 = shapes'
self.modelfile += '\n';
self.modelfile += ' return (shape1[0],1)'
self.modelfile += '\n';
if(learner_type == 'TextDL' or learner_type == 'DL'):
if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def recall_m(y_true, y_pred):'
self.modelfile += '\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\n';
self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))'
self.modelfile += '\n';
self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())'
self.modelfile += '\n';
self.modelfile += ' return recall'
self.modelfile += '\n';
if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def precision_m(y_true, y_pred):'
self.modelfile += '\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\n';
self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))'
self.modelfile += '\n';
self.modelfile += ' precision = true_positives / (predicted_positives + K.epsilon())'
self.modelfile += '\n';
self.modelfile += ' return precision'
self.modelfile += '\n';
if(scoreParam.lower() == 'f1_score'):
self.modelfile += 'def f1_m(y_true, y_pred):'
self.modelfile += '\n';
self.modelfile += ' precision = precision_m(y_true, y_pred)'
self.modelfile += '\n';
self.modelfile += ' recall = recall_m(y_true, y_pred)'
self.modelfile += '\n';
self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))'
self.modelfile += '\n';
if(scoreParam.lower() == 'rmse'):
self.modelfile += 'def rmse_m(y_true, y_pred):'
self.modelfile += '\n';
self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))'
self.modelfile += '\n';
if(scoreParam.lower() =='r2'):
self.modelfile += 'def r_square(y_true, y_pred):'
self.modelfile += '\n';
self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))'
self.modelfile += '\n';
self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))'
self.modelfile += '\n';
self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))'
self.modelfile += '\n';
if(learner_type.lower() in ['similarityidentification','contextualsearch']):
self.modelfile += 'from pathlib import Path\n'
if model_type == 'BM25':
self.modelfile += 'from rank_bm25 import BM25Okapi\n'
elif scoreParam == 'VectorDB Cosine':
self.modelfile += 'import chromadb\n'
else:
self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\n'
        self.pythonpackage += '========== Python Packages Required ========='
self.pythonpackage += '\n'
self.pythonpackage += 'scikit-learn'
self.pythonpackage += '\n'
self.pythonpackage += 'scipy'
self.pythonpackage += '\n'
self.pythonpackage += 'numpy'
self.pythonpackage += '\n'
if((learner_type == 'DL') or (learner_type =='TextDL')):
self.modelfile += 'import numpy as np'
self.modelfile += '\n'
self.requirementfile += 'scikit-learn==0.21.3'
self.requirementfile += '\n'
self.requirementfile += 'scipy==1.3.3'
self.requirementfile += '\n'
self.requirementfile += 'numpy==1.17.4'
self.requirementfile += '\n'
if(learner_type == 'TextML'):
self.requirementfile += 'spacy==2.2.3'
self.requirementfile += '\n'
self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz'
self.requirementfile += '\n'
if(learner_type == 'DL' or learner_type == 'TextDL'):
self.requirementfile += 'keras==2.3.1'
self.requirementfile += '\n'
self.requirementfile += 'tensorflow==2.0.0b1'
self.requirementfile += '\n'
if(learner_type == 'RecommenderSystem'):
self.requirementfile += 'surprise'
self.requirementfile += '\n'
if(method == 'package'):
self.modelfile += 'import surprise'
self.modelfile += '\n'
self.modelfile += 'import statsmodels'
self.modelfile += '\n'
self.requirementfile += 'statsmodels==0.10.2'
self.requirementfile += '\n'
def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False):
self.readme='========== Files Structures =========='
self.readme+='\n'
self.readme+=modelfile+' ------ Trained Model'
self.readme+='\n'
self.readme+='aion_prediction.py --> Python package entry point'
self.readme+='\n'
if not single_file:
self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric'
self.readme+='\n'
self.readme+='script/selector.py --> Feature Selection'
self.readme+='\n'
self.readme+='script/trained_model.py --> Read the model file and call the prediction'
self.readme+='\n'
self.readme+='script/output_format.py --> Output formatter file'
self.readme+='\n'
self.readme+= self.pythonpackage
self.readme+= '========== How to call the model =========='
self.readme+='\n'
self.readme+= '============== From Windows Terminal =========='
self.readme+='\n'
if method == 'optimus_package':
self.readme += 'python aion_prediction.py filename.json'
self.readme +='\n'
self.readme += '========== Embedded Methods =========='
self.readme +='\n'
self.readme += 'Function Name: predict_from_json - When input is Json Data'
self.readme +='\n'
self.readme += 'Function Name: predict_from_file - When input is Json File'
self.readme +='\n'
else:
callpython = 'python aion_prediction.py "[{'
for x in features:
                if(callpython != 'python aion_prediction.py "[{'):
callpython += ','
callpython += '\\\"'+str(x)+'\\\"'+':'+'\\\"'+str(x)+'_value'+'\\\"'
callpython += '}]"'
self.readme += callpython
self.readme+='\n'
self.readme+= '============== From Linux Terminal =========='
self.readme+='\n'
callpython = 'python aion_prediction.py \'[{'
temp =callpython
for x in features:
if(callpython != temp):
callpython += ','
callpython += '"'+str(x)+'"'+':'+'"'+str(x)+'_value'+'"'
callpython += '}]\''
self.readme += callpython
self.readme+='\n'
self.readme+= '============== Output =========='
self.readme+='\n'
        self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"}]} ## For Single Row/Record'
self.readme+='\n'
self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"},{"Data1":"Value","prediction":"Value"}]} ## For Multiple Row/Record'
self.readme+='\n'
self.readme+= '{"status":"ERROR","message":"description"} ## In Case Exception or Error'
self.readme+='\n'
#print(self.readme)
filename = os.path.join(deploy_path,'readme.txt')
self.log.info('-------> Readme File Location: '+filename)
f = open(filename, "wb")
f.write(str(self.readme).encode('utf8'))
f.close()
def create_class(self,classname):
#self.modelfile += 'class '+classname+'(object):'
self.modelfile += 'class trained_model(object):'
self.modelfile += '\n'
def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''):
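        # Builds the source of the generated inputprofiler class: imports, optional preprocess
        # hook, glove/fasttext embedding setup, word-to-number conversion and the profiler
        # pipeline transform, all driven by the 'deploy' configuration.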
profiler = deploy.get('profiler',{})
if isinstance(features, str):
features = features.split(',')
code = f"""
import scipy
import joblib
import numpy as np
import pandas as pd
from pathlib import Path
"""
if text_feature:
code += """
import importlib.util\n"""
if wordToNumericFeatures:
code += """
from word2number import w2n
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += deploy['preprocess']['code']
if profiler.get('conversion_method','').lower() == 'glove':
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
from text.Embedding import load_pretrained
from text import TextProcessing
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, loaded_model = load_pretrained(model_path)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
"""
elif profiler.get('conversion_method','').lower() == 'fasttext':
code += """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
loaded_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
self.model.set_params(text_process__vectorizer__external_model_type = 'binary')
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
"""
else:
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += " df = preprocess( df)\n"
if wordToNumericFeatures:
code += f"""
df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))"""
if profiler.get('unpreprocessed_columns'):
code += f"""
unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}']
df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if profiler.get('force_numeric_conv'):
code += f"""
df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')
"""
code += f"""
if self.model:
df = self.model.transform(df)"""
code += f"""
columns = {output_columns}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
        ## The check below skips restoring the unpreprocessed column for anomaly detection with a datetime feature, where it is not used
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
pass
else:
if profiler.get('unpreprocessed_columns'):
code += f"""
df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
            ## The set_index below is incorrect because the datetime feature is dropped before profiling, so it is kept commented out.
# code += f"""
# df.set_index('{datetimeFeature}', inplace=True)"""
code += f"""
return(df,'{datetimeFeature}')\n"""
else:
code += f"""
return(df)"""
return code
def no_profiling_code(self, features):
if isinstance(features, str):
features = features.split(',')
return f"""
import pandas as pd
import numpy as np
class inputprofiler(object):
def apply_profiler(self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
return df[{features}]
"""
def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''):
filename = str(Path(deploy_path)/'script'/'inputprofiler.py')
if 'profiler' in config:
if model_type == 'BM25':
code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features'])
elif model == 'KaplanMeierFitter':
code = self.no_profiling_code(features)
elif model.lower() in ['arima', 'fbprophet']: #task 12627
code = self.no_profiling_code('noofforecasts')
else:
code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature)
if code:
with open(filename,'w',encoding="utf-8") as f:
f.write(code)
self.log.info('-------> Profiler File Location :'+filename)
return
self.profilerfile += 'import pandas as pd'
self.profilerfile += '\n'
self.profilerfile += 'import joblib'
self.profilerfile += '\n'
self.profilerfile += 'import os'
self.profilerfile += '\n'
self.profilerfile += 'from word2number import w2n'
self.profilerfile += '\n'
self.profilerfile += 'import numpy as np'
self.profilerfile += '\nfrom pathlib import Path\n'
#print("1")
#print(profiler)
if(learner_type == 'Text Similarity' or len(text_features) > 0):
self.profilerfile += 'from text import TextProcessing'
self.profilerfile += '\n'
self.profilerfile += 'def textCleaning(textCorpus):'
self.profilerfile += '\n'
self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()'
self.profilerfile += '\n'
self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)'
self.profilerfile += '\n'
self.profilerfile += ' return(textCorpus)'
self.profilerfile += '\n'
self.profilerfile += 'class inputprofiler(object):'
self.profilerfile += '\n'
self.profilerfile += ' def s2n(self,value):'
self.profilerfile += '\n'
self.profilerfile += ' try:'
self.profilerfile += '\n'
self.profilerfile += ' x=eval(value)'
self.profilerfile += '\n'
self.profilerfile += ' return x'
self.profilerfile += '\n'
self.profilerfile += ' except:'
self.profilerfile += '\n'
self.profilerfile += ' try:'
self.profilerfile += '\n'
self.profilerfile += ' return w2n.word_to_num(value)'
self.profilerfile += '\n'
self.profilerfile += ' except:'
self.profilerfile += '\n'
self.profilerfile += ' return np.nan '
self.profilerfile += '\n'
self.profilerfile += ' def apply_profiler(self,df):'
self.profilerfile += '\n'
if(len(wordToNumericFeatures) > 0):
for w2nFeature in wordToNumericFeatures:
if w2nFeature not in features:
continue
self.profilerfile += " df['"+w2nFeature+"']=df['"+w2nFeature+"'].apply(lambda x: self.s2n(x))"
self.profilerfile += '\n'
self.profilerfile += " df = df.replace(r'^\s*$', np.NaN, regex=True)"
self.profilerfile += '\n'
self.profilerfile += ' try:'
self.profilerfile += '\n'
self.profilerfile += ' df.dropna(how="all",axis=1,inplace=True)'
self.profilerfile += '\n'
self.profilerfile += ' except:'
self.profilerfile += '\n'
self.profilerfile += ' df.fillna(0)'
self.profilerfile += '\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/"model"/"preprocess_pipe.pkl"\n'
self.profilerfile += ' if preprocess_path.exists():\n'
self.profilerfile += ' model = joblib.load(preprocess_path)\n'
if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder':
self.profilerfile += f" df[{features}] = model.transform(df[{features}])\n"
else:
self.profilerfile += f" df = model.transform(df)\n"
if 'operation' in profiler:
y = profiler['operation']
for action in y:
feature = action['feature']
#if feature not in features:
# continue
operation = action['Action']
if(operation == 'Drop'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\n'
self.profilerfile += " df.drop(columns=['"+feature+"'],inplace = True)"
self.profilerfile += '\n'
if(operation == 'FillValue'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\n'
fvalue = action['value']
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value='"+fvalue+"')"
self.profilerfile += '\n'
if(operation == 'Encoder'):
value = action['value']
value = value.replace("\n", "\\n")
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\n'
self.profilerfile += " le_dict="+str(value)
self.profilerfile += '\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].apply(lambda x: le_dict.get(x,-1))"
self.profilerfile += '\n'
self.profilerfile += " if -1 in df['"+feature+"'].values:"
self.profilerfile += '\n'
self.profilerfile += " raise Exception('Category value of "+feature+" not present in training data')"
self.profilerfile += '\n'
if 'conversion' in profiler:
catergoryConverton = profiler['conversion']
#print(catergoryConverton)
if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton):
self.profilerfile += " encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))"
self.profilerfile += '\n'
self.profilerfile += " CategoryFeatures = "+str(catergoryConverton['features'])
self.profilerfile += '\n'
if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding':
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures]).toarray()"
self.profilerfile += '\n'
self.profilerfile += " feature_labels = encoder.get_feature_names(CategoryFeatures)"
self.profilerfile += '\n'
self.profilerfile += " transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) "
self.profilerfile += '\n'
else:
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures])"
self.profilerfile += '\n'
self.profilerfile += " dataColumns=list(df.columns)"
self.profilerfile += '\n'
self.profilerfile += " nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))"
self.profilerfile += '\n'
self.profilerfile += " dataArray=df[nonNormFeatures]"
self.profilerfile += '\n'
self.profilerfile += " df = pd.concat([dataArray, transformed_data],axis=1)"
self.profilerfile += '\n'
y = json.loads(numericToLabel_json)
for feature_details in y:
feature = feature_details['feature']
if feature not in features:
continue
label = feature_details['Labels']
bins = feature_details['Bins']
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\n'
self.profilerfile += " cut_bins="+str(bins)
self.profilerfile += '\n'
self.profilerfile += " cut_labels="+str(label)
self.profilerfile += '\n'
self.profilerfile += " df['"+feature+"'] = pd.cut(df['"+feature+"'],bins=cut_bins,labels=cut_labels)"
self.profilerfile += '\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value=0)"
self.profilerfile += '\n'
if(len(text_features) > 0):
if(len(text_features) > 1):
self.profilerfile += ' merge_features = '+str(text_features)
self.profilerfile += '\n'
self.profilerfile += ' df[\'combined\'] = df[merge_features].apply(lambda row: \' \'.join(row.values.astype(str)), axis=1)'
self.profilerfile += '\n'
self.profilerfile += ' features = [\'combined\']'
self.profilerfile += '\n'
else:
self.profilerfile += " features = "+str(text_features)
self.profilerfile += '\n'
if model_type == 'BM25':
self.profilerfile += """\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df['tokenize'] = pipe.transform(df_text)\n""".format(preprocessing_pipe=preprocessing_pipe)
elif conversion_method == 'sentenceTransformer':
self.profilerfile += """\
df_text = df[features[0]]
from sentence_transformers import SentenceTransformer
model = SentenceTransformer(\'sentence-transformers/msmarco-distilroberta-base-v2\')
df_vect = model.encode(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names())
df1 = df1.add_suffix(\'_vect\')
df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
else:
self.profilerfile += """\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df_vect=pipe.transform(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names())
df1 = df1.add_suffix(\'_vect\')
df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
if(learner_type == 'Text Similarity'):
self.profilerfile += ' df[\''+firstDocFeature+'\'] = textCleaning(df[\''+firstDocFeature+'\'])'
self.profilerfile += '\n'
self.profilerfile += ' df[\''+secondDocFeature+'\'] = textCleaning(df[\''+secondDocFeature+'\'])'
self.profilerfile += '\n'
if len(normFeatures) > 0 and normalizer != '':
self.profilerfile += " normFeatures = "+str(normFeatures)
self.profilerfile += '\n'
self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\'..\',\'model\',\''+normalizer+'\'))'
self.profilerfile += '\n'
self.profilerfile += ' dataColumns=list(df.columns)'
self.profilerfile += '\n'
self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))'
self.profilerfile += '\n'
self.profilerfile += ' dataframe=df[normFeatures]'
self.profilerfile += '\n'
self.profilerfile += ' transDf = normalizepipe.transform(dataframe)'
self.profilerfile += '\n'
self.profilerfile += ' nontransDF=df[nonNormFeatures].values'
self.profilerfile += '\n'
self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures'
self.profilerfile += '\n'
self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, nontransDF)),columns=dataColumns)'
self.profilerfile += '\n'
self.profilerfile += ' df=scaledDf'
self.profilerfile += '\n'
else:
self.profilerfile += ' df=df.dropna()\n'
self.profilerfile += ' return(df)'
filename = os.path.join(deploy_path,'script','inputprofiler.py')
self.log.info('-------> Profiler File Location :'+filename)
f = open(filename, "w",encoding="utf-8")
f.write(str(self.profilerfile))
f.close()
def isEnglish(self, s):
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None):
cs.create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config)
def create_init_function_for_regression(self,modelfile):
self.modelfile += ' def __init__(self):'
self.modelfile += '\n'
self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig):
cs.create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig)
def create_predict_proba(self,learner_type,method):
self.modelfile += ' def predict(self,X,features_names):'
self.modelfile += '\n'
self.modelfile += ' return self.model.predict_proba(X)'
def create_forcast(self,method,no_of_prediction):
self.modelfile += ' def predict(self,X,features_names):'
self.modelfile += '\n'
self.modelfile += ' no_of_prediction = '+str(no_of_prediction)
self.modelfile += '\n'
self.modelfile += ' lag_order = self.model.k_ar'
self.modelfile += '\n'
self.modelfile += ' return self.model.forecast(X.values[-lag_order:],steps=no_of_prediction)'
def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None):
scorePrm = scoreParam
cs.create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scorePrm)
def save_model_deploy(self,outputfolder,modelname):
#filename = outputfolder+modelname+'.py'
filename = os.path.join(outputfolder,'script','trained_model.py')
self.log.info('-------> Model File Location :'+filename)
f = open(filename, "w",encoding="utf-8")
f.write(str(self.modelfile))
f.close()
def create_TextCleaner(self,outputfolder):
profilerPath = os.path.join(outputfolder,'profiler')
try:
os.makedirs(profilerPath)
except OSError:
self.log.info("ProfilePath Folder Already Exists")
try:
textprofileFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','textDataProfiler.py')
initFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','__init__.py')
shutil.copy2(textprofileFileLocation,profilerPath)
shutil.copy2(initFileLocation,profilerPath)
'''
if(platform.system() == 'Windows'):
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\textDataProfiler.py',profilerPath)
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\__init__.py',profilerPath)
else:
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/textDataProfiler.py',profilerPath)
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/__init__.py',profilerPath)
'''
except OSError:
self.log.info("Copy to Profiler Path Failed")
    def listToString(self,s):
        return '[' + ','.join('"' + feature + '"' for feature in s) + ']'
def print_files(self):
self.log.info(self.modelfile)
def create_util_folder(self, deploy_path,learner_type):
import tarfile
ext_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..','utilities'))
for x in os.listdir(ext_path):
if x.endswith('.tar'):
if x not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']:
tarPackage = os.path.join(ext_path, x)
my_tar = tarfile.open(tarPackage)
my_tar.extractall(deploy_path)
my_tar.close()
else:
if learner_type == 'RecommenderSystem':
tarPackage = os.path.join(ext_path, x)
my_tar = tarfile.open(tarPackage)
my_tar.extractall(deploy_path)
my_tar.close()
def deploy_model(self,deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deploy_path,features,profiler,datalocation,output_label,column_merge_flag,textFeatures,numericalFeatures,nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,optimizationmethod,deployFolder,iterName,iterVersion,wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,no_of_prediction,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,config=None):
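        # Packages a trained model for deployment: supported model types are delegated to
        # production.get_deployer(); for the remaining types this method writes the profiler,
        # selector, trained_model, output formatter, readme, requirements and utility files
        # into deploy_path.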
try:
serviceName = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion)
self.log.info('-------> Deploy Location :'+deploy_path)
if production.is_supported(model_type.lower()):
if learner_type == 'Text Similarity':
coder = production.get_deployer(learner_type)
coder.create_file(deploy_path, preprocessing_pipe, saved_model, firstDocFeature, secondDocFeature)
elif model_type.lower() in ['classification', 'regression','clustering','timeseriesforecasting']:
params = {}
params['usecase_name']= iterName
params['usecase_ver']= iterVersion
params['features']={}
params['features']['input_feat'] = config['profiler']['input_features']
params['features']['target_feat'] = targetFeature
params['features']['text_feat'] = textFeatures
params['paths']={}
params['paths']['deploy'] = Path(deploy_path)
params['paths']['usecase'] = params['paths']['deploy'].parent
params['profiler']=config['profiler']
if 'code' in config.get('preprocess',{}).keys():
params['profiler']['preprocess']=config['preprocess']
params['selector']={}
params['selector']['reducer']=True if pcaModel_pickle_file else False
params['selector']['reducer_file']=pcaModel_pickle_file
if pcaModel_pickle_file:
params['selector']['input_features']=bpca_features
params['selector']['output_features']=apca_features
else:
params['selector']['input_features']=config['profiler']['input_features']
params['selector']['output_features']=features
params['training']={}
params['training']['algo']= model
params['training']['model_file']=saved_model
if model_type.lower() == 'timeseriesforecasting':
if params['training']['algo'] in ['LSTM','MLP','ENCODER_DECODER_LSTM_MVI_UVO']:
params['training']['lag_order'] = int(lag_order)
params['training']['scaler_file'] = Path(scalertransformationFile).name
elif params['training']['algo'] == 'VAR':
params['training']['dictDiffCount'] = dictDiffCount
params['training']['no_of_prediction'] = no_of_prediction
elif params['training']['algo'] == 'FBPROPHET':
params['training']['sessonal_freq'] = sessonal_freq
params['training']['additional_regressors'] = additional_regressors
self.log.info(params)
deployer = production.get_deployer(model_type.lower(), params=params)
deployer.run( )
self.log.info('Status:- |... Model deployment files created')
self.log.info('Status:- |... Model deployment completed')
return
else:
# for output_formatter.py
from prediction_package.output_formatter import outputFormatter
outputObj = outputFormatter()
outputObj.crate_output_format_file(deploy_path, learner_type, model_type, model, output_label,
threshold, trained_data_file, dictDiffCount, targetFeature, features,datetimeFeature)
#for aion_predict.py
from prediction_package.aion_prediction import aionPrediction
predictionObj = aionPrediction()
# print(deploy_path)
predictionObj.create_prediction_file(deploy_name, deploy_path, learner_type, grouperbyjson,rowfilterexpression,model_type,datetimeFeature)
# for aion_service.py
predictionObj.create_model_service(deploy_path, serviceName, model_type)
# for aion_publish.py
predictionObj.create_publish_service(usecaseLocation, iterName, iterVersion, model_type)
if learner_type.lower()=="recommendersystem":
# Task 11190---
#For recommender system
from prediction_package.recommender_code import generate_recommender_code
generate_recommender_code(deploy_path)
return
#self.create_TextCleaner(deploy_path)
if(len(textFeatures) > 0):
self.create_TextCleaner(deploy_path)
self.include_import_file(learner_type,deployJson['method'],scoreParam, model_type,model)
if((learner_type == 'TS' and model.lower() not in ['lstm','mlp','var']) or learner_type == 'RecommenderSystem'):
features=[]
self.create_class(deploy_name)
if len(bpca_features) != 0:
self.create_profiler_file(learner_type,deploy_path,profiler,bpca_features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature)
else:
self.create_profiler_file(learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature)
self.create_selector_file(deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature,model_type, model,config)
self.create_init_function_for_classification(saved_model,'classes',learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,model,model_type,imageconfig)
except Exception as e:
print(e)
import traceback
exception_type, exception_object, exception_traceback = sys.exc_info()
filename = exception_traceback.tb_frame.f_code.co_filename
line_number = exception_traceback.tb_lineno
            self.log.info("Exception type: "+str(exception_type))
            self.log.info("File name: "+str(filename))
            self.log.info("Line number: "+str(line_number))
            self.log.info("model deployment error traceback: \n"+traceback.format_exc())
raise Exception(e)
#print(model)
if(model.lower() == 'var'):
self.log.info("Create Forecast Function")
self.create_forcast(deployJson['method'],no_of_prediction)
else:
self.create_predict(learner_type,deployJson['method'],model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,features,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam)
self.save_model_deploy(deploy_path,deploy_name)
if(len(textFeatures) > 0):
if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting':
predictionObj.create_text_drift_file(deploy_path,textFeatures,targetFeature,model_type)
if model_type.lower() == 'classification':
predictionObj.create_classification_text_performance_file(deploy_path,textFeatures,targetFeature)
elif model_type.lower() == 'regression':
predictionObj.create_regression_text_performance_file(deploy_path,textFeatures,targetFeature)
else:
if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': #task 11997
predictionObj.create_drift_file(deploy_path,features,targetFeature,model_type)
if model_type.lower() == 'classification':
predictionObj.create_classification_performance_file(deploy_path,features,targetFeature)
elif model_type.lower() == 'regression':
predictionObj.create_regression_performance_file(deploy_path,features,targetFeature)
self.log.info('Status:- |... Model deployment files created')
self.crate_readme_file(deploy_path,saved_model,features,deployJson['method'])
from prediction_package.requirements import requirementfile
requirementfile(deploy_path,model,textFeatures,learner_type)
os.chdir(deploy_path)
textdata = False
if(learner_type == 'Text Similarity' or len(textFeatures) > 0):
textdata = True
self.create_util_folder(deploy_path,learner_type)
self.log.info('Status:- |... Model deployment completed')
def deployTSum(self,deploy_path,preTrainedModellocation):
def create_predict(preTrainedModellocation):
text = f"""
import sys
import json
def predict(data):
try:
import pandas as pd
import numpy as np
from pathlib import Path
keywordsFile =Path(__file__).parent/'data'/'keywordDataBase.csv'
outputSumFile =Path(__file__).parent/'data'/'summarizedOutput.csv'
fileName=data
#print("fileName---",fileName)
inputDataFileFrame = pd.DataFrame()
inputDataFileFrame['Sentences']=""
rowIndex=0
if fileName.endswith(".pdf"):
from pypdf import PdfReader
reader = PdfReader(fileName)
number_of_pages = len(reader.pages)
text=""
textOutputForFile=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
sentence=sentence.replace("\\n", " ")
if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) :
continue
inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
if fileName.endswith(".txt"):
data=[]
with open(fileName, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
sentences=str1.split(".")
count=0
for sentence in sentences:
count += 1
inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
inputDataFileFrame['LabelByKw']=0
#print(inputDataFileFrame)
keywordsFileFrame=pd.read_csv(keywordsFile,encoding='utf-8')
Keyword_list = keywordsFileFrame['Keyword'].tolist()
for i in inputDataFileFrame.index:
for x in Keyword_list:
if (str(inputDataFileFrame["Sentences"][i])).lower().find(x) != -1:
inputDataFileFrame['LabelByKw'][i]=1
break
import pickle
from sklearn.preprocessing import LabelEncoder
pkl_filename='classificationModel.sav'
pkl_filename =Path(__file__).parent/'model'/'classificationModel.sav'
with open(pkl_filename, 'rb') as file:
pickle_model = pickle.load(file)
testsample=inputDataFileFrame[["Sentences"]]
labelencoder = LabelEncoder()
testsample["Sentences"] = labelencoder.fit_transform(testsample["Sentences"])
y_predicted = pickle_model.predict_proba(testsample)
df=pd.DataFrame({{"SectionName":np.nan,"Sentences":np.nan, "Predicted_Prob":y_predicted[:,1]}})
df['LabelByModel']=df['Predicted_Prob'].apply(lambda x: 0 if x <= 0.5 else 1 )
inputDataFileFrame['LabelByModel']= df['LabelByModel']
textToSum=""
for i in inputDataFileFrame.index:
if (inputDataFileFrame['LabelByModel'][i] or inputDataFileFrame['LabelByKw'][i]) :
textToSum=textToSum+" "+inputDataFileFrame["Sentences"][i]
stdir=r"{preTrainedModellocation}"
stdir = stdir.replace('\\\\', '\\\\\\\\')
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
modelbert = AutoModelForSeq2SeqLM.from_pretrained(stdir,local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(stdir,local_files_only=True)
inputs = tokenizer("summarize: " + textToSum, return_tensors="pt", max_length=512, truncation=True)
outputs = modelbert.generate(inputs["input_ids"], max_length=512, min_length=140, length_penalty=2.0, num_beams=4, early_stopping=True)
summarizedOutputOfSection= tokenizer.decode(outputs[0])
summarizedOutputOfSection=summarizedOutputOfSection.replace("</s>","")
summarizedOutputOfSection=summarizedOutputOfSection.replace("<s>","")
sumDatadata = [summarizedOutputOfSection]
df = pd.DataFrame(sumDatadata, columns=['textSum'])
df.to_csv(outputSumFile,encoding='utf-8')
outputjson = {{"status":"SUCCESS","msg":"Press Download button to download summarized output","data":summarizedOutputOfSection}}
print("predictions:",json.dumps(outputjson))
return (json.dumps(outputjson))
except KeyError as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
"""
return text
deploy_path = Path(deploy_path)
aion_prediction = deploy_path/'aion_predict.py'
with open(aion_prediction, 'w') as f:
f.write(create_predict(preTrainedModellocation))
|
recommender_code.py | #task 11190: Item based Recommender system---Usnish
import os
def generate_recommender_code(deployPath):
code = """
import pandas as pd
import numpy as np
import os
ITEMID = 'itemId'
DATA_FOLDER = 'data'
USER_ITEM_MATRIX = 'user_item_matrix.csv'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv'
RATING = 'rating'
SIMILARITY_SCORE = 'similarity_score'
class collaborative_filter(object):
def __init__(self):
self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0)
self.matrix.index.name = ITEMID
self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX))
self.item_similarity_cosine.index.name = ITEMID
self.item_similarity_cosine.columns.name = ITEMID
def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5):
import operator
if not isinstance(picked_userid,str):
picked_userid = str(picked_userid)
if picked_userid not in self.matrix.columns:
raise KeyError("UserID Does Not Exist")
# Movies that the target user has not watched
try:
picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index()
picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist()
# Movies that the target user has watched
picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \
.sort_values(ascending=False)) \
.reset_index() \
.rename(columns={picked_userid: 'rating'})
# Dictionary to save the unwatched movie and predicted rating pair
rating_prediction = {}
# Loop through unwatched movies
for picked_movie in picked_userid_unwatched:
if not isinstance(picked_movie,str):
picked_movie = str(picked_movie)
# Calculate the similarity score of the picked movie with other movies
try:
picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename(
columns={picked_movie: SIMILARITY_SCORE})
# Rank the similarities between the picked user watched movie and the picked unwatched movie.
picked_userid_watched_similarity = pd.merge(left=picked_userid_watched,
right=picked_movie_similarity_score,
on=ITEMID,
how='inner') \
.sort_values(SIMILARITY_SCORE, ascending=False)[
:number_of_similar_items]
# Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user
try:
predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6)
except Exception as e:
predicted_rating = 0
# Save the predicted rating in the dictionary
rating_prediction[picked_movie] = predicted_rating
except Exception as e:
rating_prediction[picked_movie] = 0
# Return the top recommended movies
return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations]
except Exception as e:
print(e)
raise KeyError(str(e))
def predict(self,X):
predictions = []
for index,row in X.iterrows():
score = self.item_based_rec(int(row["uid"]),int(row["numberOfRecommendation"]))
df = pd.DataFrame(score,columns=['ItemId','Ratings'])
predictions.append(df)
return predictions"""
filename = os.path.join(deployPath, 'script', 'item_recommendation.py')
# print(deploy_path)
f = open(filename, "wb")
f.write(str(code).encode('utf8'))
f.close()
|
aion_prediction.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
class aionPrediction:
def __init__(self):
self.log = logging.getLogger('eion')
def create_optimus_prediction_file (self,classname,deploy_path,learner_type):
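        # Builds prediction.py as a concatenated string: the generated script dynamically loads inputprofiler.py, selector.py,
        # trained_model.py and output_format.py from its own directory via importlib and chains them on the input JSON.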
self.predictionFile = 'import warnings'
self.predictionFile += '\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\n'
self.predictionFile += 'import json'
self.predictionFile += '\n'
self.predictionFile += 'import os'
self.predictionFile += '\n'
self.predictionFile += 'import sys'
self.predictionFile += '\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\n'
self.predictionFile += 'from importlib import import_module'
self.predictionFile += '\n'
self.predictionFile += 'import importlib.util'
self.predictionFile += '\n'
self.predictionFile += 'class prediction:'
self.predictionFile += '\n'
self.predictionFile += ' def predict_from_json(self,json_data):'
self.predictionFile += '\n'
self.predictionFile += ' data = json.loads(json_data)'
self.predictionFile += '\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\n'
self.predictionFile += '\n'
self.predictionFile += ' def predict_from_file(self,filename):'
self.predictionFile += '\n'
self.predictionFile += ' with open(filename,\'r\',encoding=\'utf-8\') as f:'
self.predictionFile += '\n'
self.predictionFile += ' data = json.load(f)'
self.predictionFile += '\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\n'
self.predictionFile += '\n'
self.predictionFile += ' def predict(self,json_data):'
self.predictionFile += '\n'
self.predictionFile += ' try:'
self.predictionFile += '\n'
#self.predictionFile += ' jsonData = json.loads(json_data)'
self.predictionFile += ' jsonData=json_data'
self.predictionFile += '\n'
self.predictionFile += ' model_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/trained_model.py")'
self.predictionFile += '\n'
self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)'
self.predictionFile += '\n'
self.predictionFile += ' model_obj.loader.exec_module(model)'
self.predictionFile += '\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/inputprofiler.py")'
self.predictionFile += '\n'
self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)'
self.predictionFile += '\n'
self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)'
self.predictionFile += '\n'
self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/selector.py")'
self.predictionFile += '\n'
self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)'
self.predictionFile += '\n'
self.predictionFile += ' selector_obj.loader.exec_module(selector)'
self.predictionFile += '\n'
self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/output_format.py")'
self.predictionFile += '\n'
self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)'
self.predictionFile += '\n'
self.predictionFile += ' output_format_obj.loader.exec_module(output_format)'
self.predictionFile += '\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()'
self.predictionFile += '\n'
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\n'
self.predictionFile += ' selectobj = selector.selector()'
self.predictionFile += '\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\n'
self.predictionFile += ' output = model.trained_model().predict(df,"")'
self.predictionFile += '\n'
self.predictionFile += ' outputobj = output_format.output_format()'
self.predictionFile += '\n'
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
#self.predictionFile += '\n'
#self.predictionFile += ' print(output)'
self.predictionFile += '\n'
self.predictionFile += ' return output'
self.predictionFile += '\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' return json.dumps(output)'
self.predictionFile += '\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' return json.dumps(output)'
self.predictionFile += '\n'
self.predictionFile += '\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\n'
self.predictionFile += ' predictobj = prediction()'
self.predictionFile += '\n'
self.predictionFile += ' predictobj.predict_from_file(sys.argv[1])'
self.predictionFile += '\n'
filename = os.path.join(deploy_path,'prediction.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_text_drift_file(self,deploy_path,features,target,model_type): #task-14549
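        # Generates aion_ipdrift.py: the emitted drift() loads JSON input (file path or string), injects the training
        # features/target, calls monitoring.check_drift, and for non-timeseries models adds an Evidently text-drift report.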
self.predictionFile = 'import warnings'
self.predictionFile += '\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\n'
self.predictionFile += 'import json'
self.predictionFile += '\n'
self.predictionFile += 'import os'
self.predictionFile += '\n'
self.predictionFile += 'import sys'
self.predictionFile += '\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\n'
self.predictionFile += 'from monitoring import check_drift'
self.predictionFile += '\n'
self.predictionFile += 'def drift(data):'
self.predictionFile += '\n'
self.predictionFile += ' try:'
self.predictionFile += '\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\n'
self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\n'
self.predictionFile += ' else:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\n'
self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\''
self.predictionFile += '\n'
self.predictionFile += ' jsonData[\'target\'] = \''+target+'\''
self.predictionFile += '\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += ' htmlfilepath=evidently_details(jsonData)'
self.predictionFile += '\n'
else:
self.predictionFile += ' htmlfilepath=\'\''
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.dumps(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' output = check_drift(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' output = json.loads(output)'
self.predictionFile += '\n'
self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:", json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += 'def evidently_details(deployJson):'
self.predictionFile += '\n'
self.predictionFile += ' features = deployJson[\'features\'].split(\',\')'
self.predictionFile += '\n'
self.predictionFile += ' target = deployJson[\'target\']'
self.predictionFile += '\n'
self.predictionFile += """\
try:
from evidently.report import Report
from evidently.metrics import TextDescriptorsDriftMetric, ColumnDriftMetric
from evidently.pipeline.column_mapping import ColumnMapping
from sklearn.preprocessing import LabelEncoder
historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?'])
currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?'])
historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
currentdataFrame.columns = currentdataFrame.columns.str.strip()
hdf = historicaldataFrame.dropna(subset=features)
cdf = currentdataFrame.dropna(subset=features)
hdf['Text_Features'] = hdf[features].apply("-".join, axis=1)
cdf['Text_Features'] = cdf[features].apply("-".join, axis=1)
hdf['target'] = historicaldataFrame[target]
cdf['target'] = currentdataFrame[target]
le = LabelEncoder()
le.fit(hdf['target'])
hdf['target'] = le.transform(hdf['target'])
le.fit(cdf['target'])
cdf['target'] = le.transform(cdf['target'])
hd = hdf[['Text_Features', 'target']]
cd = cdf[['Text_Features', 'target']]
column_mapping = ColumnMapping()
column_mapping.target = 'target'
column_mapping.prediction = 'target'
column_mapping.text_features = ['Text_Features']
column_mapping.numerical_features = []
column_mapping.categorical_features = []
performance_report = Report(metrics=[ColumnDriftMetric('target'),TextDescriptorsDriftMetric(column_name='Text_Features')])
performance_report.run(reference_data=hd, current_data=cd,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),"log","My_report.html")
performance_report.save_html(report)
return(report)
except Exception as e:
print('Error: ', e)
return('NA')"""
self.predictionFile += '\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\n'
self.predictionFile += ' output = drift(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_ipdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_drift_file(self,deploy_path,features,target,model_type):
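        # Generates aion_ipdrift.py for tabular data: input drift via monitoring.check_drift plus, for non-timeseries
        # models, an Evidently DataDriftPreset report built on profiler-transformed training and current data.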
self.predictionFile = 'import warnings'
self.predictionFile += '\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\n'
self.predictionFile += 'import json'
self.predictionFile += '\n'
self.predictionFile += 'import os'
self.predictionFile += '\n'
self.predictionFile += 'import sys'
self.predictionFile += '\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\n'
self.predictionFile += 'from monitoring import check_drift'
self.predictionFile += '\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\n'
self.predictionFile += 'from script.inputprofiler import inputprofiler'
self.predictionFile += '\n'
self.predictionFile += 'def drift(data):'
self.predictionFile += '\n'
self.predictionFile += ' try:'
self.predictionFile += '\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\n'
self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\n'
self.predictionFile += ' else:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\n'
self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\''
self.predictionFile += '\n'
self.predictionFile += ' jsonData[\'target\'] = \''+target+'\''
self.predictionFile += '\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += ' htmlfilepath=evidently_details(jsonData)'
self.predictionFile += '\n'
else:
self.predictionFile += ' htmlfilepath=\'\''
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.dumps(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' output = check_drift(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' output = json.loads(output)'
self.predictionFile += '\n'
self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)'
self.predictionFile += '\n'
self.predictionFile += ' output = json.dumps(output)'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:",output)'
self.predictionFile += '\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += 'def evidently_details(deployJson):'
self.predictionFile += '\n'
self.predictionFile += ' features = deployJson[\'features\'].split(\',\')'
self.predictionFile += '\n'
self.predictionFile += ' target = deployJson[\'target\']'
self.predictionFile += '\n'
self.predictionFile += """\
try:
from evidently.report import Report
from evidently.metric_preset import DataDriftPreset
historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?'])
currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?'])
historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
currentdataFrame.columns = currentdataFrame.columns.str.strip()
profilerobj = inputprofiler()
historicaldataFramep = profilerobj.run(historicaldataFrame)
currentdataFramep = profilerobj.run(currentdataFrame)
hdf = historicaldataFramep[features]
cdf = currentdataFramep[features]
hdf['target'] = historicaldataFrame[target]
cdf['target'] = currentdataFrame[target]
data_drift_report = Report(metrics = [DataDriftPreset()])
data_drift_report.run(reference_data=hdf,current_data=cdf,column_mapping = None)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','my_report.html')
data_drift_report.save_html(report)
return(report)
except Exception as e:
print('Error')
return('NA')"""
self.predictionFile += '\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\n'
self.predictionFile += ' output = drift(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_ipdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature):
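        # Generates aion_predict.py: the emitted predict() accepts .tsv/.csv/.dat/.json files or raw JSON, optionally
        # applies the configured row filter and date-based group-by aggregation, then runs profiler -> selector -> model
        # (or the collaborative filter for recommender systems) and formats the result via output_format.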
self.predictionFile = 'import warnings'
self.predictionFile += '\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\n'
self.predictionFile += 'import json'
self.predictionFile += '\n'
self.predictionFile += 'import os'
self.predictionFile += '\n'
self.predictionFile += 'import sys'
self.predictionFile += '\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\n'
if(learner_type.lower() != 'recommendersystem'): #task 11190
self.predictionFile += 'from script.selector import selector'
self.predictionFile += '\n'
self.predictionFile += 'from script.inputprofiler import inputprofiler'
self.predictionFile += '\n'
#self.predictionFile += 'from '+classname+' import '+classname
self.predictionFile += 'from script.trained_model import trained_model'
self.predictionFile += '\n'
else:
self.predictionFile += 'from script.item_recommendation import collaborative_filter'
self.predictionFile += '\n'
self.predictionFile += 'from script.output_format import output_format'
self.predictionFile += '\n'
if (learner_type != 'RecommenderSystem'): #task 11190
self.predictionFile += 'profilerobj = inputprofiler()'
self.predictionFile += '\n'
self.predictionFile += 'selectobj = selector()'
self.predictionFile += '\n'
self.predictionFile += 'modelobj = trained_model()'
self.predictionFile += '\n'
else:
self.predictionFile += 'colabobj = collaborative_filter()'
self.predictionFile += '\n'
self.predictionFile += 'outputobj = output_format()'
self.predictionFile += '\n'
self.predictionFile += 'def predict(data):'
self.predictionFile += '\n'
self.predictionFile += ' try:'
self.predictionFile += '\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".tsv":'
self.predictionFile += '\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',sep=\'\\t\',skipinitialspace = True,na_values=[\'-\',\'?\'])'
self.predictionFile += '\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".csv":'
self.predictionFile += '\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])'
self.predictionFile += '\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".dat":'
self.predictionFile += '\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])'
self.predictionFile += '\n'
self.predictionFile += ' else:'
self.predictionFile += '\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\n'
self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\n'
self.predictionFile += ' else:'
self.predictionFile += '\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\n'
self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)'
self.predictionFile += '\n'
if str(rowfilterexpression) != '':
self.predictionFile += ' filterexpression = "'+rowfilterexpression+'"'
self.predictionFile += '\n'
self.predictionFile += ' df = df.query(filterexpression)'
self.predictionFile += '\n'
#print(grouperbyjson)
if str(grouperbyjson) != '':
datetime = grouperbyjson['datetime']
unit = grouperbyjson['unit']
if unit == '':
self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'])'
self.predictionFile += '\n'
else:
self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'],unit=\''+unit+'\')'
self.predictionFile += '\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\n'
self.predictionFile += ' df.set_index(\'date\',inplace=True)'
self.predictionFile += '\n'
self.predictionFile += ' df = df.'+grouperbyjson['groupbystring']
self.predictionFile += '\n'
self.predictionFile += ' df.columns = df.columns.droplevel(0)'
self.predictionFile += '\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\n'
if(learner_type != 'RecommenderSystem'): #task 11190
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)'
self.predictionFile += '\n'
else:
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\n'
#self.predictionFile += ' modelobj = '+classname+'()'
self.predictionFile += ' output = modelobj.predict(df,"")'
self.predictionFile += '\n'
else:
self.predictionFile += ' output = colabobj.predict(df)'
self.predictionFile += '\n'
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)'
self.predictionFile += '\n'
else:
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
self.predictionFile += '\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
self.predictionFile += '\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\n'
self.predictionFile += ' output = predict(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_predict.py')
f = open(filename, "w")
f.write(str(self.predictionFile))
f.close()
def create_classification_text_performance_file(self,deploy_path,features,target):
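        # Generates aion_opdrift.py for text classification: scores training and current data through aion_predict and
        # builds an Evidently ClassificationPreset report saved under log/performance.html.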
features = ",".join([feature for feature in features])
self.predictionFile = """\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
# from evidently.dashboard import Dashboard
# from evidently.tabs import ClassificationPerformanceTab
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
from evidently.report import Report
from evidently.metric_preset import ClassificationPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \''+features+'\''
self.predictionFile += '\n'
self.predictionFile += ' target = \''+target+'\''
self.predictionFile += '\n'
self.predictionFile +="""\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.text_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_classification_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import ClassificationPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \''+features+'\''
self.predictionFile += '\n'
self.predictionFile += ' target = \''+target+'\''
self.predictionFile += '\n'
self.predictionFile +="""\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
model_performance_dashboard = Report(metrics = [ClassificationPreset()])
model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
model_performance_dashboard.save_html(report)
metrics_output = model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_model_service(self,deploy_path,serviceName,problemType):
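        # Generates aion_service.py, a Flask/flask_restful app exposing /AION/<serviceName>/predict and /predict_file,
        # plus /explain, /monitoring and /performance for classification and regression models; CORS origins come from the -cors argument.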
filedata = """
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from aion_predict import predict"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_xai import local_analysis
from aion_ipdrift import drift
from aion_opdrift import odrift"""
filedata += """
import json
import os
import pandas as pd
import io
import argparse
from pathlib import Path
from flask_cors import CORS, cross_origin
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
modelPath = Path(__file__).parent
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
output = predict().run(json.dumps(data))
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
return jsonify(json.loads(output))
else:
            displaymsg='File is missing'
return jsonify(displaymsg)
def get(self):
msg=\"""
RequestType: POST
Body:send file content in body\"""
return jsonify(msg)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = drift(json.dumps(data))
return jsonify(json.loads(output))
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')""".format(serviceName=serviceName)
filedata += """
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')""".format(serviceName=serviceName)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')""".format(serviceName=serviceName)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
filename = os.path.join(deploy_path,'aion_service.py')
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
def create_regression_performance_file(self,deploy_path,features,target):
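        # Generates aion_opdrift.py for regression: same flow as the classification variant but with Evidently's RegressionPreset.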
features = ",".join([feature for feature in features])
self.predictionFile = """\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import RegressionPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \''+features+'\''
self.predictionFile += '\n'
self.predictionFile += ' target = \''+target+'\''
self.predictionFile += '\n'
self.predictionFile +="""\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_regression_text_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from aion_predict import predict
from evidently.report import Report
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.metric_preset import RegressionPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \''+features+'\''
self.predictionFile += '\n'
self.predictionFile += ' target = \''+target+'\''
self.predictionFile += '\n'
self.predictionFile +="""\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_publish_service(self,datalocation,usecaseid,version,problemType):
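        # Generates aion_publish_service.py (only if it does not already exist): the service reads the deployed model
        # version from config.json, tracks prediction calls, production data and drift history in a local SQLite data.db,
        # and exposes predict, predict_file and metrices endpoints (plus explain/monitoring/performance for classification/regression).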
filename = os.path.join(datalocation,'aion_publish_service.py')
if not os.path.exists(filename):
filedata = """
import sys
import json
import time
import sqlite3
import argparse
import pandas as pd
import io
from pathlib import Path
from datetime import datetime
filename = Path(__file__).parent/'config.json'
with open (filename, "r") as f:
data = json.loads(f.read())
modelVersion = str(data['version'])
modelPath = Path(__file__).parent/modelVersion
sys.path.append(str(modelPath))
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_cors import CORS, cross_origin
from flask import Response
from aion_predict import predict
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_ipdrift import drift
from aion_opdrift import odrift
if is_explainable:
from aion_xai import local_analysis
"""
filedata += """
dataPath = Path(__file__).parent/'data'
dataPath.mkdir(parents=True, exist_ok=True)
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class sqlite_db():
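    # Minimal sqlite3 helper used by this service to persist prediction counts, production data and drift history;
    # every column is created as TEXT and queries are built from the caller-supplied table and column names.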
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name,condition=''):
if condition == '':
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
else:
return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def update(self,table_name,updates,condition):
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()"""
filedata += """
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
data = request.get_json()
output = predict().run(json.dumps(data))
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
else:
output = {'status':'error','msg':'File is missing'}
return jsonify(output)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
        data={'trainingDataLocation':str(trainingDataPath),'currentDataLocation':str(dataFile)}
output = drift(json.dumps(data))
outputData = json.loads(output)
status = outputData['status']
if status == 'SUCCESS':
Msg = str(outputData['data'])
else:
Msg = 'Error during drift analysis'
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.write(data,'monitoring')
return jsonify(json.loads(output))"""
filedata += """
class matricesapi(Resource):
def get(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if sqlite_dbObj.table_exists('metrices'):
df1 = sqlite_dbObj.read('metrices')
else:
df1 = pd.DataFrame()
#print(df1)
if sqlite_dbObj.table_exists('monitoring'):
df2 = sqlite_dbObj.read('monitoring')
else:
df2 = pd.DataFrame()
msg = {'Deployed Version':str(modelVersion)}
if df1.shape[0] > 0:
msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])})
else:
msg.update({'noOfPredictCalls':'0'})
driftDetails = []
for idx in reversed(df2.index):
driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])}
driftDetails.append(driftd)
msg.update({'driftDetails':driftDetails})
return jsonify(msg)
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
        data={'trainingDataLocation':str(trainingDataPath),'currentDataLocation':str(dataFile)}
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')
api.add_resource(matricesapi, '/AION/{serviceName}/metrices')""".format(serviceName=usecaseid)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')
""".format(serviceName=usecaseid)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
data = {'version':version}
filename = os.path.join(datalocation,'config.json')
with open(filename, "w") as outfile:
json.dump(data, outfile)
outfile.close() |
utility.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
TAB_CHAR = ' ' * 4
def import_modules(importer, modules_list):
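    # Register each module spec (a dict with 'module' and optional 'mod_from'/'mod_as') on the given importModule instance.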
for module in modules_list:
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
|
imports.py | """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
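        # Record an import (with optional source package and alias). Standard-library modules are tracked separately;
        # re-registering the same module with a different source or alias keeps every variant in a list.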
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
            elif mod_from not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
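        # Build a pip requirements string for all registered third-party imports (including those of extra importers),
        # mapping known import names to their distribution names and pinning each to the locally installed version.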
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\n"
else:
require += f"{mod}=={version(mod)}\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
def getCode(self):
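        # Render the registered imports as source code, grouped into standard-library, third-party and local sections.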
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\n\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\n\n#local modules" + local_modules + '\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
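# Illustrative usage sketch (not part of the original module):
#   importer = importModule()
#   importer.addModule('pandas', mod_as='pd')
#   importer.addModule('Path', mod_from='pathlib')
#   importer.getCode()        # grouped import statements as a string
#   importer.getBaseModule()  # pinned requirements for the third-party imports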
|
EncryptPythonSourceCode.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import subprocess
import os
import glob
import sys
import python_minifier
def encrypt_files(path):
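    # For every .py file under the given path: create SecuredScripts/<name>/, minify it with pyminify, obfuscate the
    # minified file with pyarmor, then gzip-compress the result with pyminifier into dist/<name>_compressed.py.
    # Tool paths are resolved from the interpreter's Scripts folder as .exe files, so this flow is Windows-specific.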
cwd = os.getcwd()
secure_path = os.path.join(path,'SecuredScripts')
try:
if not os.path.exists(secure_path):
os.mkdir(secure_path)
files = [f for f in glob.glob(path + "/*.py")]
for file in files:
#encrypted_file_details[0] = file
#file = files[0]
#print(file)
#filename_w_dir = os.path.splitext(file)
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
file_folder_path = os.path.join(secure_path,filename)
#print(file_folder_path)
if not os.path.exists(file_folder_path):
os.mkdir(file_folder_path)
# Minify python source code
minify_file = os.path.join(file_folder_path,filename+'_minify.py')
pythonfolder,_ = os.path.split(sys.executable)
pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe')
minify_command = "\""+sys.executable+"\" \""+pyminify_script+ "\" \"" + file + "\" > \"" + minify_file+"\""
subprocess.call(minify_command, shell=True)
# Change directory to folder path
os.chdir(file_folder_path)
# Obfuscate minified file
pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe')
obfusc_commmand = "\""+sys.executable+"\" \""+pyarmor_script+"\" obfuscate \"" + minify_file+"\""
#print(obfusc_commmand)
subprocess.call(obfusc_commmand, shell=True)
# Change directory to dist path
obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py')
#print(obfusc_file)
chdirpath = os.path.join(file_folder_path,'dist')
os.chdir(chdirpath)
# Compress obfuscated file
compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py')
#print(compressed_file)
pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe')
compress_command = "\""+sys.executable+"\" \""+pyminifier_script+"\" --gzip -o \"" +compressed_file + "\" \"" + obfusc_file+"\""
#print(compress_command)
subprocess.call(compress_command, shell=True)
#compile_command = sys.executable+'-m py_compile "' + compressed_file+'"'
#print(compile_command)
#subprocess.call(compile_command , shell=True)
#encrypted_file_details['compiled_file'] = file
#compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc')
#encrypted_file_details[1] = compiled_file
#encrypted_file_list.append(encrypted_file_details)
#encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc'
#encrypt_command = "python " + cwd + "\\Encrypt_Key_Dcrypt.py " + compiled_file + ' ' + encrypted_file + " --g -e"
#print(encrypt_command)
#subprocess.call(encrypt_command, shell=True)
#encrypted_file_list += ']'
#return(encrypted_file_list)
os.chdir(path)
except OSError as err:
print ("Creation of the directory %s failed "+str(err))
# Driver function
if __name__=="__main__":
path = sys.argv[1]
encrypt_files(path)
#(base) C:\Himanshu\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py
#Obfuscate
#(base) C:\Himanshu\DataPreprocessing>pyarmor obfuscate C:\Himanshu\DataPreprocessing\DataPreprocessing_minify.py
#Compression
#(base) C:\Himanshu\DataPreprocessing>pyminifier --gzip -o C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_compressed.py C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_minify.py
#(base) C:\Himanshu\DataPreprocessing>cd dist
#(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.py "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv"
#Compiling compressed .py to .pyc file
#(base) C:\Himanshu\DataPreprocessing\dist>python -m py_compile DataPreprocessing_compressed.py
#Encrypt .pyc file
#(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py C:\Himanshu\DataPreprocessing\dist\__pycache__\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e
#Decrypt file
#(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d
#Run decrypted file
#(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv" |
create_docker.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import shutil
import subprocess
from os.path import expanduser
import platform
deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target')
modelname='AION_12'
version='1'
def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata):
modelPath = os.path.join(deploymentfolder)
filename = os.path.join(deploymentfolder,'docker_image')
modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py')
shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh')
aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl')
drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl')
sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages')
model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version)
docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version)
model_sitepackage = os.path.join(model_dockerSetup,'site-packages')
model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py')
model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh')
model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl')
model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl')
try:
os.mkdir(model_dockerSetup)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
shutil.copytree(modelPath, docker_setup)
if textdata:
shutil.copytree(sitepackage, model_sitepackage)
modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing')
'''
try:
os.makedirs(modelpretrainpath, exist_ok=True)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
'''
home = expanduser("~")
if platform.system() == 'Windows':
hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing')
else:
hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing')
shutil.copytree(hostpretrainpath, modelpretrainpath)
shutil.copyfile(modelservice, model_dockerSetupservicefile)
shutil.copyfile(shellscript, model_dockershellscript)
shutil.copyfile(aix, model_aix)
shutil.copyfile(drift,model_drift)
try:
os.mkdir(filename)
except:
pass
requirementfilename = os.path.join(model_dockerSetup,'requirements.txt')
installfilename = os.path.join(model_dockerSetup,'install.py')
dockerfile = os.path.join(model_dockerSetup,'Dockerfile')
dockerdata='FROM python:3.8-slim-buster'
dockerdata+='\n'
if textdata:
dockerdata+='WORKDIR /root'
dockerdata+='\n'
dockerdata+='COPY HCLT HCLT'
dockerdata+='\n'
dockerdata+='WORKDIR /app'
dockerdata+='\n'
dockerdata+='COPY requirements.txt requirements.txt'
dockerdata+='\n'
dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version
dockerdata+='\n'
if textdata:
dockerdata+='COPY site-packages site-packages'
dockerdata+='\n'
dockerdata+='COPY install.py install.py'
dockerdata+='\n'
dockerdata+='COPY run_modelService.py run_modelService.py'
dockerdata+='\n'
dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl'
dockerdata+='\n'
dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl'
dockerdata+='\n'
dockerdata+='COPY start_modelservice.sh start_modelservice.sh'
dockerdata+='\n'
if textdata:
dockerdata+='''RUN apt-get update \
&& apt-get install -y build-essential manpages-dev \
&& python -m pip install --no-cache-dir --upgrade pip \
&& python -m pip install --no-cache-dir pandas==1.2.4 \
&& python -m pip install --no-cache-dir numpy==1.19.5 \
&& python -m pip install --no-cache-dir joblib==1.0.1 \
&& python -m pip install --no-cache-dir Cython==0.29.23 \
&& mv site-packages/* /usr/local/lib/python3.8/site-packages \
&& python -m pip install --no-cache-dir scipy==1.6.3 \
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \
&& python -m pip install --no-cache-dir spacy==2.2.3 \
&& python -m pip install --no-cache-dir nltk==3.6.2 \
&& python -m pip install --no-cache-dir textblob==0.15.3 \
&& python -m pip install --no-cache-dir gensim==3.8.3 \
&& python -m pip install --no-cache-dir demoji==1.1.0 \
&& python -m pip install --no-cache-dir lxml==4.6.3 \
&& python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \
&& python -m pip install --no-cache-dir Unidecode==1.2.0 \
&& python -m pip install --no-cache-dir pyspellchecker==0.6.2 \
&& python -m pip install --no-cache-dir pycontractions==2.0.1 \
&& python -m pip install --no-cache-dir tensorflow==2.4.1 \
&& python -m pip install --no-cache-dir nltk==3.6.2 \
&& python -m pip install --no-cache-dir -r requirements.txt \
&& python install.py \
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
else:
dockerdata+='''RUN apt-get update \
&& apt-get install -y build-essential manpages-dev \
&& python -m pip install --no-cache-dir --upgrade pip \
&& python -m pip install --no-cache-dir pandas==1.2.4 \
&& python -m pip install --no-cache-dir numpy==1.19.5 \
&& python -m pip install --no-cache-dir joblib==1.0.1 \
&& python -m pip install --no-cache-dir Cython==0.29.23 \
&& python -m pip install --no-cache-dir scipy==1.6.3 \
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \
&& python -m pip install --no-cache-dir -r requirements.txt \
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
f = open(dockerfile, "w")
f.write(str(dockerdata))
f.close()
requirementdata=''
requirementdata+='word2number==1.1'
if learner_type == 'DL':
requirementdata+='\n'
requirementdata+='tensorflow==2.5.0'
f = open(requirementfilename, "w")
f.write(str(requirementdata))
f.close()
if textdata:
installfile='''
import nltk
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')'''
f = open(installfilename, "w")
f.write(str(installfile))
f.close()
try:
command = 'docker pull python:3.8-slim-buster'
os.system(command)
#subprocess.check_call(["chmod", "+x", "start_modelservice.sh"], cwd=model_dockerSetup)
subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+version,"."], cwd=model_dockerSetup)
subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+version+".tar",modelname.lower()+":"+version], cwd=model_dockerSetup)
dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+"_"+version+".tar")
shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+"_"+version+".tar"))
shutil.rmtree(model_dockerSetup)
return 'Success','SUCCESSFULLY'
except Exception as e:
print("Error: "+str(e))
shutil.rmtree(model_dockerSetup)
return 'Error',str(e)
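# Note (illustrative, not part of the build flow above): a successfully built image is
# saved as <deploymentfolder>/docker_image/<modelname>_<version>.tar and can be loaded
# and started with the standard docker CLI, e.g.
# docker load -i <modelname>_<version>.tar
# docker run <modelname>:<version> (the image is tagged with the lower-cased model name)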
#createDockerImage(deploymentfolder,modelname,version) |
requirements.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from importlib.metadata import version
import sys
import os
def requirementfile(deploy_path,model,textFeatures,learner_type):
print('Generating requirement file for model:', model)
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\n"
filename = os.path.join(deploy_path,'requirements.txt')
f = open(filename, "wb")
f.write(str(requires).encode('utf8'))
f.close()
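# Illustrative call (argument values are examples only, not taken from the original code):
# requirementfile(deploy_path='/tmp/deploy', model='Extreme Gradient Boosting (XGBoost)', textFeatures=[], learner_type='ML')
# This writes /tmp/deploy/requirements.txt with each detected package pinned to the locally installed version.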
|
eion_compress.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import sys
import subprocess
import glob
import shutil
import time
from aion_deployment.EncryptPythonSourceCode import encrypt_files
import json
def encrypt(alldirs):
for dir in alldirs:
try:
encrypt_files(dir)
except Exception as error_obj:
print("Exception in encrypting", error_obj)
print("-"*50)
def replace_by_compressed(alldirs):
for dir in alldirs:
try:
#print("Processing dir", dir)
files = [f for f in glob.glob(dir + "/*.py")]
secure_path = os.path.join(dir, 'SecuredScripts')
time.sleep(6)
for file in files:
try:
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
if filename == "__init__":
continue
#print("Processing file", file)
file_folder_path = os.path.join(secure_path, filename, 'dist')
compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py')
shutil.copy(compressed_file_path, dir)
os.remove(file)
new_compressed_file_path = os.path.join(dir, filename+'_compressed.py')
target_file_path = os.path.join(dir, filename_w_ext)
os.rename(new_compressed_file_path, target_file_path)
if filename == 'aion_prediction':
shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform'))
except Exception as error_obj:
print("Exception in file ", error_obj)
shutil.rmtree(secure_path)
except Exception as error_obj:
print("Exception in dir ", error_obj)
def start_Obfuscate(path):
project_path = path
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
for subdir in subdirs:
if(subdir != 'pytransform'):
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
replace_by_compressed(alldirs)
if __name__=="__main__":
project_path = sys.argv[1]
print("project_path", project_path)
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
for subdir in subdirs:
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
print("*"*50)
replace_by_compressed(alldirs)
# python eion_compress.py "C:\Users\ashwani.s\Desktop\22April\22April\Mohita" "C:\Users\ashwani.s\Desktop\eion\eion" > logfile.log
|
production.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package import common
from AION.prediction_package.base import deployer
def is_supported(problem_type, algo=None):
"""
Return True if the given problem_type is supported, otherwise False.
"""
supported = ['classification','regression','clustering','timeseriesforecasting','Text Similarity']
return problem_type in supported
def get_deployer(problem_type, algo=None, params={}):
"""
Return a deployer class object based on the problem type.
Raise an error if no class is associated with the problem type.
"""
params['problem_type'] = problem_type
if problem_type == 'classification':
return classification( params)
elif problem_type == 'regression':
return regression( params)
elif problem_type == 'clustering':
return clustering( params)
elif problem_type == 'timeseriesforecasting':
from AION.prediction_package.time_series import forecasting
return forecasting.get_deployer( params)
elif problem_type == 'Text Similarity':
return textSimilarity( params)
else:
raise ValueError('deployment is not supported')
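# Illustrative factory usage (placeholder values; the full params shape matches the sample
# config dict in the __main__ block at the bottom of this file):
# deployer_obj = get_deployer('classification', params={'features': {...}, 'training': {...}, 'paths': {...}})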
class classification( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'classification'
def create_idrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name)
else:
obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name)
def create_odrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_classification_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'])
else:
obj.create_classification_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'])
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')"""
run_code = f"""
def run(self, df):\
"""
if self.params['training']['algo'] in ['Neural Network']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(np.argmax(self.model.predict(df),axis=1))
"""
elif self.params['training']['algo'] in ['Neural Architecture Search']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='autokeras',mod_as='ak')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(self.model.predict(df))
"""
elif self.params['training']['algo'] in ['Deep Q Network','Dueling Deep Q Network']:
self.importer.addModule('joblib')
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='constant',mod_from='tensorflow')
self.importer.addModule(module='time_step',mod_from='tf_agents.trajectories')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
q, _ = self.model(np.array(df), step_type=constant([time_step.StepType.FIRST] * np.array(df).shape[0]), training=False)
return pd.DataFrame(q.numpy())
"""
elif self.params['training']['algo'] in ['Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = np.expand_dims(df, axis=2)
df = df.astype(np.float32)
return pd.DataFrame(np.argmax(self.model.predict(df),axis=1))
"""
else:
self.importer.addModule(module='joblib')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_)
"""
return init_code, run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('joblib')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
output = round(output,2)
encoder_file = (Path(__file__).parent/"model")/"label_encoder.pkl"
if encoder_file.exists():
encoder = joblib.load(encoder_file)
output.rename(columns=dict(zip(output.columns, encoder.inverse_transform(list(output.columns)))), inplace=True)
raw_df['prediction'] = output.idxmax(axis=1)
raw_df['probability'] = output.max(axis=1).round(2)
raw_df['remarks'] = output.apply(lambda x: x.to_json(double_precision=2), axis=1)
outputjson = raw_df.to_json(orient='records',double_precision=5)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
class regression( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'regression'
def create_idrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name)
else:
obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name)
def create_odrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'])
else:
obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'])
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\
"""
if self.params['training']['algo'] in ['Neural Architecture Search']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='autokeras',mod_as='ak')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = np.expand_dims(df, axis=2)
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
else:
self.importer.addModule('joblib')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=5)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
class clustering( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'clustering'
def training_code( self):
self.importer.addModule('joblib')
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\
"""
if self.params['training']['algo'] == 'DBSCAN':
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.fit_predict(df)
"""
else:
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=2)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
if __name__ == '__main__':
config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 
'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 
'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 
'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 
'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 
'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 
'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 
'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 
'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 
'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 
'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 
'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 
'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 
'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 
'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 
'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 
'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 
'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 
'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 
'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 
'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 
'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 
'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 
'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 
'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}}
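# The configuration assembled above is handed to the classification deployer; run() is
# expected to generate the deployment package for the trained Logistic Regression model
# referenced in the config ('AI0110_1.sav').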
deployer = get_deployer('classification', params=config)
deployer.run() |
output_formatter.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
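# Builds the source of the generated output_format helper script as one long string
# (self.output_formatfile), customised to the trained model; the model-specific blocks
# below (VAR inversion, anomaly-detection granularity) are appended only when needed.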
class outputFormatter:
def __init__(self):
self.log = logging.getLogger('eion')
self.log.info('========> Inside Output Formatter')
def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature):
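        # The generated script always starts with the imports shared by every model type.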
self.output_formatfile = 'import json'
self.output_formatfile += '\n'
self.output_formatfile += 'import numpy as np'
self.output_formatfile += '\n'
self.output_formatfile += 'import pandas as pd'
self.output_formatfile += '\n'
self.output_formatfile += 'import os'
self.output_formatfile += '\n'
self.output_formatfile += 'from pathlib import Path'
self.output_formatfile += '\n'
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity'
self.output_formatfile += '\n'
self.output_formatfile += 'class output_format(object):'
self.output_formatfile += '\n'
if(model == 'VAR'):
self.output_formatfile += ' def invertTransformation(self,predictions):'
self.output_formatfile += '\n'
self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","data","trainingdata.csv"))'
self.output_formatfile += '\n'
self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount)
self.output_formatfile += '\n'
self.output_formatfile += ' targetFeature = "'+str(targetFeature)+'"'
self.output_formatfile += '\n'
self.output_formatfile += ' columns = targetFeature.split(",")'
self.output_formatfile += '\n'
self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)'
self.output_formatfile += '\n'
self.output_formatfile += ' for j in range(0,len(columns)):'
self.output_formatfile += '\n'
self.output_formatfile += ' for i in range(0, len(predictions)):'
self.output_formatfile += '\n'
self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)'
self.output_formatfile += '\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\n'
self.output_formatfile += ' for col in columns:'
self.output_formatfile += '\n'
self.output_formatfile += ' if col in dictDiffCount:'
self.output_formatfile += '\n'
self.output_formatfile += ' if dictDiffCount[col]==2:'
self.output_formatfile += '\n'
self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()'
self.output_formatfile += '\n'
self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()'
self.output_formatfile += '\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\n'
self.output_formatfile += ' return(prediction)'
self.output_formatfile += '\n'
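        # The generated invertTransformation() undoes the differencing applied before VAR
        # training: dictDiffCount records how many times each target column was differenced,
        # and the forecasts are cumulatively summed and offset by the last observed value(s)
        # from trainingdata.csv so that predictions are returned on the original scale.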
self.log.info("op:modelType: \n"+str(modelType))
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
# if modelType == 'anomaly_detection':
self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):'
self.output_formatfile += '\n'
self.output_formatfile += ' try:'
self.output_formatfile += '\n'
self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) '
self.output_formatfile += '\n'
self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) '
self.output_formatfile += '\n'
self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() '
self.output_formatfile += '\n'
self.output_formatfile += ' except Exception as e:'
self.output_formatfile += '\n'
self.output_formatfile += ' print(f"find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} ")\n'
self.output_formatfile += ' return anomaly_info_df'
self.output_formatfile += '\n'
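        # For autoencoder/DBSCAN anomaly detection the generated script also gets a
        # find_point_subsequence_anomalies() helper, which parses the datetime column and uses
        # aion_gettimegranularity to annotate each flagged row with its anomaly granularity.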
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
if (datetimeFeature!='' and datetimeFeature!='NA'):
self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' def apply_output_format(self,df,modeloutput):'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' def apply_output_format(self,df,modeloutput):'
self.output_formatfile += '\n'
if modelType.lower() == 'classification':
self.output_formatfile += ' modeloutput = round(modeloutput,2)'
self.output_formatfile += '\n'
if(learner_type == 'ImageClassification'):
if(str(output_label) != '{}'):
inv_mapping_dict = {v: k for k, v in output_label.items()}
self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict)
self.output_formatfile += '\n'
self.output_formatfile += ' predictions = []'
self.output_formatfile += '\n'
self.output_formatfile += ' for x in modeloutput:'
self.output_formatfile += '\n'
self.output_formatfile += ' x = le_dict[x]'
self.output_formatfile += '\n'
self.output_formatfile += ' predictions.append(x)'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' predictions=modeloutput'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = predictions'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\'records\')'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\n'
elif(learner_type == 'Text Similarity'):
self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput > '+str(threshold)+',1,0)'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = modeloutput'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\n'
elif(learner_type == 'TS'):
if(model == 'VAR'):
self.output_formatfile += ' modeloutput = self.invertTransformation(modeloutput)'
self.output_formatfile += '\n'
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\',double_precision=2)'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}'
elif(model.lower() == 'fbprophet'):
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}'
elif((model.lower() == 'lstm' or model.lower() == 'mlp') and len(features) >= 1):
self.output_formatfile += ' modeloutput = modeloutput.round(2)\n'
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}\n'
else:
self.output_formatfile += ' modeloutput = modeloutput.round(2)'
self.output_formatfile += '\n'
self.output_formatfile += ' modeloutput = json.dumps(modeloutput.tolist())'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":eval(modeloutput)}'
self.output_formatfile += '\n'
elif(learner_type in ['RecommenderSystem','similarityIdentification','contextualSearch']):
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = modeloutput'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\n'
else:
if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'):
if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'):
if(str(output_label) != '{}'):
inv_mapping_dict = {v: k for k, v in output_label.items()}
self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict)
self.output_formatfile += '\n'
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' modeloutput = modeloutput.replace({"predict_class": le_dict})'
else:
self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)'
'''
if modelType != 'anomaly_detection':
self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)'
self.output_formatfile += '\n'
if(threshold != -1):
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput[\'probability\'] > '+str(threshold)+',1,0)'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = modeloutput[\'probability\']'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = ""'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\n'
'''
self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\n'
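                        # With a decision threshold supplied, the generated code labels a row as the
                        # positive class (second probability column) only when its probability exceeds
                        # the threshold; the winning class probability goes to 'probability' and the
                        # full per-class scores are serialised into 'remarks' as JSON.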
else:
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' df[\'prediction\'] = modeloutput[\'predict_class\']'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = ""'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = "NA"'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1)'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\n'
'''
if modelType == 'anomaly_detection':
# if (model.lower()=='autoencoder'):
if model.lower() in ['autoencoder']:
if (datetimeFeature != '' and datetimeFeature.lower() != 'na'):
self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n'
self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n'
self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n'
self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n'
self.output_formatfile += ' anomaly_prediction_df.to_csv(f"{new_dir}/anomaly_data.csv")\n'
self.output_formatfile += ' try:\n'
self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n'
self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n'
self.output_formatfile += ' anomaly_prediction_df.drop("Time_diff",axis=1,inplace=True)\n'
self.output_formatfile += ' except:\n'
self.output_formatfile += ' pass\n'
self.output_formatfile += ' try:\n'
self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\'left\')\n'
self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n'
self.output_formatfile += ' df_out[\'anomalyType\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n'
self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str) \n'
self.output_formatfile += ' df_out.drop("time_diff",axis=1,inplace=True)\n'
self.output_formatfile += ' except Exception as e:\n'
self.output_formatfile += ' print("anomaly data updated issue",e)\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n'
self.output_formatfile += ' df=df_out \n'
else:
self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n'
elif (model.lower()=='dbscan'):
if (datetimeFeature != '' and datetimeFeature.lower() != 'na'):
self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n'
self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n'
self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n'
self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n'
self.output_formatfile += ' try:\n'
self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n'
self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n'
self.output_formatfile += ' except:\n'
self.output_formatfile += ' pass\n'
self.output_formatfile += ' try:\n'
self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\'left\')\n'
self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n'
self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n'
self.output_formatfile += ' except Exception as e:\n'
self.output_formatfile += ' print("anomaly data updated.")\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n'
self.output_formatfile += ' df=df_out \n'
else:
self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n'
self.output_formatfile += ' df.sort_values(by=[\'anomaly\'], ascending=False, inplace=True)\n'
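                            # For DBSCAN, rows assigned to cluster -1 (DBSCAN's noise label) are flagged
                            # as anomalies; when a datetime feature is available the granularity details
                            # are merged back into the full frame and written to overall_ad_output.csv.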
else:
self.output_formatfile += ' df[\'prediction\'] = modeloutput'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\n'
if learner_type != 'DL':
self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1).round(2)'
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\n'
else:
if model == 'COX':
self.output_formatfile += '\n'
self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)'
self.output_formatfile += '\n'
#self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\'records\',double_precision=2)'
#self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = modeloutput'
self.output_formatfile += '\n'
else:
self.output_formatfile += ' df[\'prediction\'] = modeloutput[0]'
if(learner_type == 'objectDetection'):
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\']'
else:
self.output_formatfile += '\n'
self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\'].round(2)'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)'
self.output_formatfile += '\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\n'
self.output_formatfile += ' return(json.dumps(outputjson))'
filename = os.path.join(deploy_path,'script','output_format.py')
#print(deploy_path)
f = open(filename, "wb")
self.log.info('-------> Output Mapping File Location :'+filename)
f.write(str(self.output_formatfile).encode('utf8'))
f.close() |
inputdrift.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import scipy
import warnings
import scipy.stats as st
import logging
import json
import sys
class inputdrift():
def __init__(self,conf):
self.log = logging.getLogger('eion')
def get_input_drift(self,ndf,hdf,outputfolder):
selectedColumns = self.features.split(',')
dataalertcount=0
distributionChangeColumns=""
distributionChangeMessage=[]
for i in range(0,len(selectedColumns)):
data1=hdf[selectedColumns[i]]
data2=ndf[selectedColumns[i]]
if(data1.dtype !="str" and data2.dtype !="str" ):
                    cumulativeData = pd.concat([data1, data2])
                    teststaticValue = self.teststatic(data1, data2)
                    if (teststaticValue < 0.05):
                        distributionName1,sse1 = self.DistributionFinder(data1)
                        distributionName2,sse2 = self.DistributionFinder(data2)
if(distributionName1 == distributionName2):
dataalertcount = dataalertcount
else:
dataalertcount = dataalertcount+1
distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+","
changedColumn = {}
changedColumn['Feature'] = selectedColumns[i]
changedColumn['KS_Training'] = teststaticValue
changedColumn['Training_Distribution'] = distributionName1
changedColumn['New_Distribution'] = distributionName2
distributionChangeMessage.append(changedColumn)
else :
dataalertcount = dataalertcount
else :
response ="Selected Columns should be Numerical Values"
if(dataalertcount == 0):
resultStatus="Model is working as expected"
else :
resultStatus=json.dumps(distributionChangeMessage)
return(dataalertcount,resultStatus)
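    # Drift logic: for every selected numeric feature a two-sample Kolmogorov-Smirnov test
    # compares the training data with the new data; when the p-value is below 0.05 the
    # best-fitting distributions of both samples are estimated, and a change in the
    # best-fitting distribution is counted as drift for that feature.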
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int"):
dataType="Discrete"
elif(data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(data.astype(int),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
                    r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
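    # DistributionFinder picks the candidate distribution with the lowest sum of squared
    # errors: discrete data is compared against candidate PMFs evaluated on the observed
    # value frequencies, continuous data against candidate PDFs (fitted per distribution)
    # evaluated on a density histogram of the sample.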
##KStestStatic -pvalue finding
def teststatic(self,data1,data2):
try:
teststatic =st.ks_2samp(data1,data2)
pValue=0.0
scipyVersion =scipy.__version__
if(scipyVersion <= "0.14.1"):
pValue =teststatic[1]
else:
pValue =teststatic.pvalue
except:
response = str(sys.exc_info()[0])
print("Input Drift Job Failed "+response)
return pValue
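if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): exercises the drift check on
    # synthetic data. Note that __init__ does not populate self.features, so it is assigned
    # here manually; the conf argument is unused by this sketch and the output folder is a
    # placeholder.
    _hist = pd.DataFrame({'value': np.random.normal(0.0, 1.0, 500)})
    _new = pd.DataFrame({'value': np.random.normal(0.7, 1.0, 500)})
    _checker = inputdrift(conf={})
    _checker.features = 'value'
    _alerts, _status = _checker.get_input_drift(_new, _hist, outputfolder='.')
    print(_alerts, _status)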
|
prediction_transformation.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None):
self.selectorfile += 'import pandas as pd'
self.selectorfile += '\n'
self.selectorfile += 'import joblib'
self.selectorfile += '\n'
self.selectorfile += 'import os'
self.selectorfile += '\n'
self.selectorfile += 'import numpy as np'
self.selectorfile += '\n'
self.selectorfile += 'class selector(object):'
self.selectorfile += '\n'
self.selectorfile += ' def apply_selector(self,df):'
self.selectorfile += '\n'
if pcaModel_pickle_file != '':
self.selectorfile += " pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+pcaModel_pickle_file+"'))"
self.selectorfile += '\n'
self.selectorfile += ' bpca_features = '+str(bpca_features)
self.selectorfile += '\n'
self.selectorfile += ' apca_features = '+str(apca_features)
self.selectorfile += '\n'
self.selectorfile += ' df = pcaModel.transform(df[bpca_features])'
self.selectorfile += '\n'
self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)'
self.selectorfile += '\n'
if(len(features) != 0) and model_type != 'BM25':
if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder':
self.selectorfile += ' df = df['+str(features)+']'
self.selectorfile += '\n'
self.selectorfile += ' return(df)'
filename = os.path.join(deploy_path,'script','selector.py')
f = open(filename, "wb")
self.log.info('-------> Feature Selector File Location :'+filename)
f.write(str(self.selectorfile).encode('utf8'))
f.close()
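    # The block below assembles featureslist.py, a small helper that reports every input
    # feature with its inferred type (Target / Numeric / Text / Category / Unknown) as a
    # JSON payload for the deployed package.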
featurefile = 'import json'
featurefile +='\n'
featurefile += 'def getfeatures():'
featurefile +='\n'
featurefile +=' try:'
featurefile +='\n'
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
inputfeatures = config['profiler']['input_features_type']
for x in inputfeatures:
featurelt={}
featurelt['feature'] = x
print(x,inputfeatures[x])
if x == targetFeature:
featurelt['Type'] = 'Target'
else:
if inputfeatures[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif inputfeatures[x] == 'object':
featurelt['Type'] = 'Text'
elif inputfeatures[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile +=' features = '+str(featurelist)
featurefile +='\n'
featurefile +=' outputjson = {"status":"SUCCESS","features":features}'
featurefile +='\n'
featurefile +=' output = json.dumps(outputjson)'
featurefile +='\n'
featurefile +=' print("Features:",output)'
featurefile +='\n'
featurefile +=' return(output)'
featurefile +='\n'
featurefile +=' except Exception as e:'
featurefile +='\n'
featurefile +=' output = {"status":"FAIL","message":str(e).strip(\'"\')}'
featurefile +='\n'
featurefile +=' print("Features:",json.dumps(output))'
featurefile +='\n'
featurefile +=' return (json.dumps(output))'
featurefile +='\n'
featurefile +='if __name__ == "__main__":'
featurefile +='\n'
featurefile +=' output = getfeatures()'
filename = os.path.join(deploy_path,'featureslist.py')
f = open(filename, "wb")
f.write(str(featurefile).encode('utf8'))
f.close()
def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig):
self.modelfile += ' def __init__(self):'
self.modelfile += '\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()=="autoencoder"):
modelfile=modelfile.replace('.sav','')
self.modelfile+=" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif(learner_type == 'TextDL' or learner_type == 'DL'):
if modelName.lower() == 'googlemodelsearch':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))"
self.modelfile += '\n'
else:
if scoreParam == 'recall':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'recall': recall_m},compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[recall_m])'
self.modelfile += '\n'
elif scoreParam == 'precision':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'precision': precision_m},compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[precision_m])'
self.modelfile += '\n'
elif scoreParam == 'roc_auc':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[tf.keras.metrics.AUC()])'
self.modelfile += '\n'
elif scoreParam == 'f1_score':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'f1_score': f1_m},compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[f1_m])'
self.modelfile += '\n'
elif scoreParam == 'r2':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'r2': r_square},compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[r_square])'
self.modelfile += '\n'
elif scoreParam == 'rmse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'rmse': rmse_m},compile=False)"
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[rmse_m])'
self.modelfile += '\n'
elif scoreParam == 'mse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif scoreParam == 'mae':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif scoreParam == 'accuracy':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
else:
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif(learner_type == 'Text Similarity'):
self.modelfile += " self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+preprocessing_pipe+"'))"
self.modelfile += '\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})"
self.modelfile += '\n'
elif(learner_type in ['similarityIdentification','contextualSearch']):
if scoreParam == 'VectorDB Cosine':
vectorfiledbname = 'trainingdataVecDB'
self.modelfile += f"\
\n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\
\n client = chromadb.PersistentClient(path=persist_directory)\
\n self.collection_name = '{vectorfiledbname}'\
\n self.collection = client.get_collection(self.collection_name)\n"
else:
self.modelfile += " self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\n\n"
elif(learner_type == 'ImageClassification'):
self.modelfile += ' self.config='+str(imageconfig)
self.modelfile += '\n'
if(modelName.lower() == 'densenet'):
self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))'
else:
self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))'
self.modelfile += '\n'
self.modelfile += ' headModel = baseModel.output'
self.modelfile += '\n'
self.modelfile += ' headModel = Flatten(name="flatten")(headModel)'
self.modelfile += '\n'
self.modelfile += ' headModel = Dense(1024, activation=\'relu\')(headModel)'
self.modelfile += '\n'
self.modelfile += ' headModel = Dropout(0.5)(headModel)'
self.modelfile += '\n'
self.modelfile += ' headModel = Dense(2, activation=\'sigmoid\')(headModel)'
self.modelfile += '\n'
self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)'
self.modelfile += '\n'
self.modelfile += ' opt = Adam(lr=self.config[\'lr\'])'
self.modelfile += '\n'
self.modelfile += ' self.model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])'
self.modelfile += '\n'
self.modelfile += " self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif(learner_type == 'objectDetection'):
self.modelfile += " self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\n"
self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+"/export/pipeline.config"\n'
self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+"/export/checkpoint/"\n'
self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+"/export/label_map.pbtxt"\n'
self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\n'
self.modelfile += ' self.detection_model = model_builder.build(model_config=configs["model"], is_training=False)\n'
self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\n'
self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, "ckpt-0")).expect_partial()\n'
self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\
use_display_name=True)\n'
elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'):
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
elif modelName.lower() == 'neural architecture search':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects=ak.CUSTOM_OBJECTS)"
self.modelfile += '\n'
else:
self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\n'
def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None):
self.modelfile += ' def predict(self,X,features_names):'
self.modelfile += '\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()=="autoencoder"):
self.modelfile += f" X=X[{feature}]\n"
self.modelfile += f" X = np.asarray(X).astype('float32')\n"
self.modelfile += f" reconstructed = self.model.predict(X)\n"
self.modelfile += f" predict_loss = tf.keras.losses.mae(reconstructed,X)\n"
self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\n'
self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\n'
self.modelfile += ' prediction_df = pd.DataFrame()\n'
self.modelfile += ' prediction_df["loss"] = predict_loss\n'
self.modelfile += ' prediction_df["max_threshold"] = max_threshold\n'
self.modelfile += ' prediction_df["min_threshold"] = min_threshold\n'
self.modelfile += ' prediction_df["anomaly"] = np.where((prediction_df["loss"] > prediction_df["max_threshold"]) | (prediction_df["loss"] <= prediction_df["min_threshold"]), True, False)\n'
self.modelfile += ' return prediction_df\n'
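        # Generated autoencoder anomaly rule: a row is flagged when its MAE reconstruction
        # loss falls outside mean +/- 2*std of the loss over the scored batch, i.e.
        # loss > max_threshold or loss <= min_threshold.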
elif(learner_type == 'RecommenderSystem'):
self.modelfile += ' predictions = []'
self.modelfile += '\n'
self.modelfile += ' for index,row in X.iterrows():'
self.modelfile += '\n'
self.modelfile += ' score = self.model.predict(int(row["uid"]),int(row["iid"]))'
self.modelfile += '\n'
self.modelfile += ' predictions.append(score.est)'
self.modelfile += '\n'
self.modelfile += ' return predictions'
elif(learner_type in ['similarityIdentification','contextualSearch']):
tfeatures = list(modelFeatures.split(","))
if indexFeature != '' and indexFeature != 'NA':
ifeatures = indexFeature.split(",")
for ifes in ifeatures:
if ifes not in tfeatures:
tfeatures.append(ifes)
if model_type == 'BM25':
self.modelfile += f"\n\
tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\n\
bm25 = BM25Okapi(tokenized_corpus)\n\
tokenized_query = [doc.split(' ') for doc in X.tokenize]\n\
logcnt = 5\n\
output = []\n\
for query in tokenized_query:\n\
doc_scores = bm25.get_scores(query)\n\
related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\n\
x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\n\
x['Score'] = doc_scores[related_docs_indices]\n\
x['Score'] = round(x['Score'],2).astype(str)+'%'\n\
output.append(x)\n\
return output\n"
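            # Generated BM25 retrieval: the stored 'tokenize' column is split on spaces,
            # each query is scored with BM25Okapi, and the top logcnt (5) documents are
            # returned with their scores (suffixed with '%').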
elif scoreParam == 'VectorDB Cosine':
featuresVecDB = modelFeatures.split(",")
self.modelfile += ' logcnt = 5\n'
self.modelfile += f" columns = {featuresVecDB}\n"
self.modelfile += f"\
\n output = []\
\n for rowindex, row in X.iterrows():\
\n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\
\n results = self.collection.query(\
\n query_embeddings=queryembedding.tolist(),\
\n n_results=logcnt\
\n )\
\n x = pd.DataFrame(columns=columns)\
\n for i in range(0, len(results['ids'][0])):\
\n documentAry = results['documents'][0][i]\
\n documentAry = documentAry.split(' ~&~ ')\
\n for j in range(0, len(documentAry)):\
\n x.at[i,columns[j]] = documentAry[j]\
\n x.at[i,'Score'] = results['distances'][0][i]\
\n output.append(x)\
\n return output"
else:
self.modelfile += ' columns = self.train_input.columns.tolist()\n'
self.modelfile += ' logcnt = 5\n'
self.modelfile += f" train_input = self.train_input[{tfeatures}]\n"
for tf in tfeatures:
self.modelfile += f" columns.remove('{tf}')\n"
self.modelfile += f"\
\n results = cosine_similarity(self.train_input[columns],X)\
\n output = []\
\n for i in range(results.shape[1]):\
\n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\
\n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\
\n scores = []\
\n for j in range(0,logcnt):\
\n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\
\n x['Score'] = scores\
\n output.append(x)\
\n return output"
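            # Fallback retrieval: cosine similarity between the query rows and the stored
            # (already vectorised) training columns, returning the top logcnt (5) matches
            # per query with the similarity expressed as a percentage string.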
elif(learner_type == 'Text Similarity'):
self.modelfile += ' X["'+firstDocFeature+'"] = X["'+firstDocFeature+'"].astype(str)'
self.modelfile += '\n'
self.modelfile += ' X["'+secondDocFeature+'"] = X["'+secondDocFeature+'"].astype(str)'
self.modelfile += '\n'
self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X["'+firstDocFeature+'"].values)'
self.modelfile += '\n'
self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X["'+secondDocFeature+'"].values)'
self.modelfile += '\n'
self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\'post\')'
self.modelfile += '\n'
self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\'post\')'
self.modelfile += '\n'
self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])'
self.modelfile += '\n'
self.modelfile += ' return(prediction)'
self.modelfile += '\n'
elif(learner_type == 'ImageClassification'):
self.modelfile += ' predictions = []'
self.modelfile += '\n'
self.modelfile += ' for index, row in X.iterrows(): '
self.modelfile += '\n'
self.modelfile += ' img = cv2.imread(row[\'imagepath\'])'
self.modelfile += '\n'
self.modelfile += ' img = cv2.resize(img, (self.config[\'img_width\'],self.config[\'img_height\']))'
self.modelfile += '\n'
self.modelfile += ' img = image.img_to_array(img)'
self.modelfile += '\n'
self.modelfile += ' img = np.expand_dims(img, axis=0)'
self.modelfile += '\n'
self.modelfile += ' img = img/255'
self.modelfile += '\n'
self.modelfile += ' prediction = self.model.predict(img)'
self.modelfile += '\n'
self.modelfile += ' prediction = np.argmax(prediction,axis=1)'
self.modelfile += '\n'
self.modelfile += ' predictions.append(prediction[0])'
self.modelfile += '\n'
self.modelfile += ' return(predictions)'
self.modelfile += '\n'
elif(learner_type == 'objectDetection'):
self.modelfile += ' @tf.function\n'
self.modelfile += ' def detect_fn(image):\n'
self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\n'
self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\n'
self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\n'
self.modelfile += ' return detections\n'
self.modelfile += ' def load_image_into_numpy_array(path):\n'
self.modelfile += ' return np.array(Image.open(path))\n'
self.modelfile += ' imageLocation = []\n'
self.modelfile += ' for i, row in X.iterrows():\n'
self.modelfile += ' if ("confidance" in row) and row["confidance"] <= 1.0:\n'
self.modelfile += ' confidance = row["confidance"]\n'
self.modelfile += ' else:\n'
self.modelfile += ' confidance = 0.8\n'
self.modelfile += ' imageName = str(Path(row["imagepath"]).stem)+"_output"+str(Path(row["imagepath"]).suffix)\n'
self.modelfile += ' image_np = load_image_into_numpy_array(row["imagepath"])\n'
self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n'
self.modelfile += ' detections = detect_fn(input_tensor)\n'
self.modelfile += ' num_detections = int(detections.pop("num_detections"))\n'
self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\n\
for key, value in detections.items()}\n'
self.modelfile += ' detections["num_detections"] = num_detections\n'
self.modelfile += ' detections["detection_classes"] = detections["detection_classes"].astype(np.int64)\n'
self.modelfile += ' label_id_offset = 1\n'
self.modelfile += ' image_np_with_detections = image_np.copy()\n'
self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\n\
image_np_with_detections,\n\
detections["detection_boxes"],\n\
detections["detection_classes"]+label_id_offset,\n\
detections["detection_scores"],\n\
self.category_index,\n\
use_normalized_coordinates=True,\n\
max_boxes_to_draw=200,\n\
min_score_thresh=confidance,\n\
agnostic_mode=False)\n'
self.modelfile += ' plt.figure()\n'
self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\n'
self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\n'
self.modelfile += ' plt.show()\n'
self.modelfile += ' return imageLocation\n'
else:
if(learner_type == 'DL' and model != 'Neural Network'):
self.modelfile += ' X = np.expand_dims(X, axis=2)'
self.modelfile += '\n'
if(learner_type == 'TextDL'):
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\n'
elif(learner_type == 'TextML'):
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)'
self.modelfile += '\n'
elif(learner_type == 'DL' and model_type == 'Classification'):
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\n'
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\n'
else:
if(model_type == 'Classification' or model_type == 'TLClassification'):
if model == 'Neural Architecture Search':
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
self.modelfile += '\n'
else:
if optimizationmethod == 'genetic':
self.modelfile += '\n'
self.modelfile += ' try:'
self.modelfile += '\n'
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))'
self.modelfile += '\n'
self.modelfile += ' except:'
self.modelfile += '\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
else:
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)'
self.modelfile += '\n'
self.modelfile += ' return pd.DataFrame(q.numpy())'
else:
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)'
self.modelfile += '\n'
elif model_type == 'Regression' and model == 'NAS':
self.modelfile += \
"""
X = X.astype(np.float32)
return self.model.predict(X)
"""
elif(learner_type == 'TS'):
if model.lower() == 'fbprophet':
self.modelfile += ' sessonal_freq="'+str(sessonal_freq)+'"'
self.modelfile += '\n'
self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=int(X["noofforecasts"][0]),freq=sessonal_freq,include_history = False)'
self.modelfile += '\n'
if (additional_regressors):
self.modelfile += '\n'
self.modelfile += ' additional_regressors='+str(additional_regressors)
self.modelfile += '\n'
self.modelfile += ' ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]'
self.modelfile += '\n'
self.modelfile += ' ts_prophet_future.reset_index(drop=True)'
self.modelfile += '\n'
self.modelfile += ' ts_prophet_future=ts_prophet_future.dropna()'
self.modelfile += '\n'
self.modelfile += ' train_forecast = self.model.predict(ts_prophet_future)'
self.modelfile += '\n'
self.modelfile += ' prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(X["noofforecasts"][0]))'
self.modelfile += '\n'
self.modelfile += ' return(prophet_forecast_tail)'
elif model.lower() == 'lstm' or model.lower() == 'mlp':
self.modelfile += ' lag_order='+str(lag_order)
self.modelfile += '\n'
self.modelfile += ' xt = X.values'
self.modelfile += '\n'
            scalertransformationFile = scalertransformationFile.replace('\\','/').split('/')[-1]
self.modelfile += ' loaded_scaler_model = joblib.load(os.path.join(os.path.dirname(__file__),\'..\',\'model\',\''+scalertransformationFile+'\'))'
self.modelfile += '\n'
self.modelfile += ' xt = xt.astype(\'float32\')'
self.modelfile += '\n'
self.modelfile += ' xt = loaded_scaler_model.transform(xt)'
self.modelfile += '\n'
self.modelfile += ' noOfPredictions = 10'
self.modelfile += '\n'
self.modelfile += ' pred_data = xt'
self.modelfile += '\n'
self.modelfile += ' y_future = []'
self.modelfile += '\n'
self.modelfile += ' for i in range(noOfPredictions):'
self.modelfile += '\n'
if len(feature) == 1:
self.modelfile += ' pred_data = pred_data[-lag_order:]'
self.modelfile += '\n'
if model.lower() == 'mlp':
self.modelfile += ' pred_data = pred_data.reshape((1,lag_order))'
else:
self.modelfile += ' pred_data = pred_data.reshape((1,lag_order,1))'
self.modelfile += '\n'
self.modelfile += ' pred = self.model.predict(pred_data)'
self.modelfile += '\n'
self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) '
self.modelfile += '\n'
self.modelfile += ' y_future.append(predoutput.flatten()[-1])'
self.modelfile += '\n'
self.modelfile += ' pred_data = np.append(pred_data,pred)'
self.modelfile += '\n'
self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')'
self.modelfile += '\n'
self.modelfile += ' for i in range(0, len(y_future)):'
self.modelfile += '\n'
self.modelfile += ' pred.iloc[i] = y_future[i]'
self.modelfile += '\n'
self.modelfile += ' return pred'
else:
self.modelfile += ' pdata = pred_data[-lag_order:]'
self.modelfile += '\n'
self.modelfile += ' pdata = pdata.reshape((1,lag_order,'+str(len(feature))+'))'
self.modelfile += '\n'
self.modelfile += ' pred = self.model.predict(pdata)'
self.modelfile += '\n'
self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) '
self.modelfile += '\n'
self.modelfile += ' y_future.append(predoutput)'
self.modelfile += '\n'
self.modelfile += ' pred_data = np.append(pred_data,pred,axis=0)'
self.modelfile += '\n'
self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')'
self.modelfile += '\n'
self.modelfile += ' for i in range(0, len(y_future)):'
self.modelfile += '\n'
self.modelfile += ' pred.iloc[i] = y_future[i]'
self.modelfile += '\n'
self.modelfile += ' return pred'
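                # Generated LSTM/MLP forecasting is recursive: the scaled window of the last
                # lag_order steps is fed to the model, each one-step prediction is
                # inverse-transformed and recorded, and the (scaled) prediction is appended
                # to the window; this repeats for noOfPredictions (10) steps.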
else:
self.modelfile += ' return self.model.predict(n_periods=int(X["noofforecasts"][0]))'
else:
if model == 'KaplanMeierFitter':
self.modelfile += '\n'
self.modelfile += ' res = self.model.predict(X[\''+feature[0]+'\'].astype(int))'
self.modelfile += '\n'
self.modelfile += ' if isinstance(res, pd.DataFrame):\n'
self.modelfile += ' return res.values.reshape(1,-1)\n'
self.modelfile += ' else:\n'
self.modelfile += ' return np.array([res])\n'
elif model == 'COX':
self.modelfile += ' res = []\n'
self.modelfile += ' for idx,row in X.iterrows():\n'
self.modelfile += ' res.append(self.model.predict_survival_function(X, times=row[self.model.duration_col])[idx].values[0])\n'
self.modelfile += ' return pd.DataFrame(res)'
#self.modelfile += ' return self.model.predict_survival_function(X, times=X[self.model.duration_col])'
self.modelfile += '\n'
elif(learner_type == 'DL' and model_type in ['Classification','Regression']):
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\n'
self.modelfile += ' return self.model.predict(X).reshape(1, -1)'
self.modelfile += '\n'
elif (model_type == 'Clustering' and model == 'DBSCAN'):
self.modelfile += ' return self.model.fit_predict(X)'
elif(model_type.lower() == 'anomaly_detection' and model.lower() == 'dbscan'):
self.modelfile += " pred=self.model.fit_predict(X)\n"
self.modelfile += " X.loc[:,'cluster'] = self.model.labels_ \n"
self.modelfile += ' return X\n'
elif model_type.lower() == 'anomaly_detection':
self.modelfile += ' X = X.astype(np.float32)\n'
self.modelfile += ' return self.model.predict(X)'
else:
if model_type != 'Clustering':
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\n'
#self.modelfile += ' return self.model.predict(X).reshape(1, -1)'
self.modelfile += \
"""
if isinstance(self.model, LatentDirichletAllocation):
output = np.matrix(self.model.transform(X)).argmax(axis=1)
return output.flatten().tolist()
return self.model.predict(X).reshape(1, -1)
"""
|
base.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package import common
def file_header( usecase=''):
return ''
class deployer():
"""
    Base deployer class used to generate the deployment code.
    It is inherited by deployers specific to each problem type.
"""
def __init__(self, params={}):
if not params['paths']['deploy']:
raise ValueError('Deploy path is not provided')
self.deploy_path = Path(params['paths']['deploy'])
if not self.deploy_path.exists():
self.deploy_path.mkdir(parents=True, exist_ok=True)
self.name = params.get('problem_type', '')
self.params = params
self.importer = importModule()
self.feature_reducer = False
def profiler_code(self):
return common.profiler_code(self.params['profiler'])
def feature_engg_code(self):
if self.params['selector'].get('reducer',False):
code, modules = common.feature_reducer_code(self.params['selector'])
else:
code, modules = common.feature_selector_code(self.params['selector'])
utility.import_modules(self.importer, modules)
return code
def training_code(self):
return common.training_code(self.params['training'])
def formatter_code(self):
return ''
def run(self):
"""
        The run function is called to start the deployment process. It creates the
        following files:
        inputprofiler.py for preprocessing the input,
        aion_predict.py for prediction, and
        the model service file.
"""
code = self.predict_code( )
with open(self.deploy_path/'aion_predict.py', 'w') as f:
f.write(code)
profiler_code = self.profiler_code()
with open(self.deploy_path/'script'/'inputprofiler.py', 'w') as f:
f.write(profiler_code)
self.create_model_service( )
self.create_publish_service()
self.create_idrift()
self.create_odrift()
common.create_feature_list(self.params, self.params['features']['target_feat'], self.deploy_path)
common.requirement_file(self.deploy_path,self.params['training']['algo'],self.params['features']['text_feat'])
common.create_readme_file(self.deploy_path, self.params['training']['model_file'], self.params['features']['input_feat'])
self.create_utils_folder()
def predict_code(self):
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
utility.import_modules(self.importer, imported_modules)
self.importer.addLocalModule(module='inputprofiler',mod_from='script.inputprofiler')
code_text = ""
code_text += self.feature_engg_code()
code_text += self.training_code()
code_text += self.formatter_code()
code_text += common.main_code()
code = file_header()
code += self.importer.getCode()
return code + code_text
def create_model_service(self):
service_name = '{}{}{}'.format(self.params['usecase_name'], '_' if self.params['usecase_ver'] != '' else '', self.params['usecase_ver'])
obj = aionPrediction()
obj.create_model_service(self.deploy_path, service_name, self.name)
def create_publish_service(self):
obj = aionPrediction()
obj.create_publish_service(self.params['paths']['usecase'], self.params['usecase_name'],self.params['usecase_ver'], self.name)
def create_idrift(self):
pass
def create_odrift(self):
pass
def create_utils_folder(self):
common.create_util_folder(self.deploy_path)
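# Hedged illustration (not part of the original module): problem-type specific deployers
# typically subclass deployer and override the *_code hooks; run() then stitches the emitted
# snippets into aion_predict.py. A minimal custom formatter hook might look like:
#
#   class my_deployer(deployer):
#       def formatter_code(self):
#           self.importer.addModule('json')
#           return (
#               "\nclass output_format():\n"
#               "    def run(self, raw_df, df):\n"
#               "        return json.dumps({'status': 'SUCCESS', 'data': df.tolist()})\n"
#           )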
|
forecasting.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package.base import deployer
from AION.prediction_package import common
import numpy as np
def get_deployer( params):
if params['training']['algo'] == 'ARIMA':
return arima(params)
elif params['training']['algo'] == 'LSTM':
return lstm(params)
elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO':
return lstmencdec_mviuvo(params)
elif params['training']['algo'] == 'MLP':
return mlp(params)
elif params['training']['algo'] == 'VAR':
return var(params)
elif params['training']['algo'] == 'FBPROPHET':
return fbprophet(params)
else:
raise ValueError(f"Algorithm {params['training']['algo']} for time series forecasting is not supported")
def _profiler_code(params, importer):
"""
    Creates the profiler code based on the config file.
    A separate file is generated because the profiler is also required for input drift detection.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
utility.import_modules(importer, imported_modules)
if 'code' in params['profiler'].get('preprocess',{}).keys():
code = params['profiler']['preprocess']['code']
else:
code = ""
code += """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
        df = df.replace(r'^\s*$', np.nan, regex=True)
"""
if 'code' in params['profiler'].get('preprocess',{}).keys():
run_code += """
df = preprocess( df)"""
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}']
df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params['profiler'].get('force_numeric_conv'):
run_code += f"""
df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
run_code += _profiler_main_code(params)
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
run_code += """ return df
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
def _profiler_main_code(params):
code = f"""
df = self.profiler.transform(df)
columns = {params['profiler']['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
return code
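# The generated profiler applies the persisted preprocessing pipeline (preprocess_pipe.pkl)
# to the incoming frame and, if the transform returns a sparse matrix, densifies it into a
# DataFrame with the configured output feature names.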
class arima( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
        df = df.replace(r'^\s*$', np.nan, regex=True)
return df[['noofforecasts']]
"""
return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='joblib')
return f"""
class trainer():
def __init__(self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
def run(self,df):
return self.model.predict(n_periods=int(df["noofforecasts"][0]))
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = json.dumps(df.tolist())
outputjson = {"status":"SUCCESS","data":eval(df)}
return(json.dumps(outputjson))
"""
class lstm( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code(self):
importer = importModule()
return _profiler_code( self.params, importer)
def training_code( self):
        self.importer.addModule(module='pandas',mod_as='pd')
        self.importer.addModule(module='Path',mod_from='pathlib')
        self.importer.addModule(module='numpy',mod_as='np')
        self.importer.addModule(module='joblib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)
"""
run_code = f"""
def run(self, df):
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
for i in range(noOfPredictions):
"""
if len(self.params['selector']['output_features']) == 1:
run_code += f"""
pred_data = pred_data[-lag_order:]
pred_data = pred_data.reshape((1,lag_order,1))
pred = self.model.predict(pred_data)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput.flatten()[-1])
pred_data = np.append(pred_data,pred)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
else:
run_code += f"""
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput)
pred_data = np.append(pred_data,pred,axis=0)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
return init_code, run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
class lstmencdec_mviuvo( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
        outputFeatures = params['profiler']['output_features']
        self.targetColIndx = outputFeatures.index(params['features']['target_feat'])
        self.selectedCols = list(params['selector']['output_features'])
def profiler_code(self):
importer = importModule()
return _profiler_code( self.params, importer)
def training_code( self):
        self.importer.addModule(module='pandas',mod_as='pd')
        self.importer.addModule(module='Path',mod_from='pathlib')
        self.importer.addModule(module='joblib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)
"""
run_code = f"""
def run(self, df):
targetColIndx = {self.targetColIndx}
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
pred_1d = pred.ravel()
pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, {len(self.params['selector']['output_features'])})
pdata_2d[:,targetColIndx] = pred_1d
pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d)
predout = pred_2d_inv[:, targetColIndx]
predout = predout.reshape(len(pred_1d),1)
pred = pd.DataFrame(index=range(0,len(predout)),columns=['{self.params['features']['target_feat']}'])
for i in range(0, len(predout)):
pred.iloc[i] = predout[i]
return pred
"""
return init_code, run_code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df[{self.selectedCols}]
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
class mlp( lstm):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def training_code( self):
        self.importer.addModule(module='pandas',mod_as='pd')
        self.importer.addModule(module='Path',mod_from='pathlib')
        self.importer.addModule(module='numpy',mod_as='np')
        self.importer.addModule(module='joblib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)"""
run_code = f"""
def run(self, df):
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
for i in range(noOfPredictions):
"""
if len(self.params['selector']['output_features']) == 1:
run_code += f"""
pred_data = pred_data[-lag_order:]
pred_data = pred_data.reshape((1,lag_order))
pred = self.model.predict(pred_data)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput.flatten()[-1])
pred_data = np.append(pred_data,pred)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
else:
run_code += f"""
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput)
pred_data = np.append(pred_data,pred,axis=0)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
return init_code, run_code
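# Deployment code generator for VAR models: the generated trainer forecasts using the model's
# k_ar lag order, and the formatter inverts the differencing applied during training using
# 'dictDiffCount' and the stored trainingdata.csv.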
class var( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code(self):
importer = importModule()
code = _profiler_code( self.params, importer)
return code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df[{self.params['selector']['output_features']}]
"""
def training_code( self):
        self.importer.addModule(module='joblib')
        self.importer.addModule(module='Path',mod_from='pathlib')
return f"""
class trainer():
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
def run(self,df):
lag_order = self.model.k_ar
return self.model.forecast(df.values[-lag_order:],steps={self.params['training']['no_of_prediction']})
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return f"""
class output_format():
def __init__( self):
pass
def invertTransformation(self,predictions):
datasetdf = pd.read_csv((Path(__file__).parent/"data")/"trainingdata.csv")
dictDiffCount = {self.params['training']['dictDiffCount']}
target_features = "{self.params['features']['target_feat']}"
columns = target_features.split(',')
pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)
for j in range(0,len(columns)):
for i in range(0, len(predictions)):
pred.iloc[i][j] = round(predictions[i][j],2)
prediction = pred
for col in columns:
if col in dictDiffCount:
if dictDiffCount[col]==2:
prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()
prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()
prediction = pred
return(prediction)
def run(self,raw_df,df):
df = self.invertTransformation(df)
df = df.to_json(orient='records',double_precision=2)
outputjson = {{"status":"SUCCESS","data":json.loads(df)}}
return(json.dumps(outputjson))
"""
class fbprophet( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
df = df.replace(r'^\s*$', np.NaN, regex=True)
return df[['noofforecasts']]
"""
return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
self.importer.addModule(module='joblib')
code = f"""
class trainer():
def __init__(self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
"""
code += f"""
def run(self,df):
sessonal_freq = '{self.params['training']['sessonal_freq']}'
ts_prophet_future = self.model.make_future_dataframe(periods=int(df["noofforecasts"][0]),freq=sessonal_freq,include_history = False)
"""
if (self.params['training']['additional_regressors']):
code += f"""
additional_regressors={self.params['training']['additional_regressors']}
        ts_prophet_future[additional_regressors] = df[additional_regressors]
ts_prophet_future.reset_index(drop=True)
ts_prophet_future=ts_prophet_future.dropna()
"""
code += """
train_forecast = self.model.predict(ts_prophet_future)
prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(df["noofforecasts"][0]))
return(prophet_forecast_tail)"""
return code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
|
ssh_command.py | import paramiko
from pathlib import Path
import logging
import json
import os
import sys
import pandas as pd
import time
import timeit
import re
running_state_code = 16
stopped_state_code = 80
#prompt_command = '/home/aion/AION/llm/sbin/run_inference.sh'
log_file_path = '/home/aion/data/log/fine_tuning_log.log'
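# SFTP/SSH helpers used to talk to the remote fine-tuning machine; authentication is by key
# file when one is supplied, otherwise by password.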
def read_file_from_server(ip,username,password,pem_file,remote_file_name,localfilepath):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
sftp = client.open_sftp()
sftp.get(remote_file_name,localfilepath)
sftp.close()
client.close()
def read_log_file(ip,username,password,pem_file):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
#log_file_path = '/home/ubuntu/AION/data/log/fine_tuning_log.log'
stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}')
time.sleep(5)
client.close()
return stdout.read().decode()
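# Runs a command on the remote machine, retrying the connection for up to ~10 minutes and
# either streaming stdout lines to the supplied logger or collecting them into a buffer.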
def run_ssh_cmd(ip,pem_file,username,password,log,command):
try:
buf = ''
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
startTime = timeit.default_timer()
while True:
try:
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
break
except Exception as e:
print(e)
dataLoadTime = timeit.default_timer() - startTime
if dataLoadTime >= 600:
break
time.sleep(10)
stdin, stdout, stderr =client.exec_command(command)
for line in stdout:
if log != '':
log.info(line)
else:
# if buf != '':
# buf= buf+'\n'
buf = buf+line
client.close()
return buf
except Exception as e:
print(str(e))
raise Exception(str(e))
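# Uploads the training data and configuration to the remote data/config directories, creating
# the directories if needed and removing any stale files before copying.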
def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username,password,remote_data_dir,remote_config_dir):
try:
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
startTime = timeit.default_timer()
while True:
try:
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
elif password != '':
client.connect(host, username=username, password=password)
sftp = client.open_sftp()
break
except Exception as e:
print(e)
time.sleep(10)
dataLoadTime = timeit.default_timer() - startTime
if dataLoadTime >= 600:
break
try:
sftp.stat(remote_data_dir)
print(f"Path: '{remote_data_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_data_dir)
try:
sftp.stat(remote_config_dir)
print(f"Path: '{remote_config_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_config_dir)
Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir)
for traindata_files in Train_Data_InRemoteArtifacts:
if not traindata_files.endswith('.ipynb_checkpoints'):
sftp.remove(remote_data_dir +'/'+ traindata_files)
if os.path.isdir(local_data_file_path):
list_pdf_json_files = os.listdir(local_data_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json)
print(file_pdf_json + " data copied successfully")
else:
filename = os.path.basename(local_data_file_path)
directoryname= os.path.dirname(local_data_file_path)
sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename)
print(filename + " data copied successfully")
if local_config_file_path != '':
config_InRemoteArtifacts = sftp.listdir(remote_config_dir)
for config_file in config_InRemoteArtifacts:
print(config_file)
if not config_file.endswith('.ipynb_checkpoints'):
sftp.remove(remote_config_dir +'/'+ config_file)
if os.path.isdir(local_config_file_path):
list_pdf_json_files = os.listdir(local_config_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json)
print(file_pdf_json + " config copied successfully")
else:
# updated_config_file_path = create_config(local_config_file_path)
filename = os.path.basename(local_config_file_path)
directoryname= os.path.dirname(local_config_file_path)
sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json')
print(filename + " config copied successfully")
remote_files = sftp.listdir(remote_config_dir)
print("List of uploaded files",remote_files)
sftp.close()
client.close()
except Exception as e:
print(e)
|
bench_marking.py | import json
import ast
import sys
import time
from pathlib import Path
import pandas as pd
from AION.llm import llm_utils
bench_mark_file = {'code':'code_eval.sh', 'doc': 'doc_eval.sh'}
DB_TABLE = 'llm_benchmarking'
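# Runs the code or document evaluation script on the remote LLM instance for the given usecase
# and records the parsed 'eval_output' result in the llm_benchmarking sqlite table.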
def bench_mark(hypervisor,instanceid,model,usecaseid,eval='code'):
output = {}
started = False
if eval not in bench_mark_file.keys():
raise ValueError(f"Evaluation for '{eval}' is not supported.\nSupported types are {list(bench_mark_file.keys())}")
db = benchmark_db( DB_TABLE, usecaseid)
db.update_state('running')
try:
server = llm_utils.hypervisor( hypervisor,instanceid)
if not server.is_already_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
sh_file = llm_utils.remote_code_dir(as_str=True) + '/' + bench_mark_file[eval]
cmd = sh_file + ' ' + usecaseid + ' '+ str(model)
print(cmd)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', cmd)
if isinstance( buf, str):
print( buf)
else:
print( buf.decode('utf-8'))
if buf:
if 'eval_output:' in buf:
output = buf.split('eval_output:')[-1].rstrip()
output = ast.literal_eval( output)
record = {}
record['state'] = 'Success'
record['eval_type'] = eval
record['result_type'] = 'value' if eval =='code' else 'dict'
record['result'] = output
db.put_record( record)
else:
record = {}
record['state'] = 'Error'
record['eval_type'] = eval
db.put_record( record)
return output
except Exception as e:
print(e)
record = {}
record['state'] = 'Error'
record['eval_type'] = eval
record['result_type'] = 'value' if eval =='code' else 'dict'
record['result'] = [{'error': str(e)}]
db.put_record( record)
output = {'status':'Error','msg':str(e)}
return output
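# Thin wrapper over the sqlite config.db: keeps one benchmarking record per usecaseid and
# updates it in place on subsequent runs.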
class benchmark_db():
def __init__(self, table_name, usecaseid):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = str(Path(DATA_DIR)/'sqlite')
self.sqlite_obj = sqlite_db(file_path,'config.db')
self.table_name = table_name
self.usecaseid = usecaseid
self.columns = ['usecaseid','state','eval_type','result_type','result']
self.sqlite_obj.create_table(self.table_name, self.columns, ['TEXT' for x in self.columns])
def put_record(self, record={}):
db_data = self.sqlite_obj.get_data(self.table_name,'usecaseid',self.usecaseid)
if (len(db_data) > 0):
self.sqlite_obj.update_dict_data(record,f'"usecaseid"="{self.usecaseid}"',self.table_name)
else:
data = {x:[str(record[x])] if x in record.keys() else [''] for x in self.columns}
data['usecaseid'] = self.usecaseid
self.sqlite_obj.write_data(pd.DataFrame.from_dict(data),self.table_name)
def update_state(self, state, error=None):
data = {x:'' for x in self.columns}
data['state'] = state
data['usecaseid'] = self.usecaseid
if error:
data['result'] = error
self.put_record( data)
def read_data(self):
return self.sqlite_obj.read_data(self.table_name)
if __name__ == '__main__':
run_code_benchmarking = False
if run_code_benchmarking:
#for code
bench_mark('aws','i-0c7bfeddd00658f45','CodeLLaMA-2-7B','AI0025_1',eval='code')
else:
# for document
bench_mark('aws','i-0c7bfeddd00658f45','LLaMA-2-7B','AI0041_1',eval='doc')
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = str(Path(DATA_DIR)/'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
data = sqlite_obj.read_data('llm_benchmarking')
print(data)
|
llm_utils.py | import json
import os
import time
from pathlib import Path
BASE_DIR = '/home/aion'
DATA_DIR = '/home/aion/data'
CONFIG_DIR = '/home/aion/data/config'
PROMPT_DATA_DIR = '/home/aion/data/prompt_data'
CODE_DIR = '/home/aion/llm/sbin'
def remote_base_dir(as_str=False):
if as_str:
return BASE_DIR
return Path( BASE_DIR)
def remote_data_dir(as_str=False):
if as_str:
return DATA_DIR
return Path( DATA_DIR)
def remote_config_dir(as_str=False):
if as_str:
return CONFIG_DIR
return Path( CONFIG_DIR)
def remote_code_dir(as_str=False):
if as_str:
return CODE_DIR
return Path( CODE_DIR)
def remote_prompt_data_dir(as_str=False):
    if as_str:
        return PROMPT_DATA_DIR
    return Path( PROMPT_DATA_DIR)
def get_ami_details(config,selectedAMI):
y = {}
for x in config:
if x['id'] == selectedAMI:
return x
return y
def get_ip(cloudconfig,instanceid,hypervisor,region,image):
from AION.appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
if image != '' and image != 'NA':
            amiDetails = get_ami_details(cloud_infra['AWS_EC2']['amis'], image)
else:
            amiDetails = get_ami_details(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import get_instance_ip
return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region)
elif hypervisor == 'GCP':
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
        amiDetails = get_ami_details(cloud_infra['GCP']['instances'], instanceid)
zone = amiDetails['regionName']
projectID = cloud_infra['gcpCredentials']['projectID']
from llm.gcp_compute_api import check_instance
status,ip = check_instance(credentialsJson,projectID, zone, instanceid)
return ip
else:
raise ValueError(f"Given hypervisor '{hypervisor}' is not supported")
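# Factory returning a hypervisor-specific server wrapper (aws_server or gcp_server) that
# exposes a common start/stop/terminate/ssh_details interface.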
def hypervisor( hypervisor,instanceid='', image=''):
if not hypervisor:
raise ValueError('No hypervisor provided')
if hypervisor.lower() == 'aws':
return aws_server(instanceid, image)
elif hypervisor.lower() == 'gcp':
return gcp_server(instanceid, image)
else:
raise ValueError(f"Hyperviser '{hypervisor}' is not supported")
class gcp_server():
def __init__( self, instanceid='', image=''):
self.hypervisor_name = 'gcp'
from AION.appbe.compute import readComputeConfig
self.cloud_infra = readComputeConfig()
if image and image.lower() != 'na':
self.ami_details = get_ami_details(self.cloud_infra['GCP']['machineImage'], image)
self.instanceid = ''
elif instanceid and instanceid.lower() != 'na':
self.ami_details = get_ami_details( self.cloud_infra['GCP']['instances'], instanceid)
self.instanceid = instanceid
else:
raise ValueError("Either provide 'image name' or 'instance id'")
self.credentialsJson = self.cloud_infra['gcpCredentials']['gcpCredentials']
self.projectID = self.cloud_infra['gcpCredentials']['projectID']
self.zone = self.ami_details['regionName']
        self.stopped = False
        self.ip = ''
        self.created = False
        self.already_running = False
    def is_already_running(self):
        return self.already_running
def is_machine_running(self):
from llm.gcp_compute_api import check_instance
status,self.ip = check_instance(self.credentialsJson,self.projectID,self.zone,self.instanceid)
return 'running' == status.lower()
def start(self):
from AION.llm.gcp_compute_api import is_running
from AION.llm.gcp_compute_api import check_instance
from AION.llm.gcp_compute_api import start_instance
        status = is_running(self.credentialsJson, self.projectID, self.zone, self.instanceid).lower()
        if 'running' == status:
            status, self.ip = check_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid)
            self.already_running = True
            return True, 'already running'
        else:
            status, self.ip = start_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid)
            return status == 'Success', status
def create(self,key_name = None):
from AION.llm.gcp_compute_api import create_instance
machineImageName = self.ami_details['id']
machineImageProjectID = self.ami_details['machineImageProjectID']
self.ip,msg = create_instance(self.credentialsJson,self.projectID,self.zone,key_name,machineImageName,machineImageProjectID)
if self.ip != '':
self.created = True
return self.ip,msg
def stop(self):
if self.already_running or self.stopped:
return True
from AION.llm.gcp_compute_api import stop_server_instance
status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid)
if status == 'Process Completed':
self.stopped = True
return True
return False
def showndown(self):
if self.created:
self.terminate()
else:
if self.already_running or self.stopped:
return True
from AION.llm.gcp_compute_api import stop_server_instance
status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid)
if status == 'Process Completed':
self.stopped = True
return True
return False
def terminate(self):
from AION.llm.gcp_compute_api import terminate_instance
msg, status = terminate_instance(self.projectID, self.zone, self.instanceid)
print(msg)
return status == 'success'
def ssh_details(self):
return self.ami_details['ssh']
class aws_server():
def __init__(self, instanceid='', image='', boot_up_time=0):
self.hypervisor_name = 'aws'
from AION.appbe.compute import readComputeConfig
self.cloud_infra = readComputeConfig()
if image and image.lower() != 'na':
self.ami_details = get_ami_details(self.cloud_infra['AWS_EC2']['amis'], image)
self.instanceid = ''
self.machine_type = 'ami'
elif instanceid and instanceid.lower() != 'na':
self.ami_details = get_ami_details( self.cloud_infra['AWS_EC2']['instances'], instanceid)
self.instanceid = instanceid
self.machine_type = 'instance'
else:
raise ValueError("Either provide 'image name' or 'instance id'")
self.access_key = self.cloud_infra['awsCredentials']['accessKey']
self.secret_key = self.cloud_infra['awsCredentials']['secretAccessKey']
self.securitygroupid = self.cloud_infra['awsCredentials']['securitygroupid']
self.region = self.ami_details['regionName']
self.already_running = False
self.boot_up_time = boot_up_time
self.stopped = False
self.created = False
def is_already_running(self):
return self.already_running
def is_machine_running(self):
from AION.llm.aws_instance_api import is_running
status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower()
return 'running' == status.lower()
def start(self):
from AION.llm.aws_instance_api import is_running
from AION.llm.aws_instance_api import get_instance_ip
from AION.llm.aws_instance_api import start_instance
status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower()
if 'running' == status:
self.ip = get_instance_ip(self.access_key, self.secret_key, self.instanceid, self.region)
self.already_running = True
return True, 'already running'
else:
status, msg, self.ip = start_instance(self.access_key, self.secret_key, self.instanceid, self.region)
return status == 'Success', msg
def create(self, key_name=None):
from AION.llm.aws_instance_api import create_instance
image_id = self.ami_details['id']
security_group_ids = self.ami_details['instanceSetting']['securityGroupIds']
if not key_name:
key_name = self.ami_details['instanceSetting']['keyName']
instance_type = self.ami_details['instanceSetting']['instanceType']
self.instanceid,msg = create_instance(image_id, instance_type, self.securitygroupid, self.region, key_name,
self.access_key, self.secret_key)
if self.instanceid != '':
self.created = True
return self.instanceid,msg
def showndown(self):
from AION.llm.aws_instance_api import stop_server_instance
if self.created:
return self.terminate()
else:
if self.already_running or self.stopped:
return True
status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region)
if status == 'Process Completed':
self.stopped = True
return True
return False
def stop(self):
from AION.llm.aws_instance_api import stop_server_instance
if self.already_running or self.stopped:
return True
status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region)
if status == 'Process Completed':
self.stopped = True
return True
return False
def terminate(self):
from AION.llm.aws_instance_api import terminate_instance
msg, status = terminate_instance(self.instanceid, self.region, self.access_key, self.secret_key)
print(msg)
return status == 'success'
def ssh_details(self):
return self.ami_details['ssh']
|
llm_cache.py |
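# Sqlite-backed cache of LLM prompts and responses, keyed by usecase, prompt and model type
# (optionally temperature and max_token); at most 'max_records' entries are kept per usecase,
# the oldest being evicted first.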
class CachePrompt:
tableName = 'cachePrompts'
def __init__(self, conn):
self.conn = conn
def selectFromCache(self,prompt ,usecaseId ,modelType,temperature=None ,max_token=None):
searchFromLLMFlag = False
try:
query = f'''SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" AND prompt = "{prompt}" AND modelType = "{modelType}"'''
if temperature:
query += f''' AND temperature= "{temperature}"'''
if max_token:
query += f''' AND max_token= "{max_token}"'''
cursor = self.conn.execute(query)
results = [x for x in cursor.fetchone()]
column_names = list(map(lambda x:x[0],cursor.description))
response = results[column_names.index('response')]
return searchFromLLMFlag,response
except Exception as e:
print(e)
searchFromLLMFlag = True
return searchFromLLMFlag,''
def deleteRecord(self ,usecaseId,max_records=5):
q_exitingRecords = f'''SELECT count(*) FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" '''
cursor = self.conn.execute(q_exitingRecords)
numberOfRecords = cursor.fetchone()[0]
if numberOfRecords >= max_records:
idForDeletion = f'SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" ORDER BY created_at ASC LIMIT 1;'
cursor = self.conn.execute(idForDeletion)
id2del =[x[0] for x in cursor][0]
sql_delete_query = f"""DELETE from {CachePrompt.tableName} WHERE id = {id2del};"""
self.conn.execute(sql_delete_query)
self.conn.commit()
def insertRecord(self,prompt,response,usecaseId ,modelType,temperature=None ,max_token=None, max_records=5):
self.conn.execute('''CREATE TABLE IF NOT EXISTS cachePrompts
(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
prompt TEXT NOT NULL,
context TEXT ,
usecaseId text NOT NULL,
modelType text NOT NULL,
temperature float NOT NULL,
max_token INT,
response TEXT NOT NULL,
created_at TEXT DEFAULT CURRENT_TIMESTAMP );''')
cur = self.conn.execute(f"select * from {CachePrompt.tableName}").fetchall()
print(cur)
self.deleteRecord(usecaseId,max_records=5)
self.conn.execute(f"INSERT INTO {CachePrompt.tableName} (prompt, usecaseId,modelType,temperature,max_token,response) \
VALUES (?, ?, ?,?, ?, ?)", (prompt, usecaseId,modelType,temperature, max_token, response));
self.conn.commit()
|
aws_instance_api.py | import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
import paramiko
from pathlib import Path
import logging
import boto3
from botocore.exceptions import ClientError
import re
remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_config_dir = '/home/ubuntu/AION/data/config'
running_state_code = 16
stopped_state_code = 80
sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh '
prompt_command = '/home/ubuntu/AION/llm/sbin/run_inference.sh'
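# boto3 helpers for managing the EC2 instance used for LLM fine-tuning and inference.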
def create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region)
response = ec2.run_instances(ImageId=image_id, InstanceType=instance_type, SecurityGroupIds=[security_group_id], MaxCount=1, MinCount=1, TagSpecifications=[{'ResourceType': 'instance', 'Tags': [{'Key': 'Name', 'Value': instance_name}]}])
#print('Instance ID:', response['Instances'][0]['InstanceId'])
return response['Instances'][0]['InstanceId'],''
except Exception as e:
print(e)
return '',str(e)
def check_instance(aws_access_key_id, aws_secret_key, instance_id,region):
ip = ''
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True)
if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
        ip = get_instance_ip(aws_access_key_id, aws_secret_key, instance_id, region)
return 'Running',ip
else:
return 'NotRunning',ip
def get_instance_ip(aws_access_key_id, aws_secret_key, instance_id,region):
try:
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instances(InstanceIds=[instance_id])
ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
return ip
except Exception as e:
print(e)
return ''
def start_instance(aws_access_key_id, aws_secret_key, instance_id,region):
ip = ''
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True)
if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
print("Instance is already running")
try:
response = ec2.start_instances(InstanceIds=[instance_id], DryRun=False)
#print(response)
instance_status_code = 0
while instance_status_code != running_state_code:
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == running_state_code:
ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
break
except Exception as e:
print(e)
return 'Fail',str(e),''
return 'Success','Success',ip
def is_running(instance_id,region,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instance_status(InstanceIds=[instance_id], IncludeAllInstances=True)
if 'Reservations' in response and len(response['Reservations']) > 0:
state = response['Reservations'][0]['Instances'][0]['State']['Name']
return state
elif 'InstanceStatuses' in response:
return response['InstanceStatuses'][0]['InstanceState']['Name']
else :
return 'failed'
except:
return "error"
def terminate_instance(instance_id,region,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region)
response = ec2.terminate_instances(InstanceIds=[instance_id])
return response['TerminatingInstances'][0]['InstanceId'],'success'
    except Exception as e:
        print(e)
        return str(e),'failed'
def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username):
try:
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:
try:
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
break
except:
time.sleep(10)
try:
sftp.stat(remote_data_dir)
print(f"Path: '{remote_data_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_data_dir)
try:
sftp.stat(remote_config_dir)
print(f"Path: '{remote_config_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_config_dir)
Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir)
for traindata_files in Train_Data_InRemoteArtifacts:
print(traindata_files)
if not traindata_files.endswith('.ipynb_checkpoints'):
sftp.remove(remote_data_dir +'/'+ traindata_files)
if os.path.isdir(local_data_file_path):
list_pdf_json_files = os.listdir(local_data_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json)
print(file_pdf_json + " data copied successfully")
else:
filename = os.path.basename(local_data_file_path)
directoryname= os.path.dirname(local_data_file_path)
sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename)
print(filename + " data copied successfully")
config_InRemoteArtifacts = sftp.listdir(remote_config_dir)
for config_file in config_InRemoteArtifacts:
print(config_file)
if not config_file.endswith('.ipynb_checkpoints'):
sftp.remove(remote_config_dir +'/'+ config_file)
if local_config_file_path != '':
if os.path.isdir(local_config_file_path):
list_pdf_json_files = os.listdir(local_config_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json)
print(file_pdf_json + " config copied successfully")
else:
# updated_config_file_path = create_config(local_config_file_path)
filename = os.path.basename(local_config_file_path)
directoryname= os.path.dirname(local_config_file_path)
sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json')
print(filename + " config copied successfully")
remote_files = sftp.listdir(remote_config_dir)
print("List of uploaded files",remote_files)
sftp.close()
client.close()
except Exception as e:
print(e)
def check_status(ip,username,pem_file):
logoutput = read_log_file(ip,username,pem_file)
if "aion_llmfinetuning_Status" in logoutput:
return True
else:
return False
def read_log_file(ip,username,pem_file):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
log_file_path = '/home/ubuntu/AION/data/log/fine_tuning_log.log'
stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}')
time.sleep(5)
client.close()
return stdout.read().decode()
def run_ssh_cmd(ip,pem_file,username,log,command):
try:
buf = ''
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:
try:
client.connect(host, username=username, key_filename=pem_file)
break
except:
time.sleep(10)
stdin, stdout, stderr =client.exec_command(command)
for line in stdout:
if log != '':
log.info(line.strip())
else:
if buf != '':
buf= buf+'\n'
buf = buf+line.strip()
print(buf)
client.close()
return buf
except Exception as e:
print(str(e))
raise Exception(str(e))
def start_logging(deployFolder,modelName,version):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8')
log = logging.getLogger('log_llm')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
except Exception as e:
print(str(e))
def update_sqllite_data(usecaseid,variable,variable_value):
try:
print(usecaseid)
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid)
if (len(data) > 0):
sqlite_obj.update_data('"'+variable+'"="'+variable_value+'"','"usecaseid"="'+str(usecaseid)+'"','LLMTuning')
return('Success')
data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='AWS',status='NA')
data.update({variable:variable_value})
df = pd.DataFrame(data, index=[0])
sqlite_obj.write_data(df,'LLMTuning')
return('Success')
except Exception as e:
print(e)
return('Error')
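# Starts (or reuses) the configured EC2 instance, copies the prompt file to the remote data
# directory and runs the remote inference script, returning its stdout.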
def LLM_predict(cloudconfig,instanceid,promptfile):
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
    status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid,region)
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName']
copy_files_to_server(ip,pem_file,promptfile,'',username)
promptfile = os.path.basename(promptfile)
command = prompt_command+' '+remote_data_dir+'/'+ promptfile
buf = run_ssh_cmd(ip, pem_file, username,'',command)
return buf
def LLM_tuning_lemma7b(config,cloudconfig):
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'])
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
currentDirectory = os.path.dirname(os.path.abspath(__file__))
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
instance_type = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['InstanceType']
security_group_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['SecurityGroupId']
region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
image_id = cloud_infra['AWS_EC2']['LLaMa7B']['amiId']
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName']
datafile = config_data['basic']['dataLocation']
instance_name = config_data['basic']['modelName']+'_'+str(config_data['basic']['modelVersion'])+'_LLMTuning'
configfile = config
ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
if image_id != '':
log.info("Status:-|... Create Instance")
        instance_id,msg = create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id,aws_secret_key)
elif cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId'] != '':
instance_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId']
update_sqllite_data(modelid,'instance',instance_id)
print(instance_id)
else:
instance_id = ''
ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
if instance_id != '':
log.info("Status:-|... Start Instance")
update_sqllite_data(modelid,'status','Initialize')
        status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instance_id,region)
print(ip)
if ip != '':
update_sqllite_data(modelid,'ip',ip)
try:
log.info("Status:-|... Copy Files")
copy_files_to_server(ip,pem_file,datafile,configfile,username)
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
run_ssh_cmd(ip, pem_file, username,log,sh_file_path)
log_data = read_log_file(ip,username,pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
from llm.llm_tuning import save_output
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr)
print(outputStr)
if "Tuning completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
return output
else:
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
return output
except Exception as e:
print(e)
log.info(str(e))
output = {'status':'FAIL','message':str(e),'LogFile':''}
output = json.dumps(output)
print(f"\naion_learner_status:{output}\n")
return output
else:
output = {'status':'FAIL','message':'Not Configured Properly','LogFile':''}
output = json.dumps(output)
print(f"\naion_learner_status:{output}\n")
return output
def stop_server_instance(aws_access_key_id, aws_secret_access_key, instance_id,region):
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
try:
ec2.stop_instances(InstanceIds=[instance_id, ], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
# Dry run succeeded, call stop_instances without dryrun
try:
response = ec2.stop_instances(InstanceIds=[instance_id], DryRun=False)
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = 0
while instance_status_code != stopped_state_code:
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == stopped_state_code:
print("Instance Stopped")
break
return "Process Completed"
except ClientError as e:
print(e)
return "Process failed"
if __name__ == "__main__":
status,msg = LLM_tuning_lemma7b(sys.argv[1],sys.argv[2])
print(status, msg)
def check_file_on_server(file_path, ip, pem_file):
is_wait = True
try:
host = ip
username = "ubuntu"
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
sftp.stat(file_path)
print("Model File created On Server")
is_wait = False
except IOError:
is_wait = True
print("Model training is in progress..")
return is_wait
def removeremotefolder_files(sftp, path):
try:
files = sftp.listdir(path=path)
for f in files:
filepath = path + "/" + f
print(filepath)
if isdir(sftp, filepath):
removeremotefolder_files(sftp, filepath)
else:
sftp.remove(filepath)
sftp.rmdir(path)
except IOError as e:
print(e)
def isdir(sftp, path):
try:
return S_ISDIR(sftp.stat(path).st_mode)
except IOError:
return False
def get_remote_folder(ip, remotedir, localdir, pem_file, preserve_mtime=False):
host = ip
username = "ubuntu"
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
for entry in sftp.listdir(remotedir):
remotepath = remotedir + "/" + entry
localpath = os.path.join(localdir, entry)
mode = sftp.stat(remotepath).st_mode
if S_ISDIR(mode):
try:
                os.mkdir(localpath, mode=0o777)
except OSError:
pass
get_remote_folder(ip, remotepath, localpath, pem_file, preserve_mtime)
elif S_ISREG(mode):
sftp.get(remotepath, localpath)
print("{} downloaded successfully".format(entry))
|
llm_tuning.py | import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
from pathlib import Path
import logging
import re
import tarfile
from llm import llm_utils
#remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_data_rawdata_dir = '/home/aion/data/storage/raw_data'
remote_data_processeddata_dir = '/home/aion/data/storage/processed_data'
remote_config_dir = '/home/aion/data/config'
sh_file_path = '/home/aion/llm/sbin/llm_model_finetuning.sh'
unstructured_script_path = '/home/aion/llm/sbin/llm_model_finetuning.sh'
def start_logging(deployFolder,modelName,version):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8')
log = logging.getLogger('log_llm')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
except Exception as e:
print(str(e))
def update_sqllite_data(usecaseid,variable,variable_value):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
column_names = sqlite_obj.column_names('LLMTuning')
#print(column_names)
if 'region' not in column_names:
query = 'Alter Table LLMTuning ADD region TEXT'
sqlite_obj.execute_query(query)
if 'image' not in column_names:
query = 'Alter Table LLMTuning ADD image TEXT'
sqlite_obj.execute_query(query)
data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid)
if (len(data) > 0):
sqlite_obj.update_data('"'+variable+'"="'+variable_value+'"','"usecaseid"="'+str(usecaseid)+'"','LLMTuning')
return('Success')
data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='NA',status='NA',region='',image='')
data.update({variable:variable_value})
df = pd.DataFrame(data, index=[0])
sqlite_obj.write_data(df,'LLMTuning')
return('Success')
except Exception as e:
print(e)
return('Error')
def save_output(deployFolder,modelName,version,outputstr,hypervisor,instance):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'etc'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'output.json'
dpath = Path(deployFolder)/modelName/str(version)
outputstr['data']['deployLocation'] = str(dpath)
outputstr['data']['vmDetails'] = str(hypervisor)+' Instance: '+str(instance)
outputstr['data']['LogFile'] = str(dpath/'log'/'model_training_logs.log')
with open(deployLocation/name, 'w',encoding='utf-8') as f:
json.dump(outputstr, f)
f.close()
return (outputstr)
except Exception as e:
print(str(e))
print(outputstr)
def llm_logs(config,cloudconfig,instanceid,hypervisor,mlmodels):
try:
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
region = amiDetails['regionName']
from llm.aws_instance_api import check_instance
status,ip = check_instance(aws_access_key_id, aws_secret_key, instanceid,region)
if status.lower() == 'running':
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
from llm.ssh_command import read_log_file
logs = read_log_file(ip,username,'',pem_file)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
lines = [line.rstrip('\n') for line in f]
                    for log in logs.splitlines():
if log not in lines:
# inserts on top, elsewise use lines.append(name) to append at the end of the file.
lines.insert(0, log)
f.seek(0) # move to first position in the file, to overwrite !
f.write('\n'.join(lines))
else:
status = {'status':'Error','msg':'Instance not running'}
output = json.dumps(status)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
f.write('aion_learner_status:'+str(output))
f.close()
else:
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
projectID = cloud_infra['gcpCredentials']['projectID']
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid)
zone = amiDetails['regionName']
            username = amiDetails['ssh']['userName']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
password = ''
from llm.gcp_compute_api import check_instance
status,ip = check_instance(credentialsJson,projectID, zone, instanceid)
if status.lower() == 'running':
from llm.ssh_command import read_log_file
logs = read_log_file(ip,username,'',pem_file)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
lines = [line.rstrip('\n') for line in f]
                    for log in logs.splitlines():
if log not in lines:
lines.insert(0, log)
f.seek(0) # move to first position in the file, to overwrite !
f.write('\n'.join(lines))
else:
status = {'status':'Error','msg':'Instance not running'}
output = json.dumps(status)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
f.write('aion_learner_status:'+str(output))
f.close()
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
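# Packs the files with the requested extension (doc also includes docx) from a folder into a
# tar archive for upload to the fine-tuning machine.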
def tardirectory(path,tarfilepath,ext):
with tarfile.open(tarfilepath, 'w:tar') as tarhandle:
for root, dirs, files in os.walk(path):
for f in files:
if ext == 'doc':
if f.endswith('.' + 'doc') or f.endswith('.' + 'docx'):
tarhandle.add(os.path.join(root, f), arcname=f)
else:
if f.endswith('.'+ext):
tarhandle.add(os.path.join(root, f),arcname=f)
tarhandle.close()
def getAMIDetails(config,selectedAMI):
y = {}
for x in config:
if x['id'] == selectedAMI:
return x
return y
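# Main fine-tuning entry point: tars unstructured data if needed, provisions or starts the
# selected GCP/AWS machine, uploads data and config, runs the remote fine-tuning script and
# parses the 'aion_learner_status' result from the remote log.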
def run(config):
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'])
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
currentDirectory = os.path.dirname(os.path.abspath(__file__))
filetimestamp = str(int(time.time()))
instance_name = config_data['basic']['modelName']+'-'+str(config_data['basic']['modelVersion'])+'-LLM-'+filetimestamp
instance_name = instance_name.lower()
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
if os.path.isdir(config_data['basic']['dataLocation']):
from appbe.dataPath import DATA_FILE_PATH
filetimestamp = str(int(time.time()))
tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar')
tardirectory(config_data['basic']['dataLocation'],tarfilepath,config_data['basic']['folderSettings']['fileExtension'])
config_data['basic']['dataLocationUnstructured'] = tarfilepath
with open(config, "w") as outfile:
json.dump(config_data, outfile)
outfile.close()
if cloud_infra['computeInfrastructure'] == 'GCP':
log.info("Status:-|... Compute Infrastructure GCP GCE")
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
#credentialsJson = "C:\AION\GCP-Instance-Utilityv2\GCP-Instance-Utility\ers-research.json"
selectedID = cloud_infra['gcpCredentials']['selectedID']
projectID = cloud_infra['gcpCredentials']['projectID']
zone = cloud_infra['gcpCredentials']['regionName']
selectMachineType = cloud_infra['gcpCredentials']['machineType']
if selectMachineType.lower() == 'image':
amiDetails = getAMIDetails(cloud_infra['GCP']['machineImage'],selectedID)
machineImageName = amiDetails['id']
else:
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], selectedID)
zone = amiDetails['regionName']
machineImageName = ''
instance_name = selectedID
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
if machineImageName != '':
log.info("Status:-|... Create Instance Start")
try:
server = llm_utils.gcp_server("",machineImageName)
ip,msg = server.create(instance_name)
log.info("Status:-|... Create Instance End")
if ip == '':
if "resources available" in msg:
msg = "The respective zone (or region) does not have enough resources available to fulfill the request. Please try after some time."
output_json = {"status": "FAIL", "message": str(msg), "LogFile": ''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to create the instance. "+str(msg))
print(f"\naion_learner_status:{output}\n")
return output
except Exception as e:
print(str(e))
output_json = {"status":"FAIL","message":'Failed to create the instance.',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to create the instance.")
print(f"\naion_learner_status:{output}\n")
return output
else:
server = llm_utils.gcp_server(instance_name, "")
server.start()
ip = server.ip
if ip != '':
time.sleep(20)
if selectMachineType.lower() == 'machineimage':
update_sqllite_data(modelid, 'image', machineImageName)
update_sqllite_data(modelid,'hypervisor','GCP')
update_sqllite_data(modelid, 'region', zone)
update_sqllite_data(modelid,'ip',ip)
update_sqllite_data(modelid,'instance',instance_name)
from llm.ssh_command import copy_files_to_server
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
datafile = config_data['basic']['dataLocationUnstructured']
else:
datafile = config_data['basic']['dataLocation']
log.info("Status:-|... Upload tuning data Start")
try:
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
else:
copy_files_to_server(ip, pem_file, datafile, config, username,'', remote_data_processeddata_dir,remote_config_dir)
time.sleep(20)
log.info("Status:-|... Upload tuning data End")
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
from llm.ssh_command import run_ssh_cmd
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
script_path = unstructured_script_path
else:
script_path = sh_file_path
print(script_path)
run_ssh_cmd(ip,pem_file, username,'',log,script_path)
from llm.ssh_command import read_log_file
log_data = read_log_file(ip,username,'',pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'GCP GCE',instance_name)
vmRunning = config_data['basic'].get('vmRunning','KeepRunning')
if vmRunning.lower() != 'keeprunning':
from llm.gcp_compute_api import stop_instance
server.stop()
if "Tuning Completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
return output
else:
server.showndown()
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
return output
except Exception as e:
print(e)
server.showndown()
output_json = {"status": "FAIL", "message": str(e), "LogFile": ''}
output = json.dumps(output_json)
log.info("Status:-|... " + str(e))
print(f"\naion_learner_status:{output}\n")
return output
else:
output_json = {"status":"FAIL","message":'Failed to initialize the instance',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\naion_learner_status:{output}\n")
return output
elif cloud_infra['computeInfrastructure'] == 'AWS':
log.info("Status:-|... Compute Infrastructure AWS EC2")
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
selectMachineType = cloud_infra['awsCredentials']['machineType']
selectedID = cloud_infra['awsCredentials']['selectedID']
region = cloud_infra['awsCredentials']['regionName']
if selectMachineType.lower() == 'ami':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'],selectedID)
instance_type = amiDetails['instanceSetting']['instanceType']
security_group_id = cloud_infra['awsCredentials']['securitygroupid']
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], selectedID)
region = amiDetails['regionName']
#region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
image_id = amiDetails['id']
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
datafile = config_data['basic']['dataLocationUnstructured']
else:
datafile = config_data['basic']['dataLocation']
if selectMachineType.lower() == 'ami':
log.info("Status:-|... Create Instance Start")
server = llm_utils.aws_server('', image_id)
instance_id,msg = server.create(instance_name)
if instance_id == '':
output_json = {"status":"FAIL","message":'Failed to initialize the instance. '+str(msg),"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
log.info("Status:-|... Create Instance End")
elif selectMachineType.lower() == 'instance':
instance_id = image_id
update_sqllite_data(modelid,'instance',instance_id)
server = llm_utils.aws_server( instance_id, '')
else:
output_json = {"status":"FAIL","message":'AMI is not configured',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... AMI is not configured")
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
# instance_id = ''
# ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
try:
from appbe.models import get_published_models
already_published,published_usecase = get_published_models(instance_id)
if already_published:
Msg = f'Fine tuned model {published_usecase} is already published on this instance. Please unpublish it to proceed.'
output_json = {"status":"Error","message":Msg,"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... A Model is already Published at the same instance.")
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
except Exception as e:
log.info(str(e))
print(str(e))
if instance_id != '':
log.info("Status:-|... Start Instance")
if selectMachineType.lower() == 'ami':
update_sqllite_data(modelid, 'image', image_id)
update_sqllite_data(modelid, 'region', region)
update_sqllite_data(modelid,'instance',instance_id)
update_sqllite_data(modelid,'hypervisor','AWS')
update_sqllite_data(modelid,'status','Initialize')
status,msg = server.start()
ip = server.ip
time.sleep(20)
if status and ip != '':
update_sqllite_data(modelid,'ip',ip)
try:
log.info("Status:-|... Copy Files")
from llm.ssh_command import copy_files_to_server
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
print(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
else:
print(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir, remote_config_dir)
copy_files_to_server(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir,remote_config_dir)
time.sleep(20)
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
from llm.ssh_command import run_ssh_cmd
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
script_path = unstructured_script_path
else:
script_path = sh_file_path
#print(script_path)
#sys.exit()
run_ssh_cmd(ip, pem_file, username,'',log,script_path)
from llm.ssh_command import read_log_file
log_data = read_log_file(ip,username,'',pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'AWS EC2',instance_id)
vmRunning = config_data['basic'].get('vmRunning','KeepRunning')
if vmRunning.lower() != 'keeprunning':
server.stop()
if "Tuning Completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
else:
server.showndown()
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
except Exception as e:
print(e)
log.info(str(e))
server.showndown()
output = {'status': 'FAIL', 'message': str(e), 'LogFile': ''}
output = json.dumps(output)
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
else:
output = {'status':'FAIL','message':msg,'LogFile':''}
output = json.dumps(output)
print(f"\naion_learner_status:{output}\n")
log.info(f"\naion_learner_status:{output}\n")
return output
else:
output_json = {"status":"FAIL","message":'Failed to initialize the instance',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\naion_learner_status:{output}\n")
return output |
llm_summarization.py | import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
from pathlib import Path
import logging
import re
remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_config_dir = '/home/ubuntu/AION/data/config'
sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh'
import os
import tarfile
def tardirectory(path,tarfilepath):
with tarfile.open(tarfilepath, 'w:tar') as tarhandle:
for root, dirs, files in os.walk(path):
for f in files:
tarhandle.add(os.path.join(root, f),arcname=f)
tarhandle.close()
def createCodeSummary(codedir,cloudconfig,filetype):
try:
from appbe.dataPath import DATA_FILE_PATH
filetimestamp = str(int(time.time()))
tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar')
tardirectory(codedir,tarfilepath)
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
instance_type = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['InstanceType']
security_group_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['SecurityGroupId']
region = cloud_infra['AWS_EC2']['CodeSummarization']['RegionName']
image_id = cloud_infra['AWS_EC2']['CodeSummarization']['amiId']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['userName']
instance_id = ''
if image_id != '':
from llm.aws_instance_api import create_instance
instance_name = 'aion-code-summarization-' + filetimestamp # assumed naming scheme; instance_name was not defined anywhere in this function
instance_id = create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id, aws_secret_key)
if instance_id == '':
return 'Failed','Instance Creation Failed'
if instance_id == '':
if cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId'] != '':
instance_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId']
else:
return 'Failed','Instance Creation Failed.'
if instance_id != '':
from llm.aws_instance_api import start_instance
ip = start_instance(aws_access_key_id, aws_secret_key, instance_id,region)
if ip != '':
from llm.ssh_command import copy_files_to_server
copy_files_to_server(ip,pem_file,tarfilepath,'',username,'',remote_data_dir,remote_config_dir)
from llm.ssh_command import run_ssh_cmd
command = 'rm -r /home/ubuntu/AION/data/code'
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = 'mkdir -p /home/ubuntu/AION/data/code'
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = 'tar -xvf '+remote_data_dir+'/'+filetimestamp+'.tar -C /home/ubuntu/AION/data/code'
print(command)
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = sh_file_path+' '+'/home/ubuntu/AION/data/code'+' '+filetype
print(command)
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
from llm.ssh_command import read_file_from_server
filetimestamp = str(int(time.time()))
codesummar = os.path.join(DATA_FILE_PATH,filetimestamp+'.csv')
read_file_from_server(ip,username,'',pem_file,'/home/ubuntu/AION/data/storage/code_summararies.csv',codesummar)
return 'Success',codesummar
else:
return 'Failed','Instance Initialization Failed.'
else:
return 'Failed','Instance Initialization Failed. AMI/Instance is not configured. Please check with ERS Research.'
except Exception as e:
print(e)
return 'Failed','Code Summarization Failed' |
gcp_compute_api.py | from google.cloud import compute_v1
import os
PROJECT_ID = 'ers-research'
ZONE = 'us-west1-b'
INSTANCE_NAME = 'aion-llm-a100-vm1'
MACHINE_IMAGE_NAME = 'aion-40gb-a100-image'
MACHINE_IMAGE_PROJECT_ID = 'ers-research'
def create_instance(credentialsJson,project_id, zone, instance_name, machine_image_name, machine_image_project_id):
try:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
machine_image = compute_v1.MachineImagesClient().get(project=machine_image_project_id, machine_image=machine_image_name)
instance = compute_v1.Instance()
instance.name = instance_name
instance.machine_type = f"zones/{zone}/machineTypes/a2-ultragpu-1g"
instance.source_machine_image = machine_image.self_link
boot_disk = compute_v1.AttachedDisk()
boot_disk.auto_delete = True
boot_disk.boot = True
instance.disks = [boot_disk]
network_interface = compute_v1.NetworkInterface()
access_config = compute_v1.AccessConfig()
access_config.type = "ONE_TO_ONE_NAT"
network_interface.access_configs = [access_config]
instance.network_interfaces = [network_interface]
operation = compute.insert(project=project_id, zone=zone, instance_resource=instance)
operation.result()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
print("--->instace created ")
print(instance.network_interfaces[0])
return instance.network_interfaces[0].access_configs[0].nat_i_p,''
except Exception as e:
print(e)
return '',str(e)
def is_running(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
return status
def check_instance(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
if status.lower() == 'running':
print(instance.network_interfaces[0].access_configs[0].nat_i_p)
ip = instance.network_interfaces[0].access_configs[0].nat_i_p
else:
ip = ''
return status,ip
def start_instance(credentialsJson,project_id, zone, instance_name):
try:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
operation = compute.start(project=project_id, zone=zone, instance=instance_name)
operation.result()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
if status.lower() == 'running':
print(instance.network_interfaces[0].access_configs[0])
ip = instance.network_interfaces[0].access_configs[0].nat_i_p
else:
ip = ''
except Exception as e:
print(e)
status = 'Error'
ip = ''
return status,ip
def stop_instance(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
operation = compute.stop(project=project_id, zone=zone, instance=instance_name)
operation.result()
def terminate_instance(project_id, zone, instance_name):
try:
compute = compute_v1.InstancesClient()
operation = compute.delete(project=project_id, zone=zone, instance=instance_name)
operation.result()
return "","suceess"
except Exception as e:
return str(e),"error"
# if __name__ == '__main__':
# ip_address = create_instance(PROJECT_ID, ZONE, INSTANCE_NAME, MACHINE_IMAGE_NAME, MACHINE_IMAGE_PROJECT_ID)
# print(f"IP address of the new VM: {ip_address}")
# #start_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
# # stop_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
# # terminate_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
|
llm_inference.py | import json
import os
import time
remote_data_dir = '/home/aion/data/storage/prompt'
remote_config_dir = '/home/aion/data/config'
prompt_command = '/home/aion/llm/sbin/llm_predict.sh'
command_prepare_model = '/home/aion/llm/sbin/llm_merge_weights.sh'
command_start_service = '/home/aion/llm/sbin/llm_publish_model.sh'
command_stop_service = 'publish.py'
from AION.llm import llm_utils
from pathlib import Path
def getAMIDetails(config,selectedAMI):
y = {}
for x in config:
print(x)
if x['id'] == selectedAMI:
return x
return y
def get_ip(cloudconfig,instanceid,hypervisor,region,image):
try:
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import get_instance_ip
return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region)
elif hypervisor == 'GCP':
#print(hypervisor,instanceid)
server = llm_utils.hypervisor( hypervisor,instanceid)
if server.is_machine_running():
return server.ip
else:
return ''
except Exception as e:
print(e)
raise Exception
def kill_inference_server(cloudconfig,instanceid,hypervisor,region,image):
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
ip = get_ip(cloudconfig,instanceid,hypervisor,region,image)
if ip == '':
print("Machine is not running.")
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
command = 'pkill -f'+ ' '+command_stop_service
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
elif hypervisor == 'GCP':
server = llm_utils.hypervisor( hypervisor,instanceid)
if server.is_machine_running():
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import run_ssh_cmd
command = 'pkill -f'+ ' '+command_stop_service
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command)
else:
raise Exception("Error")
def LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
print(status,msg,ip)
if status.lower() == 'success':
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
command = command_prepare_model + ' ' + usecaseid + ' '+ str(model)
print(command)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
if "Error" in buf:
print("Error in Merging model")
raise Exception("Error in Merging model")
print("merging finished")
command = command_start_service+' '+ usecaseid
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
print("inference server running")
return buf
else:
print(msg)
return msg
elif hypervisor == 'GCP':
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
server = llm_utils.hypervisor(hypervisor,instanceid)
if not server.is_machine_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import run_ssh_cmd
#print(model)
#print(usecaseid)
command = command_prepare_model + ' ' + usecaseid + ' '+ str(model)
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command)
if "Error" in buf:
print("Error in Merging model")
raise Exception("Error in Merging model")
#print("merging finished")
command = command_start_service+' '+ usecaseid
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command)
#print("inference server running")
return buf
else:
print("Not configured for gcp")
raise Exception("Eror")
def LLM_predict(cloudconfig,instanceid,promptfile,hypervisor,model,usecaseid,region,image,temperature,maxtokens,modelType):
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
try:
temperature = float(temperature)
except:
temperature = 0.4
try:
maxtokens = int(maxtokens)
except:
maxtokens = 2048
print("====")
print(float(temperature))
print("====")
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
#print(aws_access_key_id, aws_secret_key, instanceid, region)
status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
if status.lower() == 'success':
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
from llm.ssh_command import copy_files_to_server
#print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir)
copy_files_to_server(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir)
promptfile = os.path.basename(promptfile)
if modelType == 'BaseModel':
command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str(
model) + ' ' + str(temperature) + ' ' + str(maxtokens)
else:
command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens)
print(command)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
return buf
else:
return msg
else:
server = llm_utils.hypervisor( hypervisor,instanceid)
if not server.is_machine_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import copy_files_to_server
copy_files_to_server(server.ip,pem_file,promptfile,'',ssh['userName'],'',remote_data_dir,remote_config_dir)
promptfile = os.path.basename(promptfile)
if modelType == 'BaseModel':
command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str(
model) + ' ' + str(temperature) + ' ' + str(maxtokens)
else:
command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens)
#command = '/home/aion/llm/sbin/llm_model_finetuning.sh'
#print(command)
from llm.ssh_command import run_ssh_cmd
#print(ssh['userName'],pem_file)
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command)
return buf |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
survival_analysis.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter, CoxPHFitter
from lifelines.utils import datetimes_to_durations
import logging
import numpy as np
import re
import sys
import os
class SurvivalAnalysis(object):
def __init__(self, df, pipe, method, event_column, duration_column, filterExpression, train_features_type,start=None, end=None):
pd.options.display.width = 30
self.df = df
self.pipe = pipe
self.train_features_type = train_features_type
self.filterExpression = filterExpression
self.covariateExpression = filterExpression
self.method = method
self.event_column = event_column
if start is not None and end is not None:
self.df['duration'], _ = datetimes_to_durations(start, end)
self.duration_column = 'duration'
else:
self.duration_column = duration_column
self.models = []
self.score = 0
self.log = logging.getLogger('eion')
self.plots = []
def transform_filter_expression(self, covariate, covariate_input):
'''
Transforms the user-supplied filter value into the model's feature space: categorical values are
encoded and numerical values are normalised with the same profiler pipeline fitted on the training
data, so the filter comparison is made against transformed feature values.
'''
cols = list(self.df.columns)
if self.duration_column in cols:
cols.remove(self.duration_column)
if self.event_column in cols:
cols.remove(self.event_column)
df_filter = pd.DataFrame([{covariate:covariate_input}], columns=cols)
df_filter[covariate] = df_filter[covariate].astype(self.train_features_type[covariate])
df_transform_array = self.pipe.transform(df_filter)
df_transform = pd.DataFrame(df_transform_array, columns=cols)
return df_transform[covariate].iloc[0]
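# Illustrative example (hypothetical feature and values): if 'age' was normalised by the profiler
# pipeline, a user filter such as "age>30" is compared in transformed space, e.g.
#   fv = self.transform_filter_expression('age', '30')   # returns e.g. 0.42 rather than 30
# so it can be compared directly against the already-transformed values in self.df.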
def learn(self):
self.log.info('\n---------- SurvivalAnalysis learner has started ----------')
self.log.info('\n---------- SurvivalAnalysis learner method is "%s" ----------' % self.method)
if self.method.lower() in ['kaplanmeierfitter', 'kaplanmeier', 'kaplan-meier', 'kaplan meier', 'kaplan', 'km',
'kmf']:
self.log.info('\n---------- SurvivalAnalysis learner method "%s" has started ----------' % self.method)
kmf = KaplanMeierFitter()
T = self.df[self.duration_column]
E = self.df[self.event_column]
self.log.info('\n T : \n%s' % str(T))
self.log.info('\n E : \n%s' % str(E))
K = kmf.fit(T, E)
kmf_sf = K.survival_function_
kmf_sf_json = self.survival_probability_to_json(kmf_sf)
self.models.append(K)
if isinstance(self.filterExpression, str):
df_f, df_n, refined_filter_expression = self.parse_filterExpression()
kmf1 = KaplanMeierFitter()
kmf2 = KaplanMeierFitter()
self.log.info(
'\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------' % self.method)
T1 = df_f[self.duration_column]
E1 = df_f[self.event_column]
T2 = df_n[self.duration_column]
E2 = df_n[self.event_column]
kmf1.fit(T1, E1)
fig, ax = plt.subplots(1, 1)
ax = kmf1.plot_survival_function(ax=ax, label='%s' % refined_filter_expression)
self.log.info(
'\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------' % self.method)
plt.title("KM Survival Functions - Filter vs Negation")
self.log.info(
'\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------' % self.method)
kmf2.fit(T2, E2)
ax = kmf2.plot_survival_function(ax=ax, label='~%s' % refined_filter_expression)
self.log.info(
'\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------' % self.method)
self.models.extend([kmf1, kmf2])
kmf1_sf = kmf1.survival_function_
kmf2_sf = kmf2.survival_function_
kmf1_sf_json = self.survival_probability_to_json(kmf1_sf)
self.plots.append(fig)
self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf1_sf_json
else:
fig, ax = plt.subplots(1, 1)
ax = kmf_sf.plot(ax=ax)
plt.title("KM Survival Functions")
self.plots.append(fig)
self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf_sf_json
elif self.method.lower() in ['coxphfitter', 'coxregression', 'cox-regression', 'cox regression',
'coxproportionalhazard', 'coxph', 'cox', 'cph']:
self.log.info('\n---------- SurvivalAnalysis learner method "%s" has started ----------' % self.method)
cph = CoxPHFitter(penalizer=0.1)
self.df = self.drop_constant_features(self.df)
C = cph.fit(self.df, self.duration_column, self.event_column)
self.models.append(C)
cph_sf = C.baseline_survival_
self.score = C.score(self.df, scoring_method="concordance_index")
self.log.info(
'\n---------- SurvivalAnalysis learner "%s" score is "%s"----------' % (self.method, str(self.score)))
cph_sf_json = self.survival_probability_to_json(cph_sf)
if isinstance(self.covariateExpression, str):
covariate, covariate_inputs, covariate_values = self.parse_covariateExpression()
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.tight_layout()
ax1 = C.plot(ax=ax1, hazard_ratios=True)
self.log.info('\n Summary : \n%s' % str(C.summary))
ax1.set_title("COX hazard ratio")
ax2 = C.plot_partial_effects_on_outcome(covariate, covariate_values, ax=ax2)
mylabels = [covariate + '=' + str(x) for x in covariate_inputs]
mylabels.append('baseline')
ax2.legend(labels=mylabels)
ax2.set_title("Covariate Plot")
self.plots.append(fig)
else:
fig = plt.figure()
ax1 = C.plot(hazard_ratios=True)
self.log.info('\n Summary : \n%s' % str(C.summary))
plt.title("COX hazard ratio")
self.plots.append(fig)
self.log.info('\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: CoxPHFitter')
return cph_sf_json
def parse_filterExpression(self):
import operator
self.log.info('\n---------- Filter Expression parsing has started ----------')
self.log.info('Filter Expression provided : %s' % self.filterExpression)
self.log.info('Shape before filter : %s' % str(self.df.shape))
f = self.filterExpression.split('&')
f = list(filter(None, f))
if len(f) == 1:
p = '[<>=!]=?'
op = re.findall(p, self.filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\'').strip('\"') for x in self.filterExpression.split(op)]
refined_filter_expression = covariate + op + covariate_input
self.log.info('Final refined filter : %s' % refined_filter_expression)
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
try:
fv = self.transform_filter_expression(covariate, covariate_input)
df_f = self.df[ops[op](self.df[covariate], fv)]
self.log.info('Shape after filter : %s' % str(df_f.shape))
df_n = self.df[~self.df[covariate].isin(df_f[covariate])]
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
except Exception:
self.log.info('\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
else:
full_f = []
try:
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
# apply all '&'-joined clauses cumulatively so every condition contributes to the filter
mask = pd.Series(True, index=self.df.index)
for filterExpression in f:
p = '[<>=!]=?'
op = re.findall(p, filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\'').strip('\"') for x in filterExpression.split(op)]
full_f.append(covariate + op + covariate_input)
fv = self.transform_filter_expression(covariate, covariate_input)
mask &= ops[op](self.df[covariate], fv)
df_f = self.df[mask]
df_n = self.df[~mask]
refined_filter_expression = " & ".join(full_f)
self.log.info('Final refined filter : %s' % refined_filter_expression)
self.log.info('Shape after filter : %s' % str(df_f.shape))
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
# except (IndexError, ValueError, KeyError):
except Exception:
self.log.info('\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
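# Illustrative filter expressions (hypothetical column names):
#   "Sex==male"           -> single clause; the categorical value is encoded before comparison
#   "Age>=10 & Age<=20"   -> clauses joined by '&' are applied cumulatively to build df_f and its negation df_n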
def parse_covariateExpression(self):
self.log.info('\n---------- Covariate Expression parsing has started ----------')
self.log.info('\n Covariate Expression provided : %s' % self.covariateExpression)
import ast
p = '[=:]'
try:
op = re.findall(p, self.covariateExpression)[0]
covariate, covariate_inputs = [x.strip().strip('\'').strip('\"') for x in
self.covariateExpression.split(op)]
covariate_inputs = ast.literal_eval(covariate_inputs)
covariate_values = [self.transform_filter_expression(covariate, x) for x in covariate_inputs]
self.log.info('\n---------- Covariate Expression parsing has ended ----------')
return covariate, covariate_inputs, covariate_values
except Exception:
self.log.info('\n-----> Covariate Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given covariate expression '+ self.covariateExpression +' is invalid')
self.log.info("\n Valid examples are A=['Yes','No'] or B=[100,500,1000]")
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
def survival_probability_to_json(self, sf):
'''
sf: survival function, i.e. KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_
Returns a JSON string of survival probabilities (as percentages) sorted by duration.
'''
sf = sf[sf.columns[0]].apply(lambda x: "%4.2f" % (x * 100))
self.log.info('\n Survival probabilities : \n%s' % str(sf))
sf = sf.reset_index()
sf = sf.sort_values(sf.columns[0])
sf_json = sf.to_json(orient='records')
self.log.info('\n Survival probability json : \n%s' % str(sf_json))
return sf_json
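# Illustrative output (hypothetical durations/probabilities): a JSON string of records sorted by
# duration, e.g.
#   '[{"timeline":0.0,"KM_estimate":"100.00"},{"timeline":12.0,"KM_estimate":"87.50"}]'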
def drop_constant_features(self, df):
dropped = []
for col in df.columns:
if (len(df[col].unique()) == 1) and (col not in [self.duration_column, self.event_column]):
df.drop(col, inplace=True, axis=1)
dropped.append(col)
if len(dropped) != 0:
self.log.info('\n Dropping constant features %s' % str(dropped))
self.log.info('\n After dropping constant features : \n%s' % str(df))
return df
def predict(self):
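# Note: predict() relies on self.model and self.test being assigned by the caller;
# __init__ only initialises self.models (populated by learn()), so set those attributes before calling.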
if self.method == 'KaplanMeierFitter':
return self.model.predict(self.test[self.duration_column])
elif self.method == 'CoxPHFitter':
res = []
for idx, row in self.test.iterrows():
res.append(
self.model.predict_survival_function(self.test, times=row[self.model.duration_col])[idx].values[0])
return pd.DataFrame(res)
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
learning.py | import warnings
import sys
warnings.simplefilter(action='ignore', category=FutureWarning)
import xgboost as xgb
import dask.array as da
import shutil
import dask.distributed
import dask.dataframe as dd
import dask_ml
import logging
from sklearn.metrics import accuracy_score, recall_score, \
roc_auc_score, precision_score, f1_score, \
mean_squared_error, mean_absolute_error, \
r2_score, classification_report, confusion_matrix, \
mean_absolute_percentage_error
import lightgbm as lgb
import re
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from dask_ml.impute import SimpleImputer
from dask_ml.compose import ColumnTransformer
from dask_ml.decomposition import TruncatedSVD, PCA
from dask_ml.preprocessing import StandardScaler, \
MinMaxScaler, \
OneHotEncoder, LabelEncoder
from dask_ml.wrappers import ParallelPostFit
import numpy as np
import json
import time
from sklearn.ensemble import IsolationForest
import joblib
import pickle as pkl
import os
predict_config={}
dask.config.set({"distributed.workers.memory.terminate": 0.99})
dask.config.set({"array.chunk-size": "128 MiB"})
dask.config.set({"distributed.admin.tick.limit": "3h"})
# dask.config.set({"distributed.workers.memory.pause": 0.9})
class MinImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# to_fillna = ['public_meeting', 'scheme_management', 'permit']
# X[to_fillna] = X[to_fillna].fillna(value='NaN')
# X[to_fillna] = X[to_fillna].astype(str)
X = X.fillna(value=X.min())
# X = X.astype(str)
return X
class MaxImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.fillna(value=X.max())
return X
class DropImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.dropna()
return X
class ModeCategoricalImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.fillna(value=X.mode())
return X
class IsoForestOutlierExtractor(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y):
lcf = IsolationForest()
with joblib.parallel_backend('dask'):
lcf.fit(X)
y_pred_train = lcf.predict(X)
y_pred_train = y_pred_train == 1
return X
def load_config_json(json_file):
with open(json_file, 'r') as j:
contents = json.loads(j.read())
return contents
def load_data_dask(data_file, npartitions=500):
big_df = dd.read_csv(data_file, # sep=r'\s*,\s*',
assume_missing=True,
parse_dates=True, infer_datetime_format=True,
sample=1000000,
# dtype={'caliper': 'object',
# 'timestamp': 'object'},
# dtype='object',
na_values=['-','?']
)
big_df = big_df.repartition(npartitions)
return big_df
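# Illustrative usage (hypothetical path): big_df = load_data_dask('/data/train.csv', npartitions=200)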
def get_dask_eda(df_dask):
descr = df_dask.describe().compute()
corr = df_dask.corr().compute()
return descr, corr
def normalization(config):
scaler = config["advance"] \
["profiler"]["normalization"]
scaler_method = None
if scaler["minMax"] == "True":
scaler_method = MinMaxScaler()
if scaler["standardScaler"] == "True":
scaler_method = StandardScaler()
return scaler_method
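# Expected config fragment (illustrative): exactly one scaler flag is the string "True", e.g.
#   config["advance"]["profiler"]["normalization"] == {"minMax": "True", "standardScaler": "False"}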
def categorical_encoding(config):
encoder = config["advance"]["profiler"] \
["categoryEncoding"]
encoder_method = None
if encoder["OneHotEncoding"] == "True":
encoder_method = OneHotEncoder()
# OneHotEncoder(handle_unknown='ignore', sparse=False)
if encoder["LabelEncoding"] == "True":
encoder_method = LabelEncoder()
return encoder_method
def numeric_feature_imputing(config):
imputer_numeric_method = None
imputer_numeric = config["advance"] \
["profiler"]["numericalFillMethod"]
if imputer_numeric["Median"] == "True":
print("Median Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='median')
if imputer_numeric["Mean"] == "True":
print("Mean Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='mean')
if imputer_numeric["Min"] == "True":
print("Min Simple Imputer")
imputer_numeric_method = MinImputer()
if imputer_numeric["Max"] == "True":
print("Max Simple Imputer")
imputer_numeric_method = MaxImputer()
if imputer_numeric["Zero"] == "True":
print("Zero Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='constant',
fill_value=0)
# if imputer_numeric["Drop"] == "True":
# print("Median Simple Imputer")
# imputer_numeric_method = DropImputer()
return imputer_numeric_method
def categorical_feature_imputing(config):
imputer_categorical_method = None
imputer_categorical = config["advance"] \
["profiler"]["categoricalFillMethod"]
if imputer_categorical["MostFrequent"] == "True":
imputer_categorical_method = SimpleImputer(strategy='most_frequent')
if imputer_categorical["Mode"] == "True":
imputer_categorical_method = ModeCategoricalImputer()
if imputer_categorical["Zero"] == "True":
imputer_categorical_method = SimpleImputer(strategy='constant',
fill_value=0)
return imputer_categorical_method
def preprocessing_pipeline(config, X_train):
print("Start preprocessing")
scaler_method = normalization(config)
encoding_method = categorical_encoding(config)
imputer_numeric_method = numeric_feature_imputing(config)
imputer_categorical_method = categorical_feature_imputing(config)
numeric_pipeline = Pipeline(steps=[
('impute', imputer_numeric_method),
('scale', scaler_method)
])
categorical_pipeline = Pipeline(steps=[
('impute', imputer_categorical_method),
('encoding', encoding_method)
])
numerical_features = X_train._get_numeric_data().columns.values.tolist()
categorical_features = list(set(X_train.columns) - set(X_train._get_numeric_data().columns))
print("numerical_features: ", numerical_features)
print("categorical_features: ", categorical_features)
full_processor = ColumnTransformer(transformers=[
('number', numeric_pipeline, numerical_features),
# ('category', categorical_pipeline, categorical_features)
])
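# Note: categorical_pipeline is built above but the 'category' transformer entry is commented out,
# so only the numeric features are currently imputed/scaled by this ColumnTransformer.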
return full_processor
def full_pipeline(X_train, X_test, config):
full_processor = preprocessing_pipeline(config, X_train)
reduce_dim = config["advance"] \
["selector"]["featureEngineering"]
feature_reduce = None
if reduce_dim["SVD"] == "True":
feature_reduce = TruncatedSVD(n_components=3)
if reduce_dim["PCA"] == "True":
feature_reduce = PCA(n_components=3)
X_train = full_processor.fit_transform(X_train)
# joblib.dump(full_processor, 'full_processor_pipeline.pkl')
deploy_location = config["basic"]["modelLocation"]
profiler_file = os.path.join(deploy_location,'model','profiler.pkl')
selector_file = os.path.join(deploy_location,'model','selector.pkl')
save_pkl(full_processor, profiler_file)
X_test = full_processor.transform(X_test)
predict_config['profilerLocation'] = 'profiler.pkl'
if feature_reduce != None:
X_train = feature_reduce.fit_transform(X_train.to_dask_array(lengths=True))
save_pkl(feature_reduce, selector_file)
predict_config['selectorLocation'] = 'selector.pkl'
# joblib.dump(feature_reduce, 'feature_reduce_pipeline.pkl')
X_test = feature_reduce.transform(X_test.to_dask_array(lengths=True))
X_train = dd.from_dask_array(X_train)
X_test = dd.from_dask_array(X_test)
else:
predict_config['selectorLocation'] = ''
return X_train, X_test
def train_xgb_classification(client, X_train, y_train, X_test, config):
print("Training XGBoost classification")
model_hyperparams = config["advance"] \
["distributedlearner_config"] \
["modelParams"] \
["classifierModelParams"] \
["Distributed Extreme Gradient Boosting (XGBoost)"]
dask_model = xgb.dask.DaskXGBClassifier(
tree_method=model_hyperparams["tree_method"],
n_estimators=int(model_hyperparams["n_estimators"]),
max_depth=int(model_hyperparams["max_depth"]),
gamma=float(model_hyperparams["gamma"]),
min_child_weight=float(model_hyperparams["min_child_weight"]),
subsample=float(model_hyperparams["subsample"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
learning_rate=float(model_hyperparams["learning_rate"]),
reg_alpha=float(model_hyperparams["reg_alpha"]),
reg_lambda=float(model_hyperparams["reg_lambda"]),
random_state=int(model_hyperparams["random_state"]),
verbosity=3)
dask_model.client = client
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_xgb_regression(client, X_train, y_train, X_test, config):
model_hyperparams = config["advance"] \
["distributedlearner_config"] \
["modelParams"] \
["regressorModelParams"] \
["Distributed Extreme Gradient Boosting (XGBoost)"]
print("Training XGBoost regression")
dask_model = xgb.dask.DaskXGBRegressor(
tree_method=model_hyperparams["tree_method"],
n_estimators=int(model_hyperparams["n_estimators"]),
max_depth=int(model_hyperparams["max_depth"]),
gamma=float(model_hyperparams["gamma"]),
min_child_weight=float(model_hyperparams["min_child_weight"]),
subsample=float(model_hyperparams["subsample"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
learning_rate=float(model_hyperparams["learning_rate"]),
reg_alpha=float(model_hyperparams["reg_alpha"]),
reg_lambda=float(model_hyperparams["reg_lambda"]),
random_state=int(model_hyperparams["random_state"]),
verbosity=3)
dask_model.client = client
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train, eval_set=[(X_test, y_test)])
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_lgbm_regression(client, X_train, y_train, X_test, config):
print("Training lightGBM regression")
model_hyperparams = config["advance"] \
["distributedlearner_config"] \
["modelParams"] \
["regressorModelParams"] \
["Distributed Light Gradient Boosting (LightGBM)"]
dask_model = lgb.DaskLGBMRegressor(
client=client,
n_estimators=int(model_hyperparams["n_estimators"]),
num_leaves=int(model_hyperparams["num_leaves"]),
max_depth =int(model_hyperparams["max_depth"]),
learning_rate=float(model_hyperparams["learning_rate"]),
min_child_samples=int(model_hyperparams["min_child_samples"]),
reg_alpha=int(model_hyperparams["reg_alpha"]),
subsample=float(model_hyperparams["subsample"]),
reg_lambda=int(model_hyperparams["reg_lambda"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
n_jobs=4,
verbosity=3)
X_train, X_test = full_pipeline(X_train, X_test, config)
# print("before X_train.shape, y_train.shape",
# X_train.shape,
# y_train.shape)
# indices = dask_findiforestOutlier(X_train)
# print("X_train type: ", type(X_train))
# print("y_train type: ", type(y_train))
# X_train, y_train = X_train.iloc[indices, :], \
# y_train.iloc[indices]
# print("after X_train.shape, y_train.shape",
# X_train.shape,
# y_train.shape)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train,
# # eval_set=[(X_test,y_test),
# # (X_train,y_train)],
# verbose=20,eval_metric='l2')
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_lgbm_classification(client, X_train, y_train, X_test, config):
print("Training lightGBM classification")
model_hyperparams = config["advance"] \
["distributedlearner_config"] \
["modelParams"] \
["classifierModelParams"] \
["Distributed Light Gradient Boosting (LightGBM)"]
dask_model = lgb.DaskLGBMClassifier(
client=client,
num_leaves=int(model_hyperparams["num_leaves"]),
learning_rate=float(model_hyperparams["learning_rate"]),
feature_fraction=float(model_hyperparams["feature_fraction"]),
bagging_fraction=float(model_hyperparams["bagging_fraction"]),
bagging_freq=int(model_hyperparams["bagging_freq"]),
max_depth=int(model_hyperparams["max_depth"]),
min_data_in_leaf=int(model_hyperparams["min_data_in_leaf"]),
n_estimators=int(model_hyperparams["n_estimators"]),
verbosity=3)
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train,
# eval_set=[(X_test,y_test),
# (X_train,y_train)],
# verbose=20,eval_metric='logloss')
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def evaluate_model_classification(model, config, X_test, y_test, class_names):
metrics = config["basic"]["scoringCriteria"]["classification"]
y_test = y_test.to_dask_array().compute()
log = logging.getLogger('eion')
X_test = X_test.to_dask_array(lengths=True)
y_pred = model.predict(X_test)
if metrics["Accuracy"] == "True":
# ParallelPostFit(estimator=model, scoring='accuracy')
# score = model.score(X_test, y_test) * 100.0
score = accuracy_score(y_test, y_pred) * 100.0
type = 'Accuracy'
log.info('Status:-|... Accuracy Score '+str(score))
if metrics["Recall"] == "True":
score = recall_score(y_test, y_pred)
type = 'Recall'
log.info('Status:-|... Recall Score '+str(score))
if metrics["Precision"] == "True":
score = precision_score(y_test, y_pred)
type = 'Precision'
log.info('Status:-|... Precision Score '+str(score))
if metrics["F1_Score"] == "True":
score = f1_score(y_test, y_pred)
type = 'F1'
log.info('Status:-|... F1 Score '+str(score))
y_pred_prob = model.predict_proba(X_test)
if len(class_names) == 2:
roc_auc = roc_auc_score(y_test, y_pred)
else:
roc_auc = roc_auc_score(y_test, y_pred_prob, multi_class='ovr')
if metrics["ROC_AUC"] == "True":
score = roc_auc
type = 'ROC_AUC'
log.info('Status:-|... ROC AUC Score '+str(score))
class_report = classification_report(y_test, y_pred, output_dict=True, target_names=class_names)
conf_matrix = confusion_matrix(y_test, y_pred)
return type, score, class_report, conf_matrix, roc_auc
def evaluate_model_regression(model, config, X_test, y_test):
metrics = config["basic"]["scoringCriteria"]["regression"]
y_pred = model.predict(X_test).compute()
y_test = y_test.to_dask_array().compute()
X_test = X_test.to_dask_array(lengths=True)
log = logging.getLogger('eion')
mse = mean_squared_error(y_test, y_pred)
rmse = mean_squared_error(y_test, y_pred, squared=False)
norm_rmse = rmse * 100 / (y_test.max() - y_test.min())
mape = mean_absolute_percentage_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
if metrics["Mean Squared Error"] == "True":
type = 'Mean Squared Error'
score = mse
log.info('Status:-|... Mean Squared Error '+str(score))
if metrics["Root Mean Squared Error"] == "True":
type = 'Root Mean Squared Error'
score = rmse
log.info('Status:-|... Root Mean Square Error '+str(score))
if metrics["R-Squared"] == "True":
type = 'R-Squared'
score = r2
log.info('Status:-|... R Squared Error '+str(score))
if metrics["Mean Absolute Error"] == "True":
type = 'Mean Absolute Error'
score = mae
log.info('Status:-|... Mean Absolute Error '+str(score))
return type, score, mse, rmse, norm_rmse, r2, mae, mape
def save_config(config):
deploy_location = config["basic"]["modelLocation"]
saved_model_file = os.path.join(deploy_location,'etc','config.json')
print(predict_config)
with open (saved_model_file,'w') as f:
json.dump(predict_config, f)
f.close()
def save_model(config, model):
model_name = config["basic"]["modelName"]
model_version = config["basic"]["modelVersion"]
analysis_type = config["basic"]["analysisType"]
deploy_location = config["basic"]["modelLocation"]
if analysis_type["classification"] == "True":
problem_type = "classification"
if analysis_type["regression"] == "True":
problem_type = "regression"
print("model_name", model_name)
print("model_version", model_version)
print("problem_type", problem_type)
print("deploy_location", deploy_location)
file_name = problem_type + '_' + model_version + ".sav"
saved_model = os.path.join(deploy_location,'model',file_name)
print("Save trained model to directory: ", save_model)
with open (saved_model,'wb') as f:
pkl.dump(model,f)
f.close()
predict_config['modelLocation'] = file_name
def save_pkl(model, filename):
with open(filename, 'wb') as f:
pkl.dump(model, f,
protocol=pkl.HIGHEST_PROTOCOL)
def dask_findiforestOutlier(X):
print("Outlier removal with Isolation Forest...")
isolation_forest = IsolationForest(n_estimators=100)
with joblib.parallel_backend('dask'):
isolation_forest.fit(X)
y_pred_train = isolation_forest.fit_predict(X)
mask_isoForest = y_pred_train != -1
return mask_isoForest
def training(configFile):
start_time = time.time()
config = load_config_json(configFile)
data_dir = config["basic"]["dataLocation"]
n_workers = int(config["advance"]
["distributedlearner_config"]
["n_workers"])
npartitions = int(config["advance"]
["distributedlearner_config"]
["npartitions"])
threads_per_worker = int(config["advance"]
["distributedlearner_config"]
["threads_per_worker"])
predict_config['modelName'] = config["basic"]["modelName"]
predict_config['modelVersion'] = config["basic"]["modelVersion"]
predict_config['targetFeature'] = config["basic"]["targetFeature"]
predict_config['trainingFeatures'] = config["basic"]["trainingFeatures"]
predict_config['dataLocation'] = config["basic"]["dataLocation"]
predict_config['n_workers'] = n_workers
predict_config['npartitions'] = npartitions
predict_config['threads_per_worker'] = threads_per_worker
if config['basic']['analysisType']["classification"] == "True":
problemType = "classification"
oProblemType = "Distributed Classification"
if config['basic']['analysisType']["regression"] == "True":
problemType = "regression"
oProblemType = "Distributed Regression"
predict_config['analysisType'] = problemType
predict_config['scoringCriteria'] = ''
target_feature = config["basic"]["targetFeature"]
training_features = config["basic"]["trainingFeatures"]
deploy_location = config["basic"]["deployLocation"]
is_xgb_class = config["basic"] \
["algorithms"]["classification"] \
["Distributed Extreme Gradient Boosting (XGBoost)"]
is_lgbm_class = config["basic"] \
["algorithms"]["classification"] \
["Distributed Light Gradient Boosting (LightGBM)"]
is_xgb_regress = config["basic"] \
["algorithms"]["regression"] \
["Distributed Extreme Gradient Boosting (XGBoost)"]
is_lgbm_regress = config["basic"] \
["algorithms"]["regression"] \
["Distributed Light Gradient Boosting (LightGBM)"]
if is_xgb_class=="True" or is_xgb_regress=="True":
algorithm = "Distributed Extreme Gradient Boosting (XGBoost)"
predict_config['algorithm'] = algorithm
if is_lgbm_class=="True" or is_lgbm_regress=="True":
algorithm = "Distributed Light Gradient Boosting (LightGBM)"
predict_config['algorithm'] = algorithm
cluster = dask.distributed.LocalCluster(n_workers=n_workers,
threads_per_worker=threads_per_worker,
# dashboard_address="127.0.0.1:8787"
)
client = dask.distributed.Client(cluster)
df_dask = load_data_dask(data_dir, npartitions=npartitions)
deployFolder = config["basic"]["deployLocation"]
modelName = config["basic"]["modelName"]
modelName = modelName.replace(" ", "_")
modelVersion = config["basic"]["modelVersion"]
modelLocation = os.path.join(deployFolder,modelName)
os.makedirs(modelLocation,exist_ok = True)
deployLocation = os.path.join(modelLocation,modelVersion)
predict_config['deployLocation'] = deployLocation
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
time.sleep(2)
os.makedirs(deployLocation)
modelFolderLocation = os.path.join(deployLocation,'model')
try:
os.makedirs(modelFolderLocation)
except OSError as e:
print("\nModel Folder Already Exists")
etcFolderLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcFolderLocation)
except OSError as e:
print("\ETC Folder Already Exists")
logFolderLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logFolderLocation)
except OSError as e:
print("\nLog Folder Already Exists")
logFileName=os.path.join(logFolderLocation,'model_training_logs.log')
outputjsonFile=os.path.join(deployLocation,'etc','output.json')
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('Status:-|... Distributed Learning Started')
config['basic']['modelLocation'] = deployLocation
# Get input for EDA
# descr, corr = get_dask_eda(df_dask=df_dask)
#print(descr)
# print(corr)
#print(df_dask.columns)
#print("target feature", target_feature)
df_dask = df_dask.dropna(subset=[target_feature])
if is_xgb_class == "True" or is_lgbm_class == "True":
df_dask = df_dask.categorize(columns=[target_feature])
df_dask[target_feature] = df_dask[target_feature].astype('category')
df_dask[target_feature] = df_dask[target_feature].cat.as_known()
label_mapping = dict(enumerate(df_dask[target_feature].cat.categories))
df_dask[target_feature] = df_dask[target_feature].cat.codes
        label_mapping_file = os.path.join(deployLocation,'etc','label_mapping.json')
with open(label_mapping_file, 'w') as f:
json.dump(label_mapping, f)
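        # Illustrative only: enumerate() gives integer keys, which json.dump writes as
        # strings, so with hypothetical classes "setosa" and "versicolor" the saved
        # label_mapping.json would contain:
        #   {"0": "setosa", "1": "versicolor"}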
if config["advance"]["profiler"]["removeDuplicate"] == "True":
df_dask = df_dask.drop_duplicates()
# Need to dropna for case of categoricalFillMethod
# if config["advance"]["profiler"]["numericalFillMethod"]["Drop"] == "True":
# df_dask = df_dask.dropna()
trainingFeatures = config["basic"]["trainingFeatures"].split(',')
if target_feature not in trainingFeatures:
trainingFeatures.append(target_feature)
df_dask = df_dask[trainingFeatures]
y = df_dask[target_feature]
X = df_dask.drop(target_feature, axis=1)
print("after X.shape, y.shape", X.shape, y.shape)
X_train, X_test, y_train, y_test = dask_ml.model_selection.train_test_split(X, y,
test_size=0.2, random_state=0)
trainingFeatures = config["basic"]["trainingFeatures"].split(',')
outputJson = None
conf_matrix_dict = {}
train_conf_matrix_dict = {}
try:
if is_xgb_class == "True":
modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
dask_model, X_train, X_test = train_xgb_classification(client, X_train, y_train, X_test, config)
class_names = list(label_mapping.values())
_, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config,
X_train, y_train, class_names)
scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config,
X_test, y_test, class_names)
for i in range(len(conf_matrix)):
conf_matrix_dict_1 = {}
for j in range(len(conf_matrix[i])):
conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j])
conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1
for i in range(len(train_conf_matrix)):
train_conf_matrix_dict_1 = {}
for j in range(len(train_conf_matrix[i])):
train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j])
train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1
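            # Shape of the nested dicts built above (hypothetical class names "cat"/"dog";
            # rows are actual classes, columns are predicted classes):
            #   conf_matrix_dict == {'act:cat': {'pre:cat': 40, 'pre:dog': 2},
            #                        'act:dog': {'pre:cat': 3,  'pre:dog': 55}}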
# print(roc_auc)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\
'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\
'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_lgbm_class == "True":
modelName = 'Distributed Light Gradient Boosting (LightGBM)'
dask_model, X_train, X_test = train_lgbm_classification(client, X_train, y_train, X_test, config)
class_names = list(label_mapping.values())
_, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config,
X_train, y_train, class_names)
scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config,
X_test, y_test, class_names)
for i in range(len(conf_matrix)):
conf_matrix_dict_1 = {}
for j in range(len(conf_matrix[i])):
conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j])
conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1
for i in range(len(train_conf_matrix)):
train_conf_matrix_dict_1 = {}
for j in range(len(train_conf_matrix[i])):
train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j])
train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\
'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\
'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_xgb_regress == "True":
modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
dask_model, X_train, X_test = train_xgb_regression(client, X_train, y_train, X_test, config)
_, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config,
X_train, y_train)
scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config,
X_test, y_test)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\
'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \
'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_lgbm_regress == "True":
modelName = 'Distributed Light Gradient Boosting (LightGBM)'
dask_model, X_train, X_test = train_lgbm_regression(client, X_train, y_train, X_test, config)
_, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config,
X_train, y_train)
scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config,
X_test, y_test)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\
'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \
'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','dl_aion_predict.py')
shutil.copy2(src,deployLocation)
os.rename(os.path.join(deployLocation,'dl_aion_predict.py'),os.path.join(deployLocation,'aion_predict.py'))
except Exception as e:
outputJson = {"status":"FAIL","message":str(e)}
print(e)
client.close()
cluster.close()
log.info('Status:-|... Distributed Learning Completed')
    with open(outputjsonFile, 'w') as f:
        json.dump(outputJson, f)
output_json = json.dumps(outputJson)
log.info('aion_learner_status:'+str(output_json))
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
print("\n")
print("aion_learner_status:",output_json)
print("\n")
end_time = time.time()
print("--- %s processing time (sec) ---" % (end_time - start_time)) |
dataset.py | """
The :mod:`dataset <surprise.dataset>` module defines the :class:`Dataset` class
and other subclasses which are used for managing datasets.
Users may use both *built-in* and user-defined datasets (see the
:ref:`getting_started` page for examples). Right now, three built-in datasets
are available:
* The `movielens-100k <https://grouplens.org/datasets/movielens/>`_ dataset.
* The `movielens-1m <https://grouplens.org/datasets/movielens/>`_ dataset.
* The `Jester <https://eigentaste.berkeley.edu/dataset/>`_ dataset 2.
Built-in datasets can all be loaded (or downloaded if you haven't already)
using the :meth:`Dataset.load_builtin` method.
Summary:
.. autosummary::
:nosignatures:
Dataset.load_builtin
Dataset.load_from_file
Dataset.load_from_folds
"""
import itertools
import os
import sys
from collections import defaultdict
from .builtin_datasets import BUILTIN_DATASETS, download_builtin_dataset
from .reader import Reader
from .trainset import Trainset
class Dataset:
"""Base class for loading datasets.
Note that you should never instantiate the :class:`Dataset` class directly
(same goes for its derived classes), but instead use one of the three
available methods for loading datasets."""
def __init__(self, reader):
self.reader = reader
@classmethod
def load_builtin(cls, name="ml-100k", prompt=True):
"""Load a built-in dataset.
If the dataset has not already been loaded, it will be downloaded and
saved. You will have to split your dataset using the :meth:`split
<DatasetAutoFolds.split>` method. See an example in the :ref:`User
Guide <cross_validate_example>`.
Args:
name(:obj:`string`): The name of the built-in dataset to load.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'.
Default is 'ml-100k'.
prompt(:obj:`bool`): Prompt before downloading if dataset is not
already on disk.
Default is True.
Returns:
A :obj:`Dataset` object.
Raises:
ValueError: If the ``name`` parameter is incorrect.
"""
try:
dataset = BUILTIN_DATASETS[name]
except KeyError:
raise ValueError(
"unknown dataset "
+ name
+ ". Accepted values are "
+ ", ".join(BUILTIN_DATASETS.keys())
+ "."
)
# if dataset does not exist, offer to download it
if not os.path.isfile(dataset.path):
answered = not prompt
while not answered:
print(
"Dataset " + name + " could not be found. Do you want "
"to download it? [Y/n] ",
end="",
)
choice = input().lower()
if choice in ["yes", "y", "", "omg this is so nice of you!!"]:
answered = True
elif choice in ["no", "n", "hell no why would i want that?!"]:
answered = True
print("Ok then, I'm out!")
sys.exit()
download_builtin_dataset(name)
reader = Reader(**dataset.reader_params)
return cls.load_from_file(file_path=dataset.path, reader=reader)
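    # Minimal usage sketch, assuming the usual surprise imports (SVD and
    # cross_validate are illustrative choices, not required by this method):
    #
    #   from surprise import Dataset, SVD
    #   from surprise.model_selection import cross_validate
    #
    #   data = Dataset.load_builtin("ml-100k")   # prompts to download on first use
    #   cross_validate(SVD(), data, measures=["RMSE", "MAE"], cv=5, verbose=True)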
@classmethod
def load_from_file(cls, file_path, reader):
"""Load a dataset from a (custom) file.
Use this if you want to use a custom dataset and all of the ratings are
stored in one file. You will have to split your dataset using the
:meth:`split <DatasetAutoFolds.split>` method. See an example in the
:ref:`User Guide <load_from_file_example>`.
Args:
file_path(:obj:`string`): The path to the file containing ratings.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file.
"""
return DatasetAutoFolds(ratings_file=file_path, reader=reader)
@classmethod
def load_from_folds(cls, folds_files, reader):
"""Load a dataset where folds (for cross-validation) are predefined by
some files.
The purpose of this method is to cover a common use case where a
dataset is already split into predefined folds, such as the
movielens-100k dataset which defines files u1.base, u1.test, u2.base,
u2.test, etc... It can also be used when you don't want to perform
cross-validation but still want to specify your training and testing
data (which comes down to 1-fold cross-validation anyway). See an
example in the :ref:`User Guide <load_from_folds_example>`.
Args:
folds_files(:obj:`iterable` of :obj:`tuples`): The list of the
folds. A fold is a tuple of the form ``(path_to_train_file,
path_to_test_file)``.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the files.
"""
return DatasetUserFolds(folds_files=folds_files, reader=reader)
@classmethod
def load_from_df(cls, df, reader):
"""Load a dataset from a pandas dataframe.
Use this if you want to use a custom dataset that is stored in a pandas
dataframe. See the :ref:`User Guide<load_from_df_example>` for an
example.
Args:
df(`Dataframe`): The dataframe containing the ratings. It must have
three columns, corresponding to the user (raw) ids, the item
(raw) ids, and the ratings, in this order.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file. Only the ``rating_scale`` field needs to be
specified.
"""
return DatasetAutoFolds(reader=reader, df=df)
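    # Hedged usage sketch for load_from_df (column names are arbitrary; only the
    # user/item/rating column order matters, and Reader needs a rating_scale):
    #
    #   import pandas as pd
    #   from surprise import Dataset, Reader
    #
    #   ratings = pd.DataFrame({"user": ["A", "A", "B"],
    #                           "item": [1, 2, 1],
    #                           "rating": [4.0, 3.5, 5.0]})
    #   data = Dataset.load_from_df(ratings[["user", "item", "rating"]],
    #                               Reader(rating_scale=(1, 5)))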
def read_ratings(self, file_name):
"""Return a list of ratings (user, item, rating, timestamp) read from
file_name"""
with open(os.path.expanduser(file_name)) as f:
raw_ratings = [
self.reader.parse_line(line)
for line in itertools.islice(f, self.reader.skip_lines, None)
]
return raw_ratings
def construct_trainset(self, raw_trainset):
raw2inner_id_users = {}
raw2inner_id_items = {}
current_u_index = 0
current_i_index = 0
ur = defaultdict(list)
ir = defaultdict(list)
# user raw id, item raw id, translated rating, time stamp
for urid, irid, r, timestamp in raw_trainset:
try:
uid = raw2inner_id_users[urid]
except KeyError:
uid = current_u_index
raw2inner_id_users[urid] = current_u_index
current_u_index += 1
try:
iid = raw2inner_id_items[irid]
except KeyError:
iid = current_i_index
raw2inner_id_items[irid] = current_i_index
current_i_index += 1
ur[uid].append((iid, r))
ir[iid].append((uid, r))
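        # Hedged illustration of the indices built above: ur maps inner user id ->
        # [(inner item id, rating), ...] and ir is the item-centric mirror. With two
        # users and two items (made-up ratings):
        #   ur == {0: [(0, 4.0), (1, 3.0)], 1: [(0, 5.0)]}
        #   ir == {0: [(0, 4.0), (1, 5.0)], 1: [(0, 3.0)]}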
n_users = len(ur) # number of users
n_items = len(ir) # number of items
n_ratings = len(raw_trainset)
trainset = Trainset(
ur,
ir,
n_users,
n_items,
n_ratings,
self.reader.rating_scale,
raw2inner_id_users,
raw2inner_id_items,
)
return trainset
def construct_testset(self, raw_testset):
return [(ruid, riid, r_ui_trans) for (ruid, riid, r_ui_trans, _) in raw_testset]
class DatasetUserFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are predefined."""
def __init__(self, folds_files=None, reader=None):
Dataset.__init__(self, reader)
self.folds_files = folds_files
# check that all files actually exist.
for train_test_files in self.folds_files:
for f in train_test_files:
if not os.path.isfile(os.path.expanduser(f)):
raise ValueError("File " + str(f) + " does not exist.")
class DatasetAutoFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are not predefined. (Or for when there are no folds at
all)."""
def __init__(self, ratings_file=None, reader=None, df=None):
Dataset.__init__(self, reader)
self.has_been_split = False # flag indicating if split() was called.
if ratings_file is not None:
self.ratings_file = ratings_file
self.raw_ratings = self.read_ratings(self.ratings_file)
elif df is not None:
self.df = df
self.raw_ratings = [
(uid, iid, float(r), None)
for (uid, iid, r) in self.df.itertuples(index=False)
]
else:
raise ValueError("Must specify ratings file or dataframe.")
def build_full_trainset(self):
"""Do not split the dataset into folds and just return a trainset as
is, built from the whole dataset.
User can then query for predictions, as shown in the :ref:`User Guide
<train_on_whole_trainset>`.
Returns:
The :class:`Trainset <surprise.Trainset>`.
"""
return self.construct_trainset(self.raw_ratings)
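        # Hedged usage sketch (KNNBasic is just one example algorithm; `data` is a
        # previously loaded Dataset):
        #
        #   trainset = data.build_full_trainset()
        #   algo = KNNBasic()
        #   algo.fit(trainset)
        #   algo.predict("userA", "item1")   # raw (not inner) ids, hypothetical values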
|
dump.py | """
The :mod:`dump <surprise.dump>` module defines the :func:`dump` function.
"""
import pickle
def dump(file_name, predictions=None, algo=None, verbose=0):
"""A basic wrapper around Pickle to serialize a list of prediction and/or
an algorithm on drive.
What is dumped is a dictionary with keys ``'predictions'`` and ``'algo'``.
Args:
file_name(str): The name (with full path) specifying where to dump the
predictions.
predictions(list of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`): The
predictions to dump.
algo(:class:`Algorithm\
<surprise.prediction_algorithms.algo_base.AlgoBase>`, optional):
The algorithm to dump.
verbose(int): Level of verbosity. If ``1``, then a message indicates
that the dumping went successfully. Default is ``0``.
"""
dump_obj = {"predictions": predictions, "algo": algo}
pickle.dump(dump_obj, open(file_name, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print("The dump has been saved as file", file_name)
def load(file_name):
"""A basic wrapper around Pickle to deserialize a list of prediction and/or
an algorithm that were dumped on drive using :func:`dump()
<surprise.dump.dump>`.
Args:
file_name(str): The path of the file from which the algorithm is
to be loaded
Returns:
A tuple ``(predictions, algo)`` where ``predictions`` is a list of
:class:`Prediction
<surprise.prediction_algorithms.predictions.Prediction>` objects and
``algo`` is an :class:`Algorithm
<surprise.prediction_algorithms.algo_base.AlgoBase>` object. Depending
on what was dumped, some of these may be ``None``.
"""
dump_obj = pickle.load(open(file_name, "rb"))
return dump_obj["predictions"], dump_obj["algo"]
|
accuracy.py | """
The :mod:`surprise.accuracy` module provides tools for computing accuracy
metrics on a set of predictions.
Available accuracy metrics:
.. autosummary::
:nosignatures:
rmse
mse
mae
fcp
"""
from collections import defaultdict
import numpy as np
def rmse(predictions, verbose=True):
"""Compute RMSE (Root Mean Squared Error).
.. math::
\\text{RMSE} = \\sqrt{\\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
\\hat{R}}(r_{ui} - \\hat{r}_{ui})^2}.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Root Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
mse = np.mean(
[float((true_r - est) ** 2) for (_, _, true_r, est, _) in predictions]
)
rmse_ = np.sqrt(mse)
if verbose:
print(f"RMSE: {rmse_:1.4f}")
return rmse_
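# Hedged usage sketch: these metrics consume the list of Prediction objects returned
# by an algorithm's test() method (building trainset/testset is not shown here):
#
#   from surprise import accuracy
#   predictions = algo.test(testset)
#   accuracy.rmse(predictions)   # prints "RMSE: ..." and returns the float value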
def mse(predictions, verbose=True):
"""Compute MSE (Mean Squared Error).
.. math::
\\text{MSE} = \\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
\\hat{R}}(r_{ui} - \\hat{r}_{ui})^2.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
mse_ = np.mean(
[float((true_r - est) ** 2) for (_, _, true_r, est, _) in predictions]
)
if verbose:
print(f"MSE: {mse_:1.4f}")
return mse_
def mae(predictions, verbose=True):
"""Compute MAE (Mean Absolute Error).
.. math::
\\text{MAE} = \\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
\\hat{R}}|r_{ui} - \\hat{r}_{ui}|
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Mean Absolute Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
mae_ = np.mean([float(abs(true_r - est)) for (_, _, true_r, est, _) in predictions])
if verbose:
print(f"MAE: {mae_:1.4f}")
return mae_
def fcp(predictions, verbose=True):
"""Compute FCP (Fraction of Concordant Pairs).
Computed as described in paper `Collaborative Filtering on Ordinal User
Feedback <https://www.ijcai.org/Proceedings/13/Papers/449.pdf>`_ by Koren
and Sill, section 5.2.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Fraction of Concordant Pairs.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
predictions_u = defaultdict(list)
nc_u = defaultdict(int)
nd_u = defaultdict(int)
for u0, _, r0, est, _ in predictions:
predictions_u[u0].append((r0, est))
for u0, preds in predictions_u.items():
for r0i, esti in preds:
for r0j, estj in preds:
if esti > estj and r0i > r0j:
nc_u[u0] += 1
if esti >= estj and r0i < r0j:
nd_u[u0] += 1
nc = np.mean(list(nc_u.values())) if nc_u else 0
nd = np.mean(list(nd_u.values())) if nd_u else 0
try:
fcp = nc / (nc + nd)
except ZeroDivisionError:
raise ValueError(
"cannot compute fcp on this list of prediction. "
+ "Does every user have at least two predictions?"
)
if verbose:
print(f"FCP: {fcp:1.4f}")
return fcp
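# Worked micro-example for fcp (made-up numbers): a single user with true ratings
# [3, 5] and estimates [2.5, 4.0] yields one concordant ordered pair (est 4.0 > 2.5
# while true 5 > 3) and no discordant pair, so FCP = 1 / (1 + 0) = 1.0.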
|